././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7455924 pixman-0.44.0/0000755000175000017500000000000014712450021013101 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.editorconfig0000664000175000017500000000030014712446423015564 0ustar00mattst88mattst88# To use this config on you editor, follow the instructions at: # http://editorconfig.org root = true [*] tab_width = 8 [meson.build,meson_options.txt] indent_style = space indent_size = 2 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitignore0000664000175000017500000000122214712446423015103 0ustar00mattst88mattst88Makefile Makefile.in .deps .libs .msg *.pc *.lo *.la *.a *.o *~ aclocal.m4 autom4te.cache compile config.guess config.log config.status config.sub configure depcomp install-sh libtool ltmain.sh missing stamp-h? config.h config.h.in .*.swp demos/*-test demos/checkerboard demos/clip-in demos/linear-gradient demos/quad2quad demos/scale demos/dither pixman/pixman-srgb.c pixman/pixman-version.h test/*-test test/affine-bench test/alpha-loop test/alphamap test/check-formats test/clip-in test/composite test/infinite-loop test/lowlevel-blt-bench test/radial-invalid test/region-translate test/scaling-bench test/trap-crasher *.pdb *.dll *.lib *.ilk *.obj *.exe ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/0000775000175000017500000000000014712446423015251 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/0000775000175000017500000000000014712446423016736 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 
pixman-0.44.0/.gitlab-ci.d/01-docker/Dockerfile0000664000175000017500000001166014712446423020734 0ustar00mattst88mattst88ARG BASE_IMAGE=docker.io/debian ARG BASE_IMAGE_TAG=bookworm-slim FROM ${BASE_IMAGE}:${BASE_IMAGE_TAG} AS base LABEL org.opencontainers.image.title="Pixman build environment for platform coverage" \ org.opencontainers.image.authors="Marek PikuĊ‚a " ARG DEBIAN_FRONTEND=noninteractive ENV APT_UPDATE="apt-get update" \ APT_INSTALL="apt-get install -y --no-install-recommends" \ APT_CLEANUP="rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*" ARG GCOVR_VERSION="~=7.2" ARG MESON_VERSION="~=1.6" RUN ${APT_UPDATE} \ && ${APT_INSTALL} \ # Build dependencies. build-essential \ ninja-build \ pkg-config \ qemu-user \ # pipx dependencies. python3-argcomplete \ python3-packaging \ python3-pip \ python3-platformdirs \ python3-userpath \ python3-venv \ # gcovr dependencies. libxml2-dev \ libxslt-dev \ python3-dev \ && ${APT_CLEANUP} \ # Install pipx using pip to have a more recent version of pipx, which # supports the `--global` flag. && pip install pipx --break-system-packages \ # Install a recent version of meson and gcovr using pipx to have the same # version across all variants regardless of base. && pipx install --global \ gcovr${GCOVR_VERSION} \ meson${MESON_VERSION} \ && gcovr --version \ && echo Meson version: \ && meson --version FROM base AS llvm-base # LLVM 16 is the highest available in Bookworm. Preferably, we should use the # same version for all platforms, but it's not possible at the moment. 
ARG LLVM_VERSION=16 RUN ${APT_UPDATE} \ && ${APT_INSTALL} \ clang-${LLVM_VERSION} \ libclang-rt-${LLVM_VERSION}-dev \ lld-${LLVM_VERSION} \ llvm-${LLVM_VERSION} \ && ${APT_CLEANUP} \ && ln -f /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang \ && ln -f /usr/bin/lld-${LLVM_VERSION} /usr/bin/lld \ && ln -f /usr/bin/llvm-ar-${LLVM_VERSION} /usr/bin/llvm-ar \ && ln -f /usr/bin/llvm-strip-${LLVM_VERSION} /usr/bin/llvm-strip FROM llvm-base AS native-base ARG LLVM_VERSION=16 RUN ${APT_UPDATE} \ && ${APT_INSTALL} \ # Runtime library dependencies. libglib2.0-dev \ libgtk-3-dev \ libpng-dev \ # Install libomp-dev if available (OpenMP support for LLVM). It's done only # for the native images, as OpenMP support in cross-build environment is # tricky for LLVM. && (${APT_INSTALL} libomp-${LLVM_VERSION}-dev \ || echo "OpenMP not available on this platform.") \ && ${APT_CLEANUP} # The following targets differ in BASE_IMAGE. FROM native-base AS linux-386 FROM native-base AS linux-amd64 FROM native-base AS linux-arm-v5 FROM native-base AS linux-arm-v7 FROM native-base AS linux-arm64-v8 FROM native-base AS linux-mips64el FROM native-base AS linux-mipsel FROM native-base AS linux-ppc64le FROM native-base AS linux-riscv64 # The following targets should have a common BASE_IMAGE. FROM llvm-base AS linux-mips RUN ${APT_UPDATE} \ && ${APT_INSTALL} gcc-multilib-mips-linux-gnu \ && ${APT_CLEANUP} FROM llvm-base AS linux-ppc RUN ${APT_UPDATE} \ && ${APT_INSTALL} gcc-multilib-powerpc-linux-gnu \ && ${APT_CLEANUP} FROM llvm-base AS linux-ppc64 RUN ${APT_UPDATE} \ && ${APT_INSTALL} gcc-multilib-powerpc64-linux-gnu \ && ${APT_CLEANUP} # We use a common image for Windows i686 and amd64, as it doesn't make sense to # make them separate in terms of build time and image size. After two runs they # should use the same cache layers, so in the end it makes the collective image # size smaller. 
FROM base AS windows-base ARG LLVM_MINGW_RELEASE=20240619 ARG LLVM_MINGW_VARIANT=llvm-mingw-${LLVM_MINGW_RELEASE}-msvcrt-ubuntu-20.04-x86_64 RUN ${APT_UPDATE} \ && ${APT_INSTALL} wget \ && ${APT_CLEANUP} \ && cd /opt \ && wget https://github.com/mstorsjo/llvm-mingw/releases/download/${LLVM_MINGW_RELEASE}/${LLVM_MINGW_VARIANT}.tar.xz \ && tar -xf ${LLVM_MINGW_VARIANT}.tar.xz \ && rm -f ${LLVM_MINGW_VARIANT}.tar.xz ENV PATH=${PATH}:/opt/${LLVM_MINGW_VARIANT}/bin FROM windows-base AS windows-x86-base RUN dpkg --add-architecture i386 \ && ${APT_UPDATE} \ && ${APT_INSTALL} \ gcc-mingw-w64-i686 \ gcc-mingw-w64-x86-64 \ mingw-w64-tools \ procps \ wine \ wine32 \ wine64 \ && ${APT_CLEANUP} \ # Inspired by https://code.videolan.org/videolan/docker-images && wine wineboot --init \ && while pgrep wineserver > /dev/null; do \ echo "waiting ..."; \ sleep 1; \ done \ && rm -rf /tmp/wine-* FROM windows-x86-base AS windows-686 FROM windows-x86-base AS windows-amd64 # aarch64 image requires linaro/wine-arm64 as a base. 
FROM windows-base AS windows-arm64-v8 RUN wine-arm64 wineboot --init \ && while pgrep wineserver > /dev/null; do \ echo "waiting ..."; \ sleep 1; \ done \ && rm -rf /tmp/wine-* ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/0000775000175000017500000000000014712446423021012 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-386.env0000664000175000017500000000015014712446423023175 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/386 BASE_IMAGE=docker.io/i386/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-amd64.env0000664000175000017500000000015314712446423023573 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/amd64 BASE_IMAGE=docker.io/amd64/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-arm-v5.env0000664000175000017500000000015614712446423023772 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/arm/v5 BASE_IMAGE=docker.io/arm32v5/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-arm-v7.env0000664000175000017500000000015614712446423023774 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/arm/v7 BASE_IMAGE=docker.io/arm32v7/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-arm64-v8.env0000664000175000017500000000016014712446423024142 
0ustar00mattst88mattst88DOCKER_PLATFORM=linux/arm64/v8 BASE_IMAGE=docker.io/arm64v8/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-mips.env0000664000175000017500000000015314712446423023630 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/amd64 BASE_IMAGE=docker.io/amd64/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-mips64el.env0000664000175000017500000000016114712446423024322 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/mips64el BASE_IMAGE=docker.io/mips64le/debian BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-mipsel.env0000664000175000017500000000020614712446423024150 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/mipsel BASE_IMAGE=docker.io/serenitycode/debian-debootstrap BASE_IMAGE_TAG=mipsel-bookworm-slim LLVM_VERSION=14 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7322593 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-ppc.env0000777000175000017500000000000014712450021026205 2linux-amd64.envustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7322593 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-ppc64.env0000777000175000017500000000000014712450021026357 2linux-amd64.envustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-ppc64le.env0000664000175000017500000000015714712446423024141 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/ppc64le BASE_IMAGE=docker.io/ppc64le/debian 
BASE_IMAGE_TAG=bookworm-slim LLVM_VERSION=16 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/linux-riscv64.env0000664000175000017500000000015214712446423024157 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/riscv64 BASE_IMAGE=docker.io/riscv64/debian BASE_IMAGE_TAG=sid-slim LLVM_VERSION=18 ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7322593 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/windows-686.env0000777000175000017500000000000014712450021026301 2linux-amd64.envustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7322593 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/windows-amd64.env0000777000175000017500000000000014712450021026671 2linux-amd64.envustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker/target-env/windows-arm64-v8.env0000664000175000017500000000013114712446423024473 0ustar00mattst88mattst88DOCKER_PLATFORM=linux/amd64 BASE_IMAGE=docker.io/linaro/wine-arm64 BASE_IMAGE_TAG=latest ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/01-docker.yml0000664000175000017500000000536514712446423017472 0ustar00mattst88mattst88# Docker build stage # # It builds a multi-arch image for all required architectures. Each image can be # later easily used with properly configured Docker (which uses binfmt and QEMU # underneath). 
docker: stage: docker image: quay.io/buildah/stable rules: - if: "$CI_PIPELINE_SOURCE == 'merge_request_event' && $TARGET =~ $ACTIVE_TARGET_PATTERN" changes: paths: - .gitlab-ci.d/01-docker.yml - .gitlab-ci.d/01-docker/**/* variables: DOCKER_TAG: $CI_COMMIT_REF_SLUG DOCKER_IMAGE_NAME: ${CI_REGISTRY_IMAGE}/pixman:${DOCKER_TAG} - if: "$CI_PIPELINE_SOURCE == 'schedule' && $TARGET =~ $ACTIVE_TARGET_PATTERN" - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TARGET =~ $ACTIVE_TARGET_PATTERN" - if: "$CI_COMMIT_TAG && $TARGET =~ $ACTIVE_TARGET_PATTERN" variables: # Use vfs with buildah. Docker offers overlayfs as a default, but Buildah # cannot stack overlayfs on top of another overlayfs filesystem. STORAGE_DRIVER: vfs # Write all image metadata in the docker format, not the standard OCI # format. Newer versions of docker can handle the OCI format, but older # versions, like the one shipped with Fedora 30, cannot handle the format. BUILDAH_FORMAT: docker BUILDAH_ISOLATION: chroot CACHE_IMAGE: ${CI_REGISTRY_IMAGE}/cache CACHE_ARGS: --cache-from ${CACHE_IMAGE} --cache-to ${CACHE_IMAGE} before_script: # Login to the target registry. - echo "${CI_REGISTRY_PASSWORD}" | buildah login -u "${CI_REGISTRY_USER}" --password-stdin ${CI_REGISTRY} # Docker Hub login is optional, and can be used to circumvent image pull # quota for anonymous pulls for base images. - echo "${DOCKERHUB_PASSWORD}" | buildah login -u "${DOCKERHUB_USER}" --password-stdin docker.io || echo "Failed to login to Docker Hub." parallel: matrix: - TARGET: - linux-386 - linux-amd64 - linux-arm-v5 - linux-arm-v7 - linux-arm64-v8 - linux-mips - linux-mips64el - linux-mipsel - linux-ppc - linux-ppc64 - linux-ppc64le - linux-riscv64 - windows-686 - windows-amd64 - windows-arm64-v8 script: # Prepare environment. - ${LOAD_TARGET_ENV} - FULL_IMAGE_NAME=${DOCKER_IMAGE_NAME}-${TARGET} # Build and push the image. 
- buildah bud --tag ${FULL_IMAGE_NAME} --layers ${CACHE_ARGS} --target ${TARGET} --platform=${DOCKER_PLATFORM} --build-arg BASE_IMAGE=${BASE_IMAGE} --build-arg BASE_IMAGE_TAG=${BASE_IMAGE_TAG} --build-arg LLVM_VERSION=${LLVM_VERSION} -f Dockerfile .gitlab-ci.d/01-docker/ - buildah images - buildah push ${FULL_IMAGE_NAME} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/02-build.yml0000664000175000017500000000617714712446423017325 0ustar00mattst88mattst88# Build stage # # This stage builds pixman with enabled coverage for all supported # architectures. # # Some targets don't support atomic profile update, so to decrease the number of # gcov errors, they need to be built without OpenMP (single threaded) by adding # `-Dopenmp=disabled` Meson argument. variables: # Used in test stage as well. BUILD_DIR: build-${TOOLCHAIN} # Applicable to all build targets. include: - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-386 - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-amd64 - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-arm-v5 qemu_cpu: arm1136 # Disable coverage, as the tests take too long to run with a single thread. enable_gnu_coverage: false - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-arm-v7 qemu_cpu: max - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-arm64-v8 qemu_cpu: max - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-mips toolchain: [gnu] qemu_cpu: 74Kf enable_gnu_coverage: false # TODO: Merge with the one above once the following issue is resolved: # https://gitlab.freedesktop.org/pixman/pixman/-/issues/105). - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-mips toolchain: [llvm] qemu_cpu: 74Kf job_name_prefix: "." 
job_name_suffix: ":failing" allow_failure: true retry: 0 - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-mips64el qemu_cpu: Loongson-3A4000 - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-mipsel toolchain: [gnu] qemu_cpu: 74Kf # Disable coverage, as the tests take too long to run with a single thread. enable_gnu_coverage: false # TODO: Merge with the one above once the following issue is resolved: # https://gitlab.freedesktop.org/pixman/pixman/-/issues/105). - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-mipsel toolchain: [llvm] qemu_cpu: 74Kf job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true retry: 0 - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-ppc qemu_cpu: g4 enable_gnu_coverage: false - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-ppc64 qemu_cpu: ppc64 enable_gnu_coverage: false - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-ppc64le qemu_cpu: power10 - local: .gitlab-ci.d/templates/build.yml inputs: target: linux-riscv64 qemu_cpu: rv64 - local: .gitlab-ci.d/templates/build.yml inputs: target: windows-686 enable_gnu_coverage: false - local: .gitlab-ci.d/templates/build.yml inputs: target: windows-amd64 enable_gnu_coverage: false - local: .gitlab-ci.d/templates/build.yml inputs: target: windows-arm64-v8 toolchain: [llvm] # GNU toolchain doesn't seem to support Windows on ARM. qemu_cpu: max enable_gnu_coverage: false ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/03-test.yml0000664000175000017500000001237114712446423017177 0ustar00mattst88mattst88# Test stage # # This stage executes the test suite for pixman for all architectures in # different configurations. Build and test is split, as some architectures can # have different QEMU configuration or have multiple supported pixman backends, # which are executed as job matrix. 
# # Mind that `PIXMAN_ENABLE` variable in matrix runs does nothing, but it looks # better in CI to indicate what is actually being tested. # # Some emulated targets are really slow or cannot be run in multithreaded mode # (mipsel, arm-v5). Thus coverage reporting is disabled for them. variables: # Used in summary stage as well. COVERAGE_BASE_DIR: coverage COVERAGE_OUT: ${COVERAGE_BASE_DIR}/${CI_JOB_ID} TEST_NAME: "" # Allow to specify a set of tests to run with run variables. include: - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-386 toolchain: [gnu] pixman_disable: - "sse2 ssse3" # Testing "mmx" - "mmx ssse3" # Testing "sse2" - "mmx sse2" # Testing "ssse3" # TODO: Merge up after resolving # https://gitlab.freedesktop.org/pixman/pixman/-/issues/106 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-386 toolchain: [llvm] pixman_disable: # Same as above. - "sse2 ssse3" - "mmx ssse3" - "mmx sse2" job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-amd64 pixman_disable: - "" - "fast" - "wholeops" - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-arm-v5 toolchain: [gnu] qemu_cpu: [arm1136] pixman_disable: ["arm-neon"] # Test only arm-simd. timeout: 3h test_timeout_multiplier: 40 # TODO: Merge up after resolving # https://gitlab.freedesktop.org/pixman/pixman/-/issues/107 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-arm-v5 toolchain: [llvm] qemu_cpu: [arm1136] pixman_disable: ["arm-neon"] # Test only arm-simd. timeout: 3h test_timeout_multiplier: 40 job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-arm-v7 qemu_cpu: [max] - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-arm64-v8 qemu_cpu: [max] - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-mips toolchain: [gnu] # TODO: Add llvm once the build is fixed. 
qemu_cpu: [74Kf] job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true # Some tests seem to fail. retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-mips64el toolchain: [gnu] qemu_cpu: [Loongson-3A4000] # TODO: Merge up after resolving # https://gitlab.freedesktop.org/pixman/pixman/-/issues/108 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-mips64el toolchain: [llvm] qemu_cpu: [Loongson-3A4000] job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-mipsel toolchain: [gnu] # TODO: Add llvm once the build is fixed. qemu_cpu: [74Kf] timeout: 2h - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-ppc qemu_cpu: [g4] job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true # SIGILL for some tests retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-ppc64 qemu_cpu: [ppc64] job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true # SIGSEGV for some tests retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-ppc64le toolchain: [gnu] qemu_cpu: [power10] # TODO: Merge up after resolving # https://gitlab.freedesktop.org/pixman/pixman/-/issues/109 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-ppc64le toolchain: [llvm] qemu_cpu: [power10] job_name_prefix: "." job_name_suffix: ":failing" allow_failure: true retry: 0 - local: .gitlab-ci.d/templates/test.yml inputs: target: linux-riscv64 qemu_cpu: # Test on target without RVV (verify no autovectorization). - rv64,v=false # Test correctness for different VLENs. - rv64,v=true,vext_spec=v1.0,vlen=128,elen=64 - rv64,v=true,vext_spec=v1.0,vlen=256,elen=64 - rv64,v=true,vext_spec=v1.0,vlen=512,elen=64 - rv64,v=true,vext_spec=v1.0,vlen=1024,elen=64 - local: .gitlab-ci.d/templates/test.yml inputs: target: windows-686 pixman_disable: # The same as for linux-386. 
- "sse2 ssse3" - "mmx ssse3" - "mmx sse2" - local: .gitlab-ci.d/templates/test.yml inputs: target: windows-amd64 pixman_disable: # The same as for linux-amd64. - "" - "fast" - "wholeops" - local: .gitlab-ci.d/templates/test.yml inputs: target: windows-arm64-v8 toolchain: [llvm] qemu_cpu: [max] ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/04-summary.yml0000664000175000017500000000260414712446423017714 0ustar00mattst88mattst88# Summary stage # # This stage takes coverage reports from test runs for all architectures, and # merges it into a single report, with GitLab visualization. There is also an # HTML report generated as a separate artifact. summary: extends: .target:all stage: summary variables: TARGET: linux-amd64 COVERAGE_SUMMARY_DIR: ${COVERAGE_BASE_DIR}/summary needs: - job: test:linux-386 optional: true - job: test:linux-amd64 optional: true - job: test:linux-arm-v7 optional: true - job: test:linux-arm64-v8 optional: true - job: test:linux-mips64el optional: true - job: test:linux-ppc64le optional: true - job: test:linux-riscv64 optional: true script: - echo "Input coverage reports:" && ls ${COVERAGE_BASE_DIR}/*.json || (echo "No coverage reports available." 
&& exit) - | args=( ) for f in ${COVERAGE_BASE_DIR}/*.json; do args+=( "-a" "$f" ) done - mkdir -p ${COVERAGE_SUMMARY_DIR} - gcovr "${args[@]}" --cobertura-pretty --cobertura ${COVERAGE_SUMMARY_DIR}/coverage.xml --html-details ${COVERAGE_SUMMARY_DIR}/coverage.html --txt --print-summary coverage: '/^TOTAL.*\s+(\d+\%)$/' artifacts: reports: coverage_report: coverage_format: cobertura path: ${COVERAGE_SUMMARY_DIR}/coverage.xml paths: - ${COVERAGE_SUMMARY_DIR}/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/0000775000175000017500000000000014712446423017521 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7322593 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-386-gnu.meson0000777000175000017500000000000014712450021026130 2native-gnu.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7322593 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-386-llvm.meson0000777000175000017500000000000014712450021026472 2native-llvm.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-amd64-gnu.meson0000777000175000017500000000000014712450021026523 2native-gnu.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-amd64-llvm.meson0000777000175000017500000000000014712450021027065 2native-llvm.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-arm-v5-gnu.meson0000777000175000017500000000000014712450021030550 2native-gnu-noopenmp.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 
xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-arm-v5-llvm.meson0000777000175000017500000000000014712450021031112 2native-llvm-noopenmp.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-arm-v7-gnu.meson0000777000175000017500000000000014712450021026721 2native-gnu.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-arm-v7-llvm.meson0000777000175000017500000000000014712450021027263 2native-llvm.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-arm64-v8-gnu.meson0000777000175000017500000000000014712450021027074 2native-gnu.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-arm64-v8-llvm.meson0000777000175000017500000000000014712450021027436 2native-llvm.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-mips-gnu.meson0000664000175000017500000000041714712446423023462 0ustar00mattst88mattst88[binaries] c = ['mips-linux-gnu-gcc', '-DCI_HAS_ALL_MIPS_CPU_FEATURES'] ar = 'mips-linux-gnu-ar' strip = 'mips-linux-gnu-strip' exe_wrapper = ['qemu-mips', '-L', '/usr/mips-linux-gnu/'] [host_machine] system = 'linux' cpu_family = 'mips32' cpu = 'mips32' endian = 'big' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-mips-llvm.meson0000664000175000017500000000054414712446423023644 0ustar00mattst88mattst88[binaries] c = ['clang', '-target', 'mips-linux-gnu', '-fPIC', 
'-DCI_HAS_ALL_MIPS_CPU_FEATURES'] ar = 'llvm-ar' strip = 'llvm-strip' exe_wrapper = ['qemu-mips', '-L', '/usr/mips-linux-gnu/'] [built-in options] c_link_args = ['-target', 'mips-linux-gnu', '-fuse-ld=lld'] [host_machine] system = 'linux' cpu_family = 'mips32' cpu = 'mips32' endian = 'big' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-mips64el-gnu.meson0000664000175000017500000000023014712446423024146 0ustar00mattst88mattst88[binaries] c = ['gcc', '-DCI_HAS_ALL_MIPS_CPU_FEATURES'] ar = 'ar' strip = 'strip' pkg-config = 'pkg-config' [project options] mips-dspr2 = 'disabled' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-mips64el-llvm.meson0000664000175000017500000000024414712446423024334 0ustar00mattst88mattst88[binaries] c = ['clang', '-DCI_HAS_ALL_MIPS_CPU_FEATURES'] ar = 'llvm-ar' strip = 'llvm-strip' pkg-config = 'pkg-config' [project options] mips-dspr2 = 'disabled' ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-mipsel-gnu.meson0000777000175000017500000000000014712450021030732 2native-gnu-noopenmp.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-mipsel-llvm.meson0000777000175000017500000000000014712450021031274 2native-llvm-noopenmp.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-ppc-gnu.meson0000664000175000017500000000035714712446423023277 0ustar00mattst88mattst88[binaries] c = 'powerpc-linux-gnu-gcc' ar = 'powerpc-linux-gnu-ar' strip = 'powerpc-linux-gnu-strip' exe_wrapper = ['qemu-ppc', '-L', '/usr/powerpc-linux-gnu'] [host_machine] system = 
'linux' cpu_family = 'ppc' cpu = 'ppc' endian = 'big' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-ppc-llvm.meson0000664000175000017500000000054614712446423023460 0ustar00mattst88mattst88[binaries] c = ['clang', '-target', 'powerpc-linux-gnu'] ar = 'llvm-ar' strip = 'llvm-strip' exe_wrapper = ['qemu-ppc', '-L', '/usr/powerpc-linux-gnu/'] [built-in options] # We cannot use LLD, as it doesn't support big-endian PPC. c_link_args = ['-target', 'powerpc-linux-gnu'] [host_machine] system = 'linux' cpu_family = 'ppc' cpu = 'ppc' endian = 'big' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-ppc64-gnu.meson0000664000175000017500000000037614712446423023452 0ustar00mattst88mattst88[binaries] c = 'powerpc64-linux-gnu-gcc' ar = 'powerpc64-linux-gnu-ar' strip = 'powerpc64-linux-gnu-strip' exe_wrapper = ['qemu-ppc64', '-L', '/usr/powerpc64-linux-gnu/'] [host_machine] system = 'linux' cpu_family = 'ppc64' cpu = 'ppc64' endian = 'big' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-ppc64-llvm.meson0000664000175000017500000000056214712446423023630 0ustar00mattst88mattst88[binaries] c = ['clang', '-target', 'powerpc64-linux-gnu'] ar = 'llvm-ar' strip = 'llvm-strip' exe_wrapper = ['qemu-ppc64', '-L', '/usr/powerpc64-linux-gnu/'] [built-in options] # We cannot use LLD, as it doesn't support big-endian PPC. 
c_link_args = ['-target', 'powerpc64-linux-gnu'] [host_machine] system = 'linux' cpu_family = 'ppc64' cpu = 'ppc64' endian = 'big' ././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-ppc64le-gnu.meson0000777000175000017500000000000014712450021027065 2native-gnu.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-ppc64le-llvm.meson0000777000175000017500000000000014712450021027427 2native-llvm.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-riscv64-gnu.meson0000777000175000017500000000000014712450021027110 2native-gnu.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000003400000000000010212 xustar0028 mtime=1730826256.7355924 pixman-0.44.0/.gitlab-ci.d/meson-cross/linux-riscv64-llvm.meson0000777000175000017500000000000014712450021027452 2native-llvm.mesonustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/native-gnu-noopenmp.meson0000664000175000017500000000022414712446423024470 0ustar00mattst88mattst88[binaries] c = ['gcc', '-DCI_HAS_ALL_MIPS_CPU_FEATURES'] ar = 'ar' strip = 'strip' pkg-config = 'pkg-config' [project options] openmp = 'disabled' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/native-gnu.meson0000664000175000017500000000011114712446423022632 0ustar00mattst88mattst88[binaries] c = 'gcc' ar = 'ar' strip = 'strip' pkg-config = 'pkg-config' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 
pixman-0.44.0/.gitlab-ci.d/meson-cross/native-llvm-noopenmp.meson0000664000175000017500000000024014712446423024647 0ustar00mattst88mattst88[binaries] c = ['clang', '-DCI_HAS_ALL_MIPS_CPU_FEATURES'] ar = 'llvm-ar' strip = 'llvm-strip' pkg-config = 'pkg-config' [project options] openmp = 'disabled' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/native-llvm.meson0000664000175000017500000000012514712446423023020 0ustar00mattst88mattst88[binaries] c = 'clang' ar = 'llvm-ar' strip = 'llvm-strip' pkg-config = 'pkg-config' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/windows-686-gnu.meson0000664000175000017500000000051514712446423023367 0ustar00mattst88mattst88[binaries] c = 'i686-w64-mingw32-gcc' ar = 'i686-w64-mingw32-ar' strip = 'i686-w64-mingw32-strip' windres = 'i686-w64-mingw32-windres' exe_wrapper = 'wine' [built-in options] c_link_args = ['-static-libgcc'] [host_machine] system = 'windows' cpu_family = 'x86' cpu = 'i686' endian = 'little' [project options] openmp = 'disabled' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/windows-686-llvm.meson0000664000175000017500000000051514712446423023550 0ustar00mattst88mattst88[binaries] c = 'i686-w64-mingw32-clang' ar = 'i686-w64-mingw32-llvm-ar' strip = 'i686-w64-mingw32-strip' windres = 'i686-w64-mingw32-windres' exe_wrapper = 'wine' [built-in options] c_link_args = ['-static'] [project options] openmp = 'disabled' [host_machine] system = 'windows' cpu_family = 'x86' cpu = 'i686' endian = 'little' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/windows-amd64-gnu.meson0000664000175000017500000000046314712446423023761 0ustar00mattst88mattst88[binaries] c = 'x86_64-w64-mingw32-gcc' ar = 
'x86_64-w64-mingw32-ar' strip = 'x86_64-w64-mingw32-strip' windres = 'x86_64-w64-mingw32-windres' exe_wrapper = 'wine' [built-in options] c_link_args = ['-static-libgcc'] [host_machine] system = 'windows' cpu_family = 'x86_64' cpu = 'x86_64' endian = 'little' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/windows-amd64-llvm.meson0000664000175000017500000000077714712446423024152 0ustar00mattst88mattst88[binaries] c = 'x86_64-w64-mingw32-clang' ar = 'x86_64-w64-mingw32-llvm-ar' strip = 'x86_64-w64-mingw32-strip' windres = 'x86_64-w64-mingw32-windres' exe_wrapper = 'wine' [built-in options] # Static linking is a workaround around `libwinpthread-1` not being discovered correctly. c_link_args = ['-static'] [project options] # OpenMP is disabled as it is not being discovered correctly during tests. openmp = 'disabled' [host_machine] system = 'windows' cpu_family = 'x86_64' cpu = 'x86_64' endian = 'little' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/meson-cross/windows-arm64-v8-llvm.meson0000664000175000017500000000054614712446423024515 0ustar00mattst88mattst88[binaries] c = 'aarch64-w64-mingw32-clang' ar = 'aarch64-w64-mingw32-llvm-ar' strip = 'aarch64-w64-mingw32-strip' windres = 'aarch64-w64-mingw32-windres' exe_wrapper = 'wine-arm64' [built-in options] c_link_args = ['-static'] [project options] openmp = 'disabled' [host_machine] system = 'windows' cpu_family = 'aarch64' cpu = 'aarch64' endian = 'little' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/pixman-project.yml0000664000175000017500000000417514712446423020743 0ustar00mattst88mattst88# This file contains the set of jobs run by the pixman project: # https://gitlab.freedesktop.org/pixman/pixman/-/pipelines stages: - docker - build - test - summary variables: # Make it possible to 
change RUNNER_TAG from GitLab variables. The default # `kvm` tag has been tested with FDO infrastructure. RUNNER_TAG: kvm # Docker image global configuration. DOCKER_TAG: latest DOCKER_IMAGE_NAME: registry.freedesktop.org/pixman/pixman/pixman:${DOCKER_TAG} # Execute to load a target-specific environment. LOAD_TARGET_ENV: source .gitlab-ci.d/01-docker/target-env/${TARGET}.env # Enable/disable specific targets for code and platform coverage targets. ACTIVE_TARGET_PATTERN: '/linux-386|linux-amd64|linux-arm-v5|linux-arm-v7|linux-arm64-v8|linux-mips|linux-mips64el|linux-mipsel|linux-ppc|linux-ppc64|linux-ppc64le|linux-riscv64|windows-686|windows-amd64|windows-arm64-v8/i' workflow: rules: # Use modified Docker image if building in MR and Docker image is affected # by the MR. - if: $CI_PIPELINE_SOURCE == 'merge_request_event' changes: paths: - .gitlab-ci.d/01-docker.yml - .gitlab-ci.d/01-docker/**/* variables: DOCKER_TAG: $CI_COMMIT_REF_SLUG DOCKER_IMAGE_NAME: ${CI_REGISTRY_IMAGE}/pixman:${DOCKER_TAG} # A standard set of GitLab CI triggers (i.e., MR, schedule, default branch, # and tag). - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS when: never - if: $CI_PIPELINE_SOURCE == 'schedule' - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - if: $CI_COMMIT_BRANCH - if: $CI_COMMIT_TAG auto_cancel: on_new_commit: conservative on_job_failure: all default: tags: - $RUNNER_TAG # Retry in case the runner is misconfigured for multi-arch builds or some # random unexpected runner error occurs (it happened during testing). 
retry: 1 include: - local: "/.gitlab-ci.d/templates/targets.yml" - local: "/.gitlab-ci.d/01-docker.yml" - local: "/.gitlab-ci.d/02-build.yml" - local: "/.gitlab-ci.d/03-test.yml" - local: "/.gitlab-ci.d/04-summary.yml" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/templates/0000775000175000017500000000000014712446423017247 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/templates/build.yml0000664000175000017500000000515314712446423021075 0ustar00mattst88mattst88spec: inputs: target: description: Build target in form of "OS-ARCH" pair (e.g., linux-amd64). Mostly the same as platform string for Docker but with a hyphen instead of slash. toolchain: description: An array of toolchains to test with. Each toolchain should have an appropriate Meson cross file. type: array default: [gnu, llvm] qemu_cpu: description: QEMU_CPU environmental variable used by Docker (which uses QEMU underneath). It is not used by x86 targets, as they are executed natively on the host. default: "" enable_gnu_coverage: description: Enable coverage build flags. It can be later used to compile a coverage report for all the jobs. Should be enabled only for native build environments as they have all the optional dependencies, and are the most reliable and uniform (so disable for cross environments). type: boolean default: true job_name_prefix: description: Additional prefix for the job name. Can be used to disable a job with a "." prefix. default: "" job_name_suffix: description: Additional suffix for the job name. Can be used to prevent job duplication for jobs for the same target. default: "" allow_failure: description: Set the `allow_failure` flag for jobs that are expected to fail. Remember to set `retry` argument to 0 to prevent unnecessary retries. 
type: boolean default: false retry: description: Set the `retry` flag for a job. Usually used together with `allow_failure`. type: number default: 1 --- "$[[ inputs.job_name_prefix ]]build:$[[ inputs.target ]]$[[ inputs.job_name_suffix ]]": extends: .target:all stage: build allow_failure: $[[ inputs.allow_failure ]] retry: $[[ inputs.retry ]] needs: - job: docker optional: true parallel: matrix: - TARGET: $[[ inputs.target ]] variables: TARGET: $[[ inputs.target ]] QEMU_CPU: $[[ inputs.qemu_cpu ]] parallel: matrix: - TOOLCHAIN: $[[ inputs.toolchain ]] script: - | if [ "$[[ inputs.enable_gnu_coverage ]]" == "true" ] && [ "${TOOLCHAIN}" == "gnu" ]; then COV_C_ARGS=-fprofile-update=atomic COV_MESON_BUILD_ARGS=-Db_coverage=true fi - meson setup ${BUILD_DIR} --cross-file .gitlab-ci.d/meson-cross/${TARGET}-${TOOLCHAIN}.meson -Dc_args="${COV_C_ARGS}" ${COV_MESON_BUILD_ARGS} - meson compile -C ${BUILD_DIR} artifacts: paths: - ${BUILD_DIR}/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/templates/targets.yml0000664000175000017500000000027114712446423021443 0ustar00mattst88mattst88# General target templates. .target:all: image: name: $DOCKER_IMAGE_NAME-$TARGET rules: - if: "$TARGET =~ $ACTIVE_TARGET_PATTERN" before_script: - ${LOAD_TARGET_ENV} ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.d/templates/test.yml0000664000175000017500000000714714712446423020762 0ustar00mattst88mattst88spec: inputs: target: description: Build target in form of "OS-ARCH" pair (e.g., linux-amd64). Mostly the same as platform string for Docker but with a hyphen instead of slash. toolchain: description: An array of toolchains to test with. Each toolchain should have an appropriate Meson cross file. 
type: array default: [gnu, llvm] qemu_cpu: description: An array of QEMU_CPU environmental variables used as a job matrix variable, and in turn by Docker (which uses QEMU underneath). It is not used by x86 targets, as they are executed natively on the host. type: array default: [""] pixman_disable: description: An array of PIXMAN_DISABLE targets used as a job matrix variable. type: array default: [""] timeout: description: GitLab job timeout property. May need to be increased for slow targets. default: 1h test_timeout_multiplier: description: Test timeout multiplier flag used for Meson test execution. May need to be increased for slow targets. type: number default: 20 meson_testthreads: description: Sets MESON_TESTTHREADS environmental variable. For some platforms, the tests should be executed one by one (without multithreading) to prevent gcovr errors. type: number default: 0 gcovr_flags: description: Additional flags passed to gcovr tool. default: "" job_name_prefix: description: Additional prefix for the job name. Can be used to disable a job with a "." prefix. default: "" job_name_suffix: description: Additional suffix for the job name. Can be used to prevent job duplication for jobs for the same target. default: "" allow_failure: description: Set the `allow_failure` flag for jobs that are expected to fail. Remember to set `retry` argument to 0 to prevent unnecessary retries. type: boolean default: false retry: description: Set the `retry` flag for a job. Usually used together with `allow_failure`. 
type: number default: 1 --- "$[[ inputs.job_name_prefix ]]test:$[[ inputs.target ]]$[[ inputs.job_name_suffix ]]": extends: .target:all stage: test allow_failure: $[[ inputs.allow_failure ]] retry: $[[ inputs.retry ]] timeout: $[[ inputs.timeout ]] needs: - job: docker optional: true parallel: matrix: - TARGET: $[[ inputs.target ]] - job: build:$[[ inputs.target ]] parallel: matrix: - TOOLCHAIN: $[[ inputs.toolchain ]] variables: TARGET: $[[ inputs.target ]] TEST_TIMEOUT_MULTIPLIER: $[[ inputs.test_timeout_multiplier ]] GCOVR_FLAGS: $[[ inputs.gcovr_flags ]] MESON_ARGS: -t ${TEST_TIMEOUT_MULTIPLIER} --no-rebuild -v ${TEST_NAME} MESON_TESTTHREADS: $[[ inputs.meson_testthreads ]] parallel: matrix: - TOOLCHAIN: $[[ inputs.toolchain ]] PIXMAN_DISABLE: $[[ inputs.pixman_disable ]] QEMU_CPU: $[[ inputs.qemu_cpu ]] script: - meson test -C ${BUILD_DIR} ${MESON_ARGS} after_script: - mkdir -p ${COVERAGE_OUT} - gcovr ${GCOVR_FLAGS} -r ./ ${BUILD_DIR} -e ./subprojects --json ${COVERAGE_OUT}.json --html-details ${COVERAGE_OUT}/coverage.html --print-summary || echo "No coverage data available." artifacts: paths: - ${BUILD_DIR}/meson-logs/testlog.txt - ${COVERAGE_BASE_DIR}/ reports: junit: - ${BUILD_DIR}/meson-logs/testlog.junit.xml ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/.gitlab-ci.yml0000664000175000017500000000110214712446423015544 0ustar00mattst88mattst88# # This is the GitLab CI configuration file for the mainstream pixman project: # https://gitlab.freedesktop.org/pixman/pixman/-/pipelines # # !!! DO NOT ADD ANY NEW CONFIGURATION TO THIS FILE !!! # # Only documentation or comments is accepted. 
# # To use a different set of jobs than the mainstream project, you need to set # the location of your custom yml file at "custom CI/CD configuration path", on # your GitLab CI namespace: # https://docs.gitlab.com/ee/ci/pipelines/settings.html#custom-cicd-configuration-path # include: - local: '/.gitlab-ci.d/pixman-project.yml' ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/AUTHORS0000664000175000017500000000000014712446423014154 0ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/CODING_STYLE0000664000175000017500000001017514712446423014770 0ustar00mattst88mattst88Pixman coding style. ==================== The pixman coding style is close to cairo's with one exception: braces go on their own line, rather than on the line of the if/while/for: if (condition) { do_something(); do_something_else(); } not if (condition) { do_something(); do_something_else(); } Indentation =========== Each new level is indented four spaces: if (condition) do_something(); This may be achieved with space characters or with a combination of tab characters and space characters. Tab characters are interpreted as Advance to the next column which is a multiple of 8. Names ===== In all names, words are separated with underscores. Do not use CamelCase for any names. Macros have ALL_CAPITAL_NAMES Type names are in lower case and end with "_t". For example pixman_image_t. Labels, functions and variables have lower case names. Braces ====== Braces always go on their own line: if (condition) { do_this (); do_that (); } else { do_the_other (); } Rules for braces and substatements of if/while/for/do: * If a substatement spans multiple lines, then there must be braces around it. * If the condition of an if/while/for spans multiple lines, then braces must be used for the substatements. * If one substatement of an if statement has braces, then the other must too. 
* Otherwise, don't add braces. Comments ======== For comments either like this: /* One line comment */ or like this: /* This is a multi-line comment * * It extends over multiple lines */ Generally comments should say things that aren't clear from the code itself. If too many comments say obvious things, then people will just stop reading all comments, including the good ones. Whitespace ========== * Put a single space after commas * Put spaces around arithmetic operators such a +, -, *, /: y * stride + x x / unit_x * Do not put spaces after the address-of operator, the * when used as a pointer derefernce or the ! and ~ operators: &foo; ~0x00000000 !condition *result = 100 * Break up long lines (> ~80 characters) and use whitespace to align things nicely. This is one way: some_very_long_function name ( implementation, op, src, mask, dest, src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height); This is another: some_very_long_function_name (implementation, op, src, mask, dest, src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height); * Separate logically distinct chunks with a single newline. This obviously applies between functions, but also applies within a function or block or structure definition. * Use a newline after a block of variable declarations. * Use a single space before a left parenthesis, except where the standard will not allow it, (eg. when defining a parameterized macro). * Don't eliminate newlines just because things would still fit on one line. This breaks the expected visual structure of the code making it much harder to read and understand: if (condition) foo (); else bar (); /* Yuck! */ Function Definitions ==================== Function definitions should take the following form: void my_function (int argument) { do_my_things (); } If all the parameters to a function fit naturally on one line, format them that way. Otherwise, put one argument on each line, adding whitespace so that the parameter names are aligned with each other. 
I.e., do either this: void short_arguments (const char *str, int x, int y, int z) { } or this: void long_arguments (const char *char_star_arg, int int_arg, double *double_star_arg, double double_arg) { } Mode lines ========== Given the rules above, what is the best way to simplify one's life as a code monkey? Get your editor to do most of the tedious work of beautifying your code! As a reward for reading this far, here are some mode lines for the more popular editors: /* * vim:sw=4:sts=4:ts=8:tw=78:fo=tcroq:cindent:cino=\:0,(0 * vim:isk=a-z,A-Z,48-57,_,.,-,> */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/COPYING0000664000175000017500000000404714712446423014156 0ustar00mattst88mattst88The following is the MIT license, agreed upon by most contributors. Copyright holders of new code should use this license statement where possible. They may also add themselves to the list below. /* * Copyright 1987, 1988, 1989, 1998 The Open Group * Copyright 1987, 1988, 1989 Digital Equipment Corporation * Copyright 1999, 2004, 2008 Keith Packard * Copyright 2000 SuSE, Inc. * Copyright 2000 Keith Packard, member of The XFree86 Project, Inc. * Copyright 2004, 2005, 2007, 2008, 2009, 2010 Red Hat, Inc. * Copyright 2004 Nicholas Miell * Copyright 2005 Lars Knoll & Zack Rusin, Trolltech * Copyright 2005 Trolltech AS * Copyright 2007 Luca Barbato * Copyright 2008 Aaron Plattner, NVIDIA Corporation * Copyright 2008 Rodrigo Kumpera * Copyright 2008 Andrİ TupinambĦ * Copyright 2008 Mozilla Corporation * Copyright 2008 Frederic Plourde * Copyright 2009, Oracle and/or its affiliates. All rights reserved. 
* Copyright 2009, 2010 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/INSTALL0000664000175000017500000000253114712446423014150 0ustar00mattst88mattst88Installation Instructions ************************* Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, 2006 Free Software Foundation, Inc. This file is free documentation; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. Basic Installation ================== Briefly, the shell commands `meson setup build/; ninja -C build; ninja -C build install` should configure, build, and install this package. The following more-detailed instructions are generic; see the `README` file for instructions specific to this package. 
Running `meson setup` attempts to guess correct values for various system-dependent variables used during compilation. The simplest way to compile this package is: 1. `cd` to the directory containing the package's source code and type `meson setup build/` to configure the package for your system. While running, it prints some messages telling which features it is checking for. 2. Type `ninja -C build` to compile the package. 3. Optionally, type `ninja -C build test` to run any self-tests that come with the package. 4. Type `ninja -C build install` to install the programs and any data files and documentation. 5. You can remove the program binaries and object files from the source code directory by typing `ninja -C build clean`. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/NEWS0000664000175000017500000000000014712446423013603 0ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/README0000664000175000017500000001063614712446423014004 0ustar00mattst88mattst88Pixman ====== Pixman is a library that provides low-level pixel manipulation features such as image compositing and trapezoid rasterization. Questions should be directed to the pixman mailing list: https://lists.freedesktop.org/mailman/listinfo/pixman You can also file bugs at https://gitlab.freedesktop.org/pixman/pixman/-/issues/new or submit improvements in form of a Merge Request via https://gitlab.freedesktop.org/pixman/pixman/-/merge_requests For real time discussions about pixman, feel free to join the IRC channels #cairo and #xorg-devel on the FreeNode IRC network. Contributing ------------ In order to contribute to pixman, you will need a working knowledge of the git version control system. For a quick getting started guide, there is the "Everyday Git With 20 Commands Or So guide" https://www.kernel.org/pub/software/scm/git/docs/everyday.html from the Git homepage. 
For more in depth git documentation, see the resources on the Git community documentation page: https://git-scm.com/documentation Pixman uses the infrastructure from the freedesktop.org umbrella project. For instructions about how to use the git service on freedesktop.org, see: https://www.freedesktop.org/wiki/Infrastructure/git/Developers The Pixman master repository can be found at: https://gitlab.freedesktop.org/pixman/pixman Sending patches --------------- Patches should be submitted in form of Merge Requests via Gitlab. You will first need to create a fork of the main pixman repository at https://gitlab.freedesktop.org/pixman/pixman via the Fork button on the top right. Once that is done you can add your personal repository as a remote to your local pixman development git checkout: git remote add my-gitlab git@gitlab.freedesktop.org:YOURUSERNAME/pixman.git git fetch my-gitlab Make sure to have added ssh keys to your gitlab profile at https://gitlab.freedesktop.org/profile/keys Once that is set up, the general workflow for sending patches is to create a new local branch with your improvements and once it's ready push it to your personal pixman fork: git checkout -b fix-some-bug ... git push my-gitlab The output of the `git push` command will include a link that allows you to create a Merge Request against the official pixman repository. Whenever you make changes to your branch (add new commits or fix up commits) you push them back to your personal pixman fork: git push -f my-gitlab If there is an open Merge Request Gitlab will automatically pick up the changes from your branch and pixman developers can review them anew. In order for your patches to be accepted, please consider the following guidelines: - At each point in the series, pixman should compile and the test suite should pass. The exception here is if you are changing the test suite to demonstrate a bug. 
In this case, make one commit that makes the test suite fail due to the bug, and then another commit that fixes the bug. You can run the test suite with meson test -C builddir It will take around two minutes to run on a modern PC. - Follow the coding style described in the CODING_STYLE file - For bug fixes, include an update to the test suite to make sure the bug doesn't reappear. - For new features, add tests of the feature to the test suite. Also, add a program demonstrating the new feature to the demos/ directory. - Write descriptive commit messages. Useful information to include: - Benchmark results, before and after - Description of the bug that was fixed - Detailed rationale for any new API - Alternative approaches that were rejected (and why they don't work) - If review comments were incorporated, a brief version history describing what those changes were. - For big patch series, write an introductory post with an overall description of the patch series, including benchmarks and motivation. Each commit message should still be descriptive and include enough information to understand why this particular commit was necessary. Pixman has high standards for code quality and so almost everybody should expect to have the first versions of their patches rejected. If you think that the reviewers are wrong about something, or that the guidelines above are wrong, feel free to discuss the issue. The purpose of the guidelines and code review is to ensure high code quality; it is not an exercise in compliance. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/RELEASING0000664000175000017500000000304014712446423014347 0ustar00mattst88mattst88Here are the steps to follow to create a new pixman release: 1) Ensure that there are no uncommitted changes or unpushed commits, and that you are up to date with the latest commits in the central repository. 
Here are a couple of useful commands: git diff (no output) git status (should report "nothing to commit") git log master...origin (no output; note: *3* dots) 2) Increment the version in meson.build. 3) Make sure that new version works, including - meson test passes - the X server still works with the new pixman version installed - the cairo test suite hasn't gained any new failures compared to last pixman version. 4) Use "git commit" to record the changes made in step 2 and 3. 5) Generate and publish the tar files by running make PREV= GPGKEY= release-publish If your freedesktop user name is different from your local one, then also set the variable USER to your freedesktop user name. 6) Run make release-publish-message to generate a draft release announcement. Edit it as appropriate and send it to cairo-announce@cairographics.org pixman@lists.freedesktop.org xorg-announce@lists.freedesktop.org 7) Increment pixman_micro to the next larger (odd) number in configure.ac. Commit this change, and push all commits created during this process using git push git push --tags You must use "--tags" here; otherwise the new tag will not be pushed out. 8) Change the topic of the #cairo IRC channel on freenode to advertise the new version. 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/a64-neon-test.S0000664000175000017500000000010414712446423015541 0ustar00mattst88mattst88.text .arch armv8-a .altmacro prfm pldl2strm, [x0] xtn v0.8b, v0.8h ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/arm-simd-test.S0000664000175000017500000000027014712446423015727 0ustar00mattst88mattst88.text .arch armv6 .object_arch armv4 .arm .altmacro #ifndef __ARM_EABI__ #error EABI is required (to be sure that calling conventions are compatible) #endif pld [r0] uqadd8 r0, r0, r0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/0000775000175000017500000000000014712446423014225 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/alpha-test.c0000664000175000017500000000650214712446423016436 0ustar00mattst88mattst88#include #include #include "pixman.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 400 #define HEIGHT 200 uint32_t *alpha = malloc (WIDTH * HEIGHT * 4); uint32_t *dest = malloc (WIDTH * HEIGHT * 4); uint32_t *src = malloc (WIDTH * HEIGHT * 4); pixman_image_t *grad_img; pixman_image_t *alpha_img; pixman_image_t *dest_img; pixman_image_t *src_img; int i; pixman_gradient_stop_t stops[2] = { { pixman_int_to_fixed (0), { 0x0000, 0x0000, 0x0000, 0x0000 } }, { pixman_int_to_fixed (1), { 0xffff, 0x0000, 0x1111, 0xffff } } }; pixman_point_fixed_t p1 = { pixman_double_to_fixed (0), 0 }; pixman_point_fixed_t p2 = { pixman_double_to_fixed (WIDTH), pixman_int_to_fixed (0) }; #if 0 pixman_transform_t trans = { { { pixman_double_to_fixed (2), pixman_double_to_fixed (0.5), pixman_double_to_fixed (-100), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (3), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (0), 
pixman_double_to_fixed (0.000), pixman_double_to_fixed (1.0) } } }; #else pixman_transform_t trans = { { { pixman_fixed_1, 0, 0 }, { 0, pixman_fixed_1, 0 }, { 0, 0, pixman_fixed_1 } } }; #endif #if 0 pixman_point_fixed_t c_inner; pixman_point_fixed_t c_outer; pixman_fixed_t r_inner; pixman_fixed_t r_outer; #endif for (i = 0; i < WIDTH * HEIGHT; ++i) alpha[i] = 0x4f00004f; /* pale blue */ alpha_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, alpha, WIDTH * 4); for (i = 0; i < WIDTH * HEIGHT; ++i) dest[i] = 0xffffff00; /* yellow */ dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, dest, WIDTH * 4); for (i = 0; i < WIDTH * HEIGHT; ++i) src[i] = 0xffff0000; src_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, src, WIDTH * 4); #if 0 c_inner.x = pixman_double_to_fixed (50.0); c_inner.y = pixman_double_to_fixed (50.0); c_outer.x = pixman_double_to_fixed (50.0); c_outer.y = pixman_double_to_fixed (50.0); r_inner = 0; r_outer = pixman_double_to_fixed (50.0); grad_img = pixman_image_create_conical_gradient (&c_inner, r_inner, stops, 2); #endif #if 0 grad_img = pixman_image_create_conical_gradient (&c_inner, r_inner, stops, 2); grad_img = pixman_image_create_linear_gradient (&c_inner, &c_outer, r_inner, r_outer, stops, 2); #endif grad_img = pixman_image_create_linear_gradient (&p1, &p2, stops, 2); pixman_image_set_transform (grad_img, &trans); pixman_image_set_repeat (grad_img, PIXMAN_REPEAT_PAD); pixman_image_composite (PIXMAN_OP_OVER, grad_img, NULL, alpha_img, 0, 0, 0, 0, 0, 0, 10 * WIDTH, HEIGHT); pixman_image_set_alpha_map (src_img, alpha_img, 10, 10); pixman_image_composite (PIXMAN_OP_OVER, src_img, NULL, dest_img, 0, 0, 0, 0, 0, 0, 10 * WIDTH, HEIGHT); printf ("0, 0: %x\n", dest[0]); printf ("10, 10: %x\n", dest[10 * 10 + 10]); printf ("w, h: %x\n", dest[(HEIGHT - 1) * 100 + (WIDTH - 1)]); show_image (dest_img); pixman_image_unref (src_img); pixman_image_unref (grad_img); pixman_image_unref (alpha_img); free (dest); 
return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/checkerboard.c0000664000175000017500000000336214712446423017011 0ustar00mattst88mattst88#include #include #include "pixman.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 400 #define HEIGHT 400 #define TILE_SIZE 25 pixman_image_t *checkerboard; pixman_image_t *destination; #define D2F(d) (pixman_double_to_fixed(d)) pixman_transform_t trans = { { { D2F (-1.96830), D2F (-1.82250), D2F (512.12250)}, { D2F (0.00000), D2F (-7.29000), D2F (1458.00000)}, { D2F (0.00000), D2F (-0.00911), D2F (0.59231)}, }}; int i, j; checkerboard = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, NULL, 0); destination = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, NULL, 0); for (i = 0; i < HEIGHT / TILE_SIZE; ++i) { for (j = 0; j < WIDTH / TILE_SIZE; ++j) { double u = (double)(j + 1) / (WIDTH / TILE_SIZE); double v = (double)(i + 1) / (HEIGHT / TILE_SIZE); pixman_color_t black = { 0, 0, 0, 0xffff }; pixman_color_t white = { v * 0xffff, u * 0xffff, (1 - (double)u) * 0xffff, 0xffff }; pixman_color_t *c; pixman_image_t *fill; if ((j & 1) != (i & 1)) c = &black; else c = &white; fill = pixman_image_create_solid_fill (c); pixman_image_composite (PIXMAN_OP_SRC, fill, NULL, checkerboard, 0, 0, 0, 0, j * TILE_SIZE, i * TILE_SIZE, TILE_SIZE, TILE_SIZE); } } pixman_image_set_transform (checkerboard, &trans); pixman_image_set_filter (checkerboard, PIXMAN_FILTER_BEST, NULL, 0); pixman_image_set_repeat (checkerboard, PIXMAN_REPEAT_NONE); pixman_image_composite (PIXMAN_OP_SRC, checkerboard, NULL, destination, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); show_image (destination); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/clip-in.c0000664000175000017500000000274514712446423015734 0ustar00mattst88mattst88#include #include #include #include "pixman.h" #include 
"gtk-utils.h" /* This test demonstrates that clipping is done totally different depending * on whether the source is transformed or not. */ int main (int argc, char **argv) { #define WIDTH 200 #define HEIGHT 200 #define SMALL 25 uint32_t *sbits = malloc (SMALL * SMALL * 4); uint32_t *bits = malloc (WIDTH * HEIGHT * 4); pixman_transform_t trans = { { { pixman_double_to_fixed (1.0), pixman_double_to_fixed (0), pixman_double_to_fixed (-0.1), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (1), pixman_double_to_fixed (-0.1), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (0), pixman_double_to_fixed (1.0) } } }; pixman_image_t *src_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, SMALL, SMALL, sbits, 4 * SMALL); pixman_image_t *dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, bits, 4 * WIDTH); memset (bits, 0xff, WIDTH * HEIGHT * 4); memset (sbits, 0x00, SMALL * SMALL * 4); pixman_image_composite (PIXMAN_OP_IN, src_img, NULL, dest_img, 0, 0, 0, 0, SMALL, SMALL, 200, 200); pixman_image_set_transform (src_img, &trans); pixman_image_composite (PIXMAN_OP_IN, src_img, NULL, dest_img, 0, 0, 0, 0, SMALL * 2, SMALL * 2, 200, 200); show_image (dest_img); pixman_image_unref (src_img); pixman_image_unref (dest_img); free (bits); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/clip-test.c0000664000175000017500000000541114712446423016276 0ustar00mattst88mattst88#include #include #include "pixman.h" #include "gtk-utils.h" #define WIDTH 200 #define HEIGHT 200 static pixman_image_t * create_solid_bits (uint32_t pixel) { uint32_t *pixels = malloc (WIDTH * HEIGHT * 4); int i; for (i = 0; i < WIDTH * HEIGHT; ++i) pixels[i] = pixel; return pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, pixels, WIDTH * 4); } int main (int argc, char **argv) { pixman_image_t *gradient_img; pixman_image_t *src_img, *dst_img; pixman_gradient_stop_t stops[2] = { { pixman_int_to_fixed 
(0), { 0xffff, 0x0000, 0x0000, 0xffff } }, { pixman_int_to_fixed (1), { 0xffff, 0xffff, 0x0000, 0xffff } } }; #if 0 pixman_point_fixed_t p1 = { 0, 0 }; pixman_point_fixed_t p2 = { pixman_int_to_fixed (WIDTH), pixman_int_to_fixed (HEIGHT) }; #endif pixman_point_fixed_t c_inner; pixman_point_fixed_t c_outer; pixman_fixed_t r_inner; pixman_fixed_t r_outer; pixman_region32_t clip_region; pixman_transform_t trans = { { { pixman_double_to_fixed (1.3), pixman_double_to_fixed (0), pixman_double_to_fixed (-0.5), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (1), pixman_double_to_fixed (-0.5), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (0), pixman_double_to_fixed (1.0) } } }; src_img = create_solid_bits (0xff0000ff); c_inner.x = pixman_double_to_fixed (100.0); c_inner.y = pixman_double_to_fixed (100.0); c_outer.x = pixman_double_to_fixed (100.0); c_outer.y = pixman_double_to_fixed (100.0); r_inner = 0; r_outer = pixman_double_to_fixed (100.0); gradient_img = pixman_image_create_radial_gradient (&c_inner, &c_outer, r_inner, r_outer, stops, 2); #if 0 gradient_img = pixman_image_create_linear_gradient (&p1, &p2, stops, 2); #endif pixman_image_composite (PIXMAN_OP_OVER, gradient_img, NULL, src_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); pixman_region32_init_rect (&clip_region, 50, 0, 100, 200); pixman_image_set_clip_region32 (src_img, &clip_region); pixman_image_set_source_clipping (src_img, TRUE); pixman_image_set_has_client_clip (src_img, TRUE); pixman_image_set_transform (src_img, &trans); pixman_image_set_repeat (src_img, PIXMAN_REPEAT_NORMAL); dst_img = create_solid_bits (0xffff0000); pixman_image_composite (PIXMAN_OP_OVER, src_img, NULL, dst_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); #if 0 printf ("0, 0: %x\n", src[0]); printf ("10, 10: %x\n", src[10 * 10 + 10]); printf ("w, h: %x\n", src[(HEIGHT - 1) * 100 + (WIDTH - 1)]); #endif show_image (dst_img); pixman_image_unref (gradient_img); pixman_image_unref (src_img); return 0; } 
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/composite-test.c0000664000175000017500000001153014712446423017350 0ustar00mattst88mattst88#include #include #include #include "pixman.h" #include "gtk-utils.h" #include "parrot.c" #define WIDTH 80 #define HEIGHT 80 typedef struct { const char *name; pixman_op_t op; } operator_t; static const operator_t operators[] = { { "CLEAR", PIXMAN_OP_CLEAR }, { "SRC", PIXMAN_OP_SRC }, { "DST", PIXMAN_OP_DST }, { "OVER", PIXMAN_OP_OVER }, { "OVER_REVERSE", PIXMAN_OP_OVER_REVERSE }, { "IN", PIXMAN_OP_IN }, { "IN_REVERSE", PIXMAN_OP_IN_REVERSE }, { "OUT", PIXMAN_OP_OUT }, { "OUT_REVERSE", PIXMAN_OP_OUT_REVERSE }, { "ATOP", PIXMAN_OP_ATOP }, { "ATOP_REVERSE", PIXMAN_OP_ATOP_REVERSE }, { "XOR", PIXMAN_OP_XOR }, { "ADD", PIXMAN_OP_ADD }, { "SATURATE", PIXMAN_OP_SATURATE }, { "MULTIPLY", PIXMAN_OP_MULTIPLY }, { "SCREEN", PIXMAN_OP_SCREEN }, { "OVERLAY", PIXMAN_OP_OVERLAY }, { "DARKEN", PIXMAN_OP_DARKEN }, { "LIGHTEN", PIXMAN_OP_LIGHTEN }, { "COLOR_DODGE", PIXMAN_OP_COLOR_DODGE }, { "COLOR_BURN", PIXMAN_OP_COLOR_BURN }, { "HARD_LIGHT", PIXMAN_OP_HARD_LIGHT }, { "SOFT_LIGHT", PIXMAN_OP_SOFT_LIGHT }, { "DIFFERENCE", PIXMAN_OP_DIFFERENCE }, { "EXCLUSION", PIXMAN_OP_EXCLUSION }, { "HSL_HUE", PIXMAN_OP_HSL_HUE }, { "HSL_SATURATION", PIXMAN_OP_HSL_SATURATION }, { "HSL_COLOR", PIXMAN_OP_HSL_COLOR }, { "HSL_LUMINOSITY", PIXMAN_OP_HSL_LUMINOSITY }, }; static uint32_t reader (const void *src, int size) { switch (size) { case 1: return *(uint8_t *)src; case 2: return *(uint16_t *)src; case 4: return *(uint32_t *)src; default: g_assert_not_reached(); } } static void writer (void *src, uint32_t value, int size) { switch (size) { case 1: *(uint8_t *)src = value; break; case 2: *(uint16_t *)src = value; break; case 4: *(uint32_t *)src = value; break; default: break; } } int main (int argc, char **argv) { #define d2f pixman_double_to_fixed GtkWidget *window, *swindow; GtkWidget *table; 
uint32_t *dest = malloc (WIDTH * HEIGHT * 4); uint32_t *src = malloc (WIDTH * HEIGHT * 4); pixman_image_t *gradient, *parrot; pixman_image_t *dest_img; pixman_point_fixed_t p1 = { -10 << 16, 10 << 16 }; pixman_point_fixed_t p2 = { (WIDTH + 10) << 16, (HEIGHT - 10) << 16 }; uint16_t alpha = 0xdddd; pixman_gradient_stop_t stops[6] = { { d2f (0.0), { 0xf2f2, 0x8787, 0x7d7d, alpha } }, { d2f (0.22), { 0xf3f3, 0xeaea, 0x8383, alpha } }, { d2f (0.42), { 0x6b6b, 0xc0c0, 0x7777, alpha } }, { d2f (0.57), { 0x4b4b, 0xc9c9, 0xf5f5, alpha } }, { d2f (0.75), { 0x6a6a, 0x7f7f, 0xbebe, alpha } }, { d2f (1.0), { 0xeded, 0x8282, 0xb0b0, alpha } }, }; int i; gtk_init (&argc, &argv); window = gtk_window_new (GTK_WINDOW_TOPLEVEL); gtk_window_set_default_size (GTK_WINDOW (window), 800, 600); g_signal_connect (window, "delete-event", G_CALLBACK (gtk_main_quit), NULL); table = gtk_table_new (G_N_ELEMENTS (operators) / 6, 6, TRUE); gradient = pixman_image_create_linear_gradient (&p1, &p2, stops, G_N_ELEMENTS (stops)); parrot = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, (uint32_t *)parrot_bits, WIDTH * 4); pixman_image_set_repeat (gradient, PIXMAN_REPEAT_PAD); dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, NULL, WIDTH * 4); pixman_image_set_accessors (dest_img, reader, writer); for (i = 0; i < G_N_ELEMENTS (operators); ++i) { GtkWidget *image; GdkPixbuf *pixbuf; GtkWidget *vbox; GtkWidget *label; vbox = gtk_vbox_new (FALSE, 0); label = gtk_label_new (operators[i].name); gtk_box_pack_start (GTK_BOX (vbox), label, FALSE, FALSE, 6); gtk_widget_show (label); pixman_image_composite (PIXMAN_OP_SRC, gradient, NULL, dest_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); pixman_image_composite (operators[i].op, parrot, NULL, dest_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); pixbuf = pixbuf_from_argb32 (pixman_image_get_data (dest_img), WIDTH, HEIGHT, WIDTH * 4); image = gtk_image_new_from_pixbuf (pixbuf); gtk_box_pack_start (GTK_BOX (vbox), image, FALSE, FALSE, 0); 
gtk_widget_show (image); gtk_table_attach_defaults (GTK_TABLE (table), vbox, i % 6, (i % 6) + 1, i / 6, (i / 6) + 1); gtk_widget_show (vbox); g_object_unref (pixbuf); } pixman_image_unref (gradient); free (src); pixman_image_unref (dest_img); free (dest); swindow = gtk_scrolled_window_new (NULL, NULL); gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (swindow), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC); gtk_scrolled_window_add_with_viewport (GTK_SCROLLED_WINDOW (swindow), table); gtk_widget_show (table); gtk_container_add (GTK_CONTAINER (window), swindow); gtk_widget_show (swindow); gtk_widget_show (window); gtk_main (); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/conical-test.c0000664000175000017500000000466014712446423016764 0ustar00mattst88mattst88#include "utils.h" #include "gtk-utils.h" #define SIZE 128 #define GRADIENTS_PER_ROW 7 #define NUM_ROWS ((NUM_GRADIENTS + GRADIENTS_PER_ROW - 1) / GRADIENTS_PER_ROW) #define WIDTH (SIZE * GRADIENTS_PER_ROW) #define HEIGHT (SIZE * NUM_ROWS) #define NUM_GRADIENTS 35 #define double_to_color(x) \ (((uint32_t) ((x)*65536)) - (((uint32_t) ((x)*65536)) >> 16)) #define PIXMAN_STOP(offset,r,g,b,a) \ { pixman_double_to_fixed (offset), \ { \ double_to_color (r), \ double_to_color (g), \ double_to_color (b), \ double_to_color (a) \ } \ } static const pixman_gradient_stop_t stops[] = { PIXMAN_STOP (0.25, 1, 0, 0, 0.7), PIXMAN_STOP (0.5, 1, 1, 0, 0.7), PIXMAN_STOP (0.75, 0, 1, 0, 0.7), PIXMAN_STOP (1.0, 0, 0, 1, 0.7) }; #define NUM_STOPS (sizeof (stops) / sizeof (stops[0])) static pixman_image_t * create_conical (int index) { pixman_point_fixed_t c; double angle; c.x = pixman_double_to_fixed (0); c.y = pixman_double_to_fixed (0); angle = (0.5 / NUM_GRADIENTS + index / (double)NUM_GRADIENTS) * 720 - 180; return pixman_image_create_conical_gradient ( &c, pixman_double_to_fixed (angle), stops, NUM_STOPS); } int main (int argc, char **argv) { 
pixman_transform_t transform; pixman_image_t *src_img, *dest_img; int i; enable_divbyzero_exceptions (); dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, NULL, 0); draw_checkerboard (dest_img, 25, 0xffaaaaaa, 0xff888888); pixman_transform_init_identity (&transform); pixman_transform_translate (NULL, &transform, pixman_double_to_fixed (0.5), pixman_double_to_fixed (0.5)); pixman_transform_scale (NULL, &transform, pixman_double_to_fixed (SIZE), pixman_double_to_fixed (SIZE)); pixman_transform_translate (NULL, &transform, pixman_double_to_fixed (0.5), pixman_double_to_fixed (0.5)); for (i = 0; i < NUM_GRADIENTS; i++) { int column = i % GRADIENTS_PER_ROW; int row = i / GRADIENTS_PER_ROW; src_img = create_conical (i); pixman_image_set_repeat (src_img, PIXMAN_REPEAT_NORMAL); pixman_image_set_transform (src_img, &transform); pixman_image_composite32 ( PIXMAN_OP_OVER, src_img, NULL,dest_img, 0, 0, 0, 0, column * SIZE, row * SIZE, SIZE, SIZE); pixman_image_unref (src_img); } show_image (dest_img); pixman_image_unref (dest_img); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/convolution-test.c0000664000175000017500000000223714712446423017731 0ustar00mattst88mattst88#include #include #include "pixman.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 200 #define HEIGHT 200 #define d2f pixman_double_to_fixed uint32_t *src = malloc (WIDTH * HEIGHT * 4); uint32_t *mask = malloc (WIDTH * HEIGHT * 4); uint32_t *dest = malloc (WIDTH * HEIGHT * 4); pixman_fixed_t convolution[] = { d2f (3), d2f (3), d2f (0.5), d2f (0.5), d2f (0.5), d2f (0.5), d2f (0.5), d2f (0.5), d2f (0.5), d2f (0.5), d2f (0.5), }; pixman_image_t *simg, *mimg, *dimg; int i; for (i = 0; i < WIDTH * HEIGHT; ++i) { src[i] = 0x7f007f00; mask[i] = (i % 256) * 0x01000000; dest[i] = 0; } simg = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, src, WIDTH * 4); mimg = pixman_image_create_bits 
(PIXMAN_a8r8g8b8, WIDTH, HEIGHT, mask, WIDTH * 4); dimg = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, dest, WIDTH * 4); pixman_image_set_filter (mimg, PIXMAN_FILTER_CONVOLUTION, convolution, 11); pixman_image_composite (PIXMAN_OP_OVER, simg, mimg, dimg, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); show_image (dimg); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/dither.c0000664000175000017500000001644514712446423015662 0ustar00mattst88mattst88/* * Copyright 2012, Red Hat, Inc. * Copyright 2012, Soren Sandmann * Copyright 2018, Basile Clement * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include "pixman-config.h" #endif #include #include #include #include "utils.h" #include "gtk-utils.h" #define WIDTH 1024 #define HEIGHT 640 typedef struct { GtkBuilder * builder; pixman_image_t * original; pixman_format_code_t format; pixman_dither_t dither; int width; int height; } app_t; static GtkWidget * get_widget (app_t *app, const char *name) { GtkWidget *widget = GTK_WIDGET (gtk_builder_get_object (app->builder, name)); if (!widget) g_error ("Widget %s not found\n", name); return widget; } typedef struct { char name [20]; int value; } named_int_t; static const named_int_t formats[] = { { "a8r8g8b8", PIXMAN_a8r8g8b8 }, { "rgb", PIXMAN_rgb_float }, { "sRGB", PIXMAN_a8r8g8b8_sRGB }, { "r5g6b5", PIXMAN_r5g6b5 }, { "a4r4g4b4", PIXMAN_a4r4g4b4 }, { "a2r2g2b2", PIXMAN_a2r2g2b2 }, { "r3g3b2", PIXMAN_r3g3b2 }, { "r1g2b1", PIXMAN_r1g2b1 }, { "a1r1g1b1", PIXMAN_a1r1g1b1 }, }; static const named_int_t dithers[] = { { "None", PIXMAN_REPEAT_NONE }, { "Bayer 8x8", PIXMAN_DITHER_ORDERED_BAYER_8 }, { "Blue noise 64x64", PIXMAN_DITHER_ORDERED_BLUE_NOISE_64 }, }; static int get_value (app_t *app, const named_int_t table[], const char *box_name) { GtkComboBox *box = GTK_COMBO_BOX (get_widget (app, box_name)); return table[gtk_combo_box_get_active (box)].value; } static void rescale (GtkWidget *may_be_null, app_t *app) { app->dither = get_value (app, dithers, "dithering_combo_box"); app->format = get_value (app, formats, "target_format_combo_box"); gtk_widget_set_size_request ( get_widget (app, "drawing_area"), app->width + 0.5, app->height + 0.5); gtk_widget_queue_draw ( get_widget (app, "drawing_area")); } static gboolean on_draw (GtkWidget *widget, cairo_t *cr, gpointer user_data) { app_t *app = user_data; GdkRectangle area; cairo_surface_t *surface; pixman_image_t *tmp, *final; uint32_t *pixels; gdk_cairo_get_clip_rectangle(cr, &area); tmp = pixman_image_create_bits ( app->format, area.width, area.height, NULL, 0); pixman_image_set_dither (tmp, 
app->dither); pixman_image_composite ( PIXMAN_OP_SRC, app->original, NULL, tmp, area.x, area.y, 0, 0, 0, 0, app->width - area.x, app->height - area.y); pixels = calloc (1, area.width * area.height * 4); final = pixman_image_create_bits ( PIXMAN_a8r8g8b8, area.width, area.height, pixels, area.width * 4); pixman_image_composite ( PIXMAN_OP_SRC, tmp, NULL, final, area.x, area.y, 0, 0, 0, 0, app->width - area.x, app->height - area.y); surface = cairo_image_surface_create_for_data ( (uint8_t *)pixels, CAIRO_FORMAT_ARGB32, area.width, area.height, area.width * 4); cairo_set_source_surface (cr, surface, area.x, area.y); cairo_paint (cr); cairo_surface_destroy (surface); free (pixels); pixman_image_unref (final); pixman_image_unref (tmp); return TRUE; } static void set_up_combo_box (app_t *app, const char *box_name, int n_entries, const named_int_t table[]) { GtkWidget *widget = get_widget (app, box_name); GtkListStore *model; GtkCellRenderer *cell; int i; model = gtk_list_store_new (1, G_TYPE_STRING); cell = gtk_cell_renderer_text_new (); gtk_cell_layout_pack_start (GTK_CELL_LAYOUT (widget), cell, TRUE); gtk_cell_layout_set_attributes (GTK_CELL_LAYOUT (widget), cell, "text", 0, NULL); gtk_combo_box_set_model (GTK_COMBO_BOX (widget), GTK_TREE_MODEL (model)); for (i = 0; i < n_entries; ++i) { const named_int_t *info = &(table[i]); GtkTreeIter iter; gtk_list_store_append (model, &iter); gtk_list_store_set (model, &iter, 0, info->name, -1); } gtk_combo_box_set_active (GTK_COMBO_BOX (widget), 0); g_signal_connect (widget, "changed", G_CALLBACK (rescale), app); } static app_t * app_new (pixman_image_t *original) { GtkWidget *widget; app_t *app = g_malloc (sizeof *app); GError *err = NULL; app->builder = gtk_builder_new (); app->original = original; if (original->type == BITS) { app->width = pixman_image_get_width (original); app->height = pixman_image_get_height (original); } else { app->width = WIDTH; app->height = HEIGHT; } if (!gtk_builder_add_from_file (app->builder, 
"dither.ui", &err)) g_error ("Could not read file dither.ui: %s", err->message); widget = get_widget (app, "drawing_area"); g_signal_connect (widget, "draw", G_CALLBACK (on_draw), app); set_up_combo_box (app, "target_format_combo_box", G_N_ELEMENTS (formats), formats); set_up_combo_box (app, "dithering_combo_box", G_N_ELEMENTS (dithers), dithers); app->dither = get_value (app, dithers, "dithering_combo_box"); app->format = get_value (app, formats, "target_format_combo_box"); rescale (NULL, app); return app; } int main (int argc, char **argv) { GtkWidget *window; pixman_image_t *image; app_t *app; gtk_init (&argc, &argv); if (argc < 2) { pixman_gradient_stop_t stops[] = { /* These colors make it very obvious that dithering * is useful even for 8-bit gradients */ { 0x00000, { 0x1b1b, 0x5d5d, 0x7c7c, 0xffff } }, { 0x10000, { 0x3838, 0x3232, 0x1010, 0xffff } }, }; pixman_point_fixed_t p1, p2; p1.x = p1.y = 0x0000; p2.x = WIDTH << 16; p2.y = HEIGHT << 16; if (!(image = pixman_image_create_linear_gradient ( &p1, &p2, stops, ARRAY_LENGTH (stops)))) { printf ("Could not create gradient\n"); return -1; } } else if (!(image = pixman_image_from_file (argv[1], PIXMAN_a8r8g8b8))) { printf ("Could not load image \"%s\"\n", argv[1]); return -1; } app = app_new (image); window = get_widget (app, "main"); g_signal_connect (window, "delete_event", G_CALLBACK (gtk_main_quit), NULL); gtk_window_set_default_size (GTK_WINDOW (window), 1024, 768); gtk_widget_show_all (window); gtk_main (); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/dither.ui0000664000175000017500000001436714712446423016056 0ustar00mattst88mattst88 False True False 12 True True in True False True False True True 0 True False 12 True False True False 2 2 8 6 True False <b>Target format:</b> True 1 True False <b>Dithering:</b> True 1 1 True False 1 True False 1 1 False True 6 1 False True 0 False True 1 -180 190 1 10 10 -32 42 1 10 10 -32 42 
1 10 10 12 4 1 1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/gradient-test.c0000664000175000017500000000505214712446423017145 0ustar00mattst88mattst88#include #include #include "pixman.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 400 #define HEIGHT 200 uint32_t *dest = malloc (WIDTH * HEIGHT * 4); pixman_image_t *src_img; pixman_image_t *dest_img; int i; pixman_gradient_stop_t stops[2] = { { pixman_int_to_fixed (0), { 0x0000, 0x0000, 0xffff, 0xffff } }, { pixman_int_to_fixed (1), { 0xffff, 0x1111, 0x1111, 0xffff } } }; pixman_point_fixed_t p1 = { pixman_double_to_fixed (50), 0 }; pixman_point_fixed_t p2 = { pixman_double_to_fixed (200), 0 }; #if 0 pixman_transform_t trans = { { { pixman_double_to_fixed (2), pixman_double_to_fixed (0.5), pixman_double_to_fixed (-100), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (3), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (0.000), pixman_double_to_fixed (1.0) } } }; #else pixman_transform_t trans = { { { pixman_fixed_1, 0, 0 }, { 0, pixman_fixed_1, 0 }, { 0, 0, pixman_fixed_1 } } }; #endif #if 0 pixman_point_fixed_t c_inner; pixman_point_fixed_t c_outer; pixman_fixed_t r_inner; pixman_fixed_t r_outer; #endif for (i = 0; i < WIDTH * HEIGHT; ++i) dest[i] = 0xff00ff00; dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, dest, WIDTH * 4); #if 0 c_inner.x = pixman_double_to_fixed (50.0); c_inner.y = pixman_double_to_fixed (50.0); c_outer.x = pixman_double_to_fixed (50.0); c_outer.y = pixman_double_to_fixed (50.0); r_inner = 0; r_outer = pixman_double_to_fixed (50.0); src_img = pixman_image_create_conical_gradient (&c_inner, r_inner, stops, 2); #endif #if 0 src_img = pixman_image_create_conical_gradient (&c_inner, r_inner, stops, 2); src_img = pixman_image_create_linear_gradient (&c_inner, &c_outer, r_inner, r_outer, stops, 2); #endif src_img = 
pixman_image_create_linear_gradient (&p1, &p2, stops, 2); pixman_image_set_transform (src_img, &trans); pixman_image_set_repeat (src_img, PIXMAN_REPEAT_NONE); pixman_image_composite (PIXMAN_OP_OVER, src_img, NULL, dest_img, 0, 0, 0, 0, 0, 0, 10 * WIDTH, HEIGHT); printf ("0, 0: %x\n", dest[0]); printf ("10, 10: %x\n", dest[10 * 10 + 10]); printf ("w, h: %x\n", dest[(HEIGHT - 1) * 100 + (WIDTH - 1)]); show_image (dest_img); pixman_image_unref (src_img); pixman_image_unref (dest_img); free (dest); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/gtk-utils.c0000664000175000017500000000776014712446423016326 0ustar00mattst88mattst88#include #ifdef HAVE_CONFIG_H #include #endif #include "utils.h" #include "gtk-utils.h" pixman_image_t * pixman_image_from_file (const char *filename, pixman_format_code_t format) { GdkPixbuf *pixbuf; pixman_image_t *image; int width, height; uint32_t *data, *d; uint8_t *gdk_data; int n_channels; int j, i; int stride; if (!(pixbuf = gdk_pixbuf_new_from_file (filename, NULL))) return NULL; image = NULL; width = gdk_pixbuf_get_width (pixbuf); height = gdk_pixbuf_get_height (pixbuf); n_channels = gdk_pixbuf_get_n_channels (pixbuf); gdk_data = gdk_pixbuf_get_pixels (pixbuf); stride = gdk_pixbuf_get_rowstride (pixbuf); if (!(data = malloc (width * height * sizeof (uint32_t)))) goto out; d = data; for (j = 0; j < height; ++j) { uint8_t *gdk_line = gdk_data; for (i = 0; i < width; ++i) { int r, g, b, a; uint32_t pixel; r = gdk_line[0]; g = gdk_line[1]; b = gdk_line[2]; if (n_channels == 4) a = gdk_line[3]; else a = 0xff; r = (r * a + 127) / 255; g = (g * a + 127) / 255; b = (b * a + 127) / 255; pixel = (a << 24) | (r << 16) | (g << 8) | b; *d++ = pixel; gdk_line += n_channels; } gdk_data += stride; } image = pixman_image_create_bits ( format, width, height, data, width * 4); out: g_object_unref (pixbuf); return image; } GdkPixbuf * pixbuf_from_argb32 (uint32_t *bits, int 
width, int height, int stride) { GdkPixbuf *pixbuf = gdk_pixbuf_new (GDK_COLORSPACE_RGB, TRUE, 8, width, height); int p_stride = gdk_pixbuf_get_rowstride (pixbuf); guint32 *p_bits = (guint32 *)gdk_pixbuf_get_pixels (pixbuf); int i; for (i = 0; i < height; ++i) { uint32_t *src_row = &bits[i * (stride / 4)]; uint32_t *dst_row = p_bits + i * (p_stride / 4); a8r8g8b8_to_rgba_np (dst_row, src_row, width); } return pixbuf; } static gboolean on_draw (GtkWidget *widget, cairo_t *cr, gpointer user_data) { pixman_image_t *pimage = user_data; int width = pixman_image_get_width (pimage); int height = pixman_image_get_height (pimage); int stride = pixman_image_get_stride (pimage); cairo_surface_t *cimage; cairo_format_t format; if (pixman_image_get_format (pimage) == PIXMAN_x8r8g8b8) format = CAIRO_FORMAT_RGB24; else format = CAIRO_FORMAT_ARGB32; cimage = cairo_image_surface_create_for_data ( (uint8_t *)pixman_image_get_data (pimage), format, width, height, stride); cairo_rectangle (cr, 0, 0, width, height); cairo_set_source_surface (cr, cimage, 0, 0); cairo_fill (cr); cairo_surface_destroy (cimage); return TRUE; } void show_image (pixman_image_t *image) { GtkWidget *window; int width, height; int argc; char **argv; char *arg0 = g_strdup ("pixman-test-program"); pixman_format_code_t format; pixman_image_t *copy; argc = 1; argv = (char **)&arg0; gtk_init (&argc, &argv); window = gtk_window_new (GTK_WINDOW_TOPLEVEL); width = pixman_image_get_width (image); height = pixman_image_get_height (image); gtk_window_set_default_size (GTK_WINDOW (window), width, height); format = pixman_image_get_format (image); /* We always display the image as if it contains sRGB data. That * means that no conversion should take place when the image * has the a8r8g8b8_sRGB format. 
*/ switch (format) { case PIXMAN_a8r8g8b8_sRGB: case PIXMAN_a8r8g8b8: case PIXMAN_x8r8g8b8: copy = pixman_image_ref (image); break; default: copy = pixman_image_create_bits (PIXMAN_a8r8g8b8, width, height, NULL, -1); pixman_image_composite32 (PIXMAN_OP_SRC, image, NULL, copy, 0, 0, 0, 0, 0, 0, width, height); break; } g_signal_connect (window, "draw", G_CALLBACK (on_draw), copy); g_signal_connect (window, "delete_event", G_CALLBACK (gtk_main_quit), NULL); gtk_widget_show (window); gtk_main (); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/gtk-utils.h0000664000175000017500000000063314712446423016323 0ustar00mattst88mattst88#include #include #include #include #include "pixman.h" void show_image (pixman_image_t *image); pixman_image_t * pixman_image_from_file (const char *filename, pixman_format_code_t format); GdkPixbuf *pixbuf_from_argb32 (uint32_t *bits, int width, int height, int stride); ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/linear-gradient.c0000664000175000017500000000216014712446423017435 0ustar00mattst88mattst88#include "utils.h" #include "gtk-utils.h" #define WIDTH 1024 #define HEIGHT 640 int main (int argc, char **argv) { pixman_image_t *src_img, *dest_img; pixman_gradient_stop_t stops[] = { { 0x00000, { 0x0000, 0x0000, 0x4444, 0xdddd } }, { 0x10000, { 0xeeee, 0xeeee, 0x8888, 0xdddd } }, #if 0 /* These colors make it very obvious that dithering * is useful even for 8-bit gradients */ { 0x00000, { 0x6666, 0x3333, 0x3333, 0xffff } }, { 0x10000, { 0x3333, 0x6666, 0x6666, 0xffff } }, #endif }; pixman_point_fixed_t p1, p2; enable_divbyzero_exceptions (); dest_img = pixman_image_create_bits (PIXMAN_x8r8g8b8, WIDTH, HEIGHT, NULL, 0); p1.x = p1.y = 0x0000; p2.x = WIDTH << 16; p2.y = HEIGHT << 16; src_img = pixman_image_create_linear_gradient (&p1, &p2, stops, ARRAY_LENGTH (stops)); pixman_image_composite32 (PIXMAN_OP_OVER, 
src_img, NULL, dest_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); show_image (dest_img); pixman_image_unref (dest_img); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/meson.build0000664000175000017500000000376314712446423016400 0ustar00mattst88mattst88# Copyright Âİ 2018 Intel Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
extra_demo_cflags = [] if cc.get_argument_syntax() == 'msvc' extra_demo_cflags = ['-D_USE_MATH_DEFINES'] endif demos = [ 'gradient-test', 'alpha-test', 'composite-test', 'clip-test', 'trap-test', 'screen-test', 'convolution-test', 'radial-test', 'linear-gradient', 'conical-test', 'tri-test', 'checkerboard', 'srgb-test', 'srgb-trap-test', 'scale', 'dither', ] if dep_gtk.found() libdemo = static_library( 'demo', ['gtk-utils.c', config_h, version_h], dependencies : [libtestutils_dep, dep_gtk, dep_glib, dep_png, dep_m, dep_openmp], include_directories : inc_pixman, ) if dep_gtk.found() foreach d : demos executable( d, [d + '.c', config_h, version_h], c_args : extra_demo_cflags, link_with : [libdemo], dependencies : [idep_pixman, libtestutils_dep, dep_glib, dep_gtk, dep_openmp, dep_png], ) endforeach endif endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/parrot.c0000664000175000017500000023672514712446423015717 0ustar00mattst88mattst88/* This parrot is a finger painting by Rubens LP: * * http://www.flickr.com/photos/dorubens/4030604504/in/set-72157622586088192/ * * Used here under Creative Commons Attribution. 
The artist's web site: * * http://www.rubenslp.com.br/ * */ static const uint32_t parrot_bits[] = { 0x716f7070, 0x1c1b1b1b, 0x110f1010, 0x16151415, 0x14121313, 0x2c292b2b, 0x403e3f3f, 0x19181818, 0x06050605, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010101, 0x08070707, 0x05040404, 0x0a060908, 0x27262426, 0xb3b0b1b2, 0x99979897, 0x2b2a2929, 0x100f0f0f, 0x0f0d0e0e, 0x0e0e0c0d, 0x0d0d0b0c, 0x12111111, 0x10100e0f, 0x0e0e0c0d, 0x0e0e0c0d, 0x12101211, 0x13121212, 0x17151516, 0x100f0e0f, 0x15141414, 0x423f4042, 0x3b393a3a, 0x13121212, 0x16151515, 0x2b282b29, 0x13121112, 0x100f0f0f, 0x0f0d0f0e, 0x08070807, 0x0d0c0c0c, 0x0a090a09, 0x0e0e0c0d, 0x0c0c0a0b, 0x10100f0f, 0x0f0e0e0e, 0x07060706, 0x0d0c0d0c, 0x0e0d0e0d, 0x05040504, 0x08070807, 0x0c0b0c0b, 0x0d0c0d0c, 0x05040504, 0x110f1110, 0x08070707, 0x04030303, 0x09080808, 0x06050605, 0x01000000, 0x08070707, 0x06050505, 0x05040504, 0x100e100f, 0x0b0a0b0a, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04030403, 0x03020302, 0x0b0a0b0a, 0x14131313, 0x0e0d0d0d, 0x0e0d0e0d, 0x231f2222, 0x4d4b4b4d, 0xa7a5a6a6, 0x5b595a5a, 0x07060606, 0x00000000, 0x00000000, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010201, 0x05040404, 0x07050706, 0x04020303, 0x403e3e3f, 0xb6b3b5b5, 0x84828283, 0x1a191819, 0x0e0d0d0d, 0x0d0c0b0c, 0x0f0f0d0e, 0x0e0d0d0d, 0x0f0f0e0e, 0x0e0e0c0d, 0x0c0b0a0b, 0x0b0a090a, 0x11111010, 0x100f0f0f, 0x100f0f0f, 0x1b19191a, 0x1f1e1e1e, 0x46434544, 0x3a37383a, 0x1c1b1a1b, 0x1e1d1d1d, 0x29272828, 0x19171818, 0x0e0d0d0d, 0x0f0e0e0e, 0x06050505, 0x0c0b0b0b, 0x100e100f, 0x09080908, 0x0c0c0a0b, 0x0f0f0e0e, 0x0c0c0a0b, 0x05040404, 0x08070807, 0x0c0b0c0b, 0x05040504, 0x06050605, 0x100e100f, 0x09080908, 0x09080908, 0x12101211, 0x09080908, 0x03020202, 0x08070707, 0x01000100, 0x04030403, 0x07060606, 0x08070707, 0x08070707, 0x0f0e0f0e, 0x0b0a0b0a, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020202, 0x09080908, 0x05040504, 
0x00000000, 0x00000000, 0x0b0a0b0a, 0x0f0e0f0e, 0x1a191a19, 0x77757576, 0xc3c1c2c2, 0x75737374, 0x1f1e1e1f, 0x06050505, 0x07030605, 0x00000000, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x03020202, 0x04030403, 0x04030303, 0x02010101, 0x5b5a5959, 0xafacaeae, 0x5a575859, 0x1b19191a, 0x0e0d0d0d, 0x100e100f, 0x100f0f0f, 0x11101010, 0x12121111, 0x0c0c0a0b, 0x09090708, 0x0d0d0b0c, 0x0f0e0d0e, 0x0d0d0b0c, 0x14131313, 0x1c1b1b1b, 0x322f3132, 0x514f504f, 0x2d2b2b2b, 0x2e2b2c2e, 0x21202020, 0x201f1f1f, 0x15141414, 0x12101211, 0x0e0d0d0d, 0x08070807, 0x0b0a0a0a, 0x100e0f0f, 0x07060706, 0x0a090a09, 0x0f0f0d0e, 0x0c0c0a0b, 0x09090708, 0x0d0c0c0c, 0x0b0a0b0a, 0x06050605, 0x0b0a0b0a, 0x0c0b0c0b, 0x08070807, 0x07060706, 0x0f0e0f0e, 0x0a090a09, 0x01000000, 0x05040504, 0x03020202, 0x01000000, 0x08070707, 0x05040504, 0x09080908, 0x0d0c0d0c, 0x07060606, 0x04030403, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010201, 0x06050505, 0x08070807, 0x01000100, 0x00000000, 0x00000000, 0x0f0d0e0e, 0x2c2a2a2b, 0x9c9a9b9b, 0xcac7cac9, 0x4d4c4b4d, 0x1b19181a, 0x0a090909, 0x00000000, 0x02010201, 0x05040504, 0x00000000, 0x00000000, 0x00000000, 0x02010101, 0x06040605, 0x03020302, 0x100d0e10, 0x615f5f60, 0x9d9a9c9c, 0x32313131, 0x14131313, 0x0d0c0c0c, 0x0f0e0e0e, 0x0e0d0d0d, 0x100f0f0f, 0x11101010, 0x08070807, 0x06050405, 0x0f0e0e0e, 0x100e0f0f, 0x0d0d0c0c, 0x2a282929, 0x3d3c3b3c, 0x38373637, 0x4f4d4d4f, 0x19181718, 0x27262626, 0x14131313, 0x29272828, 0x2e2b2c2d, 0x201d1e20, 0x15121414, 0x04030403, 0x07060606, 0x0c0b0c0b, 0x0a090a09, 0x08070707, 0x0e0d0e0d, 0x0b0a0a0a, 0x07060606, 0x0c0b0b0b, 0x0a090909, 0x04030403, 0x0a090909, 0x0e0d0e0d, 0x0a090a09, 0x09080908, 0x0e0d0e0d, 0x07060706, 0x08070807, 0x08070807, 0x00000000, 0x01000000, 0x07060606, 0x04030403, 0x08070807, 0x0e0d0e0d, 0x07060706, 0x06050605, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x02010201, 0x07060706, 0x01000100, 0x00000000, 0x00000000, 0x01000100, 0x01000100, 0x322e3131, 0xa9a8a8a8, 0xb9b8b8b8, 0x39383639, 
0x1d1b1b1c, 0x0c0b0b0b, 0x04030303, 0x05040404, 0x07060706, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x06030605, 0x09060807, 0x0e0d0c0d, 0x605e5e5f, 0x99959898, 0x2e2c2c2e, 0x13121212, 0x0d0d0b0c, 0x11111010, 0x12121111, 0x13131212, 0x0f0e0e0e, 0x09080908, 0x03020302, 0x0c0b0b0b, 0x0e0d0d0d, 0x11101010, 0x38363737, 0x514f4f50, 0x34333134, 0x46434546, 0x24222124, 0x29262827, 0x04030303, 0x05040404, 0x14131313, 0x15151414, 0x100f100f, 0x07060706, 0x07060606, 0x0d0c0d0c, 0x0a090909, 0x07060706, 0x0f0e0f0e, 0x0c0b0c0b, 0x01000100, 0x0c0b0c0b, 0x0a090a09, 0x01000100, 0x08070707, 0x12101211, 0x0b0a0b0a, 0x06050605, 0x0f0e0f0e, 0x07060706, 0x04030403, 0x06050605, 0x02010201, 0x00000000, 0x05040504, 0x03020302, 0x06050605, 0x0d0c0d0c, 0x08070707, 0x07060706, 0x100e100f, 0x05040504, 0x01000100, 0x00000000, 0x02010201, 0x07060606, 0x03020202, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x06050605, 0xb8b6b6b7, 0xa4a2a3a3, 0x2c2b2b2b, 0x1c191a1b, 0x0e0c0d0d, 0x08070707, 0x35323433, 0x1a191919, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04020403, 0x1b19191a, 0x68666667, 0x7d7a7c7c, 0x23212023, 0x1c191a1c, 0x0f0e0d0e, 0x11111010, 0x11111010, 0x10100f0f, 0x0c0b0b0b, 0x09080908, 0x04030403, 0x0e0d0e0d, 0x0e0d0d0d, 0x18171717, 0x52505150, 0x5d5a5b5c, 0x3b3a383a, 0x3d3a3b3d, 0x24212224, 0x29252729, 0x06050505, 0x04030303, 0x04030403, 0x06050505, 0x0c0b0b0b, 0x09080908, 0x04030303, 0x0d0c0c0c, 0x06050605, 0x05040504, 0x0c0b0b0b, 0x08070807, 0x06050605, 0x09080908, 0x0c0b0c0b, 0x05040504, 0x0a090909, 0x0e0d0e0d, 0x0a090a09, 0x09080908, 0x0f0e0e0e, 0x09080908, 0x04030403, 0x09080908, 0x02010201, 0x00000000, 0x07060706, 0x05040504, 0x07060606, 0x0f0d0f0e, 0x06050605, 0x08070807, 0x11101010, 0x0c0b0b0b, 0x11101010, 0x09080808, 0x03020302, 0x05040404, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x7c7a7b7b, 0x1d1c1c1d, 0x100f0f0f, 0x110e1010, 0x07070607, 0x5b585a59, 0x3e3b3d3c, 0x05040404, 0x00000000, 0x01010001, 
0x00000000, 0x00000000, 0x02010101, 0x1d1b1b1c, 0x615f6060, 0x54525253, 0x1e1c1b1e, 0x18151617, 0x1b18181b, 0x1a171819, 0x1b181a1a, 0x1b18191a, 0x100e0f0f, 0x03020302, 0x07060606, 0x0d0c0c0c, 0x120f1111, 0x27252626, 0x73727272, 0x706e6f6f, 0x524f5152, 0x2f2c2d2f, 0x1e1c1d1d, 0x1f1c1e1e, 0x09070808, 0x03020202, 0x04030303, 0x03020302, 0x0b0a0a0a, 0x08070807, 0x02010101, 0x0b0a0b0a, 0x0b0a0b0a, 0x04030303, 0x0d0c0d0c, 0x09080808, 0x05040504, 0x0b0a0b0a, 0x08070807, 0x02010201, 0x0c0b0c0b, 0x0c0b0b0b, 0x0a090a09, 0x08070807, 0x100e100f, 0x06050605, 0x04030403, 0x07060706, 0x02010201, 0x00000000, 0x06050605, 0x03020302, 0x09080908, 0x0d0c0d0c, 0x0d0c0c0c, 0x0a090909, 0x0d0c0c0c, 0x15131314, 0x1b19191a, 0x1d1b1b1c, 0x11101010, 0x02010201, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010201, 0x1c1b1b1b, 0x100f0f0f, 0x07060606, 0x0b0b090b, 0x8f898e8c, 0x37353636, 0x01010001, 0x00000000, 0x00000000, 0x01000000, 0x00000000, 0x00000000, 0x14121313, 0x57545557, 0x43414142, 0x211e1f21, 0x18161517, 0x18161518, 0x1f1d1c1f, 0x1b191a1a, 0x13111212, 0x0c0b0b0b, 0x02010101, 0x08070807, 0x0f0e0e0e, 0x14121313, 0x403e3e40, 0x8b888a8a, 0x68666767, 0x4c4a4a4c, 0x28252628, 0x23202123, 0x16141615, 0x03020202, 0x06050605, 0x04030403, 0x04030403, 0x0d0c0c0c, 0x0c0b0c0b, 0x00000000, 0x0a090a09, 0x0b0a0b0a, 0x03020302, 0x09080908, 0x0c0b0b0b, 0x04030403, 0x0c0b0c0b, 0x0b0a0b0a, 0x01000100, 0x09080908, 0x0f0e0e0e, 0x09080908, 0x0c0b0b0b, 0x0b0a0909, 0x03020202, 0x06050605, 0x08070707, 0x04030303, 0x00000000, 0x06050605, 0x02010201, 0x0c0b0c0b, 0x0f0d0e0e, 0x05040504, 0x0c0c0a0b, 0x100f0f0f, 0x0f0f0d0e, 0x14121313, 0x18161717, 0x100e0f0f, 0x02010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0b0a0a0a, 0x0d0c0c0c, 0x08070707, 0x201f1f1f, 0xa29ca19f, 0x27262626, 0x00000000, 0x09080808, 0x05040404, 0x02010101, 0x03010302, 0x03020302, 0x1b19191a, 0x35323335, 0x39373738, 0x1c1a1a1c, 0x19161718, 
0x1c1a191c, 0x211f1f21, 0x201d1d20, 0x14121313, 0x09080808, 0x04030403, 0x09080908, 0x0b0a0a0a, 0x1d1b1b1c, 0x4f4c4d4e, 0x87838685, 0x4a494749, 0x32303031, 0x1b1a1a1a, 0x1a191919, 0x13121312, 0x03020302, 0x09080908, 0x0d0c0d0c, 0x0d0c0d0c, 0x100e0f0f, 0x09080908, 0x01000000, 0x09080808, 0x09080808, 0x02010201, 0x0a090a09, 0x09080908, 0x04030403, 0x0c0b0c0b, 0x07060706, 0x00000000, 0x08070807, 0x0a090909, 0x09080808, 0x08070707, 0x0c0b090a, 0x03020000, 0x04030101, 0x06050405, 0x03020202, 0x00000000, 0x05040504, 0x04030403, 0x07060606, 0x100e100f, 0x07060706, 0x09080808, 0x11111010, 0x0d0d0b0c, 0x0e0e0c0d, 0x100f0f0f, 0x0f0e0f0e, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08070707, 0x10100f0f, 0x0c0a0b0b, 0x41403f40, 0x89858887, 0x1e1c1d1d, 0x00000000, 0x110f1010, 0x37353636, 0x1f1e1e1e, 0x05040404, 0x06050505, 0x201d1f1f, 0x322f3031, 0x33303132, 0x18151618, 0x19161719, 0x1f1d1c1f, 0x1c1a1a1b, 0x1c1a1a1b, 0x17151516, 0x09080808, 0x04030403, 0x09080908, 0x0f0d0e0e, 0x1a191819, 0x66646465, 0x77747676, 0x2b292a29, 0x07050706, 0x07060606, 0x27242726, 0x25232424, 0x08070707, 0x09080808, 0x11101010, 0x12111111, 0x15131414, 0x0b0a0b0a, 0x04030403, 0x04030203, 0x06050304, 0x01000000, 0x06050505, 0x07060606, 0x07060606, 0x09080808, 0x09080808, 0x02010101, 0x07060606, 0x100e0f0f, 0x09080808, 0x0c0b090a, 0x0a090708, 0x05040203, 0x03020000, 0x05040203, 0x04030102, 0x03020000, 0x05040304, 0x03020202, 0x09080808, 0x0a090a09, 0x08070807, 0x0a090a09, 0x110f1110, 0x0d0d0b0c, 0x0e0e0c0d, 0x11110f10, 0x0c0b0b0b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04030303, 0x0d0c0c0c, 0x0a090909, 0x504d4e4f, 0x78757776, 0x1a191919, 0x00000000, 0x16151515, 0x31303030, 0x312f3030, 0x100f0f0f, 0x0b0a0b0a, 0x14121313, 0x2c292a2c, 0x26232426, 0x19171619, 0x1b171a1b, 0x1f1c1d1f, 0x201d1d20, 0x0d0c0c0c, 0x0a090909, 0x06050505, 0x05040504, 0x0c0b0b0b, 0x0b0a0a0a, 
0x22212121, 0x817d7f7e, 0x59575857, 0x17161616, 0x04030303, 0x01000000, 0x07060706, 0x0c0b0b0b, 0x02010201, 0x04030403, 0x0b0a0a0a, 0x201e1e1f, 0x17151616, 0x0d0c0c0c, 0x02010201, 0x04030303, 0x04030102, 0x04030101, 0x05040202, 0x06050304, 0x05040203, 0x09080707, 0x05040303, 0x03020102, 0x07060405, 0x09080607, 0x09080506, 0x0a090708, 0x0c0b090a, 0x06050303, 0x04030101, 0x06050302, 0x03020000, 0x03020001, 0x04030102, 0x05040202, 0x06050304, 0x0a090808, 0x07060505, 0x08070707, 0x0a090909, 0x0a090909, 0x09080808, 0x12111111, 0x09080808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020202, 0x03020302, 0x09080908, 0x0a090a09, 0x110f1110, 0x66646465, 0x100e0f0f, 0x05030504, 0x1f1d1e1f, 0x25242424, 0x22212121, 0x0f0e0e0e, 0x17151616, 0x0f0e0e0e, 0x1e1d1d1d, 0x211e1e21, 0x1a18171a, 0x17151417, 0x1d1a1a1d, 0x201d1e20, 0x19161719, 0x00000000, 0x00000000, 0x01000100, 0x15121414, 0x16141415, 0x32303031, 0x78747776, 0x2f2e2d2d, 0x09080808, 0x06030605, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x09080808, 0x18171617, 0x11101010, 0x02010201, 0x01000000, 0x05040303, 0x03020000, 0x05040202, 0x06050203, 0x04030102, 0x06050304, 0x04030001, 0x03020001, 0x09090605, 0x09090607, 0x09080608, 0x16131216, 0x0a0a0709, 0x06050305, 0x05040204, 0x07060406, 0x03020002, 0x03020001, 0x04030102, 0x02010000, 0x06050304, 0x06050304, 0x03020001, 0x09080607, 0x100f0c0d, 0x0e0d0a0b, 0x0d0c090a, 0x0d0c0b0b, 0x05040303, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020302, 0x0b0a0a0a, 0x09080908, 0x0e0d0e0d, 0x0f0e0f0e, 0x0f0c0d0e, 0x07060606, 0x18171717, 0x27242526, 0x1d1b1c1c, 0x0b0a0a0a, 0x16141515, 0x0d0c0c0c, 0x17161617, 0x1a18191a, 0x1a17181a, 0x18151618, 0x1e1b1d1d, 0x201e1e1f, 0x17141516, 0x0e0b0c0e, 0x00000000, 0x00000000, 0x03020202, 0x15121314, 0x2f2c2d2e, 0x58565657, 0x18161717, 0x06030505, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x08070707, 0x0c0b0b0b, 0x15131314, 0x07060606, 0x01000000, 0x03020001, 0x03020000, 0x05040202, 0x09080607, 0x05040102, 0x0b0a0809, 0x0e0c0b0c, 0x18121110, 0x271b1d19, 0x39262a25, 0x4f363b33, 0x6042483f, 0x3f2a2a1f, 0x36272314, 0x3a2c2615, 0x3a2e2717, 0x382e2617, 0x3c322b1c, 0x362e271b, 0x29221c12, 0x28231e17, 0x1815120f, 0x0d0b0909, 0x0b0a0808, 0x0c0c090b, 0x100f0c0d, 0x11100d0e, 0x100f0c0d, 0x03020001, 0x02010000, 0x03020000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x05040504, 0x03020302, 0x00000000, 0x0a090909, 0x0a090909, 0x08070707, 0x0c0b0c0b, 0x09080908, 0x09080908, 0x0a090909, 0x1b191a1a, 0x211e1f20, 0x14111214, 0x2c292a2b, 0x19171619, 0x1e1c1c1e, 0x1d1b1b1d, 0x14131213, 0x0d0c0c0c, 0x13121212, 0x13121212, 0x0b0a0b0a, 0x00000000, 0x00000000, 0x00000000, 0x04030403, 0x17151516, 0x1a191919, 0x2d2b2c2c, 0x04030303, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020302, 0x100e0f0f, 0x1c191b1b, 0x0f0e0f0e, 0x03020202, 0x00000000, 0x07050404, 0x09060507, 0x0a080606, 0x05050202, 0x09060607, 0x32272a2f, 0x5e43535c, 0x8f617882, 0xd18b9f91, 0xffacbb99, 0xffafb687, 0xffb3b37a, 0xffb6b071, 0xffb8ae69, 0xffb8ab61, 0xffbaaa5c, 0xffbca955, 0xffbea854, 0xffbfa958, 0xf9bba553, 0xefb29e4c, 0xe7ae9d56, 0xb88e7f49, 0xa27d7046, 0x7c5d5436, 0x4c3c3629, 0x1f1c1a1a, 0x12120f12, 0x08060404, 0x03010000, 0x03020000, 0x03020000, 0x03020000, 0x01000000, 0x00000000, 0x00000000, 0x02010201, 0x06050605, 0x02010201, 0x07060706, 0x0a090a09, 0x0b0a0a0a, 0x17141616, 0x0a090a09, 0x08070807, 0x05040504, 0x0e0d0d0d, 0x11101010, 0x0d0b0b0d, 0x4b49494a, 0x4f4c4e4e, 0x100e0f0f, 0x0f0e0e0e, 0x08070707, 0x0b0a0a0a, 0x0a090a09, 0x0d0c0d0c, 0x07060706, 0x00000000, 0x00000000, 0x00000000, 0x03020302, 0x0e0d0e0d, 0x1a181819, 0x0e0d0d0d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010201, 0x01000000, 0x12111111, 0x14131313, 0x13121212, 0x03020102, 0x02010000, 
0x02010000, 0x0e0c090b, 0x03030000, 0x0d0a090a, 0x4330393e, 0x9269808b, 0xd193b0b5, 0xf7a9c1ab, 0xffa8bea4, 0xffa5bda7, 0xffa9b998, 0xffafb27d, 0xffb6ae6c, 0xffb6ac65, 0xffb8aa5c, 0xffbda650, 0xffbda54d, 0xffbea54c, 0xffbfa54c, 0xffbea54c, 0xffbfa44b, 0xffbfa64c, 0xffbfa54c, 0xffbca34b, 0xffb09a46, 0xffa48f40, 0xeb988541, 0xb67d7040, 0x59463f2d, 0x07070506, 0x03030002, 0x04030002, 0x03020000, 0x03020000, 0x02010000, 0x01000000, 0x06050605, 0x08070707, 0x01000100, 0x06050605, 0x0a090a09, 0x07060706, 0x0e0c0d0d, 0x120f1111, 0x06050605, 0x0b0a0b0a, 0x05040404, 0x02020102, 0x12111111, 0x5b595a5a, 0x48464747, 0x06050505, 0x03020202, 0x02010201, 0x01000101, 0x00000000, 0x00000000, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x05040504, 0x04030403, 0x0c0a0a0b, 0x06050505, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x100f0f0f, 0x22201f21, 0x14131112, 0x08080305, 0x02010000, 0x02010000, 0x00000000, 0x06040404, 0x62455259, 0xbf86a4b0, 0xf4abcccb, 0xfdadbd9b, 0xffadb891, 0xffabbb9b, 0xffacb686, 0xffb6ab66, 0xffbaa958, 0xffb9a85c, 0xffb6ac67, 0xffbaa95c, 0xffbba752, 0xffbda64e, 0xffc0a54d, 0xffc0a44c, 0xffbea64d, 0xffbfa64c, 0xffbea64c, 0xffbfa64c, 0xfebfa54a, 0xffbea54b, 0xffbba249, 0xffb39b45, 0xff9b8739, 0xff8f7b32, 0xf8907e37, 0xac756b43, 0x1c1a1711, 0x02020001, 0x02000000, 0x02010000, 0x03020000, 0x05040303, 0x07060606, 0x01000100, 0x07060706, 0x09080908, 0x07060706, 0x0d0c0d0c, 0x0b0a0a0a, 0x0f0c0d0e, 0x07060706, 0x04030403, 0x00000000, 0x0a090909, 0x33323232, 0x211f2021, 0x02020101, 0x00000000, 0x00000000, 0x00000000, 0x01000100, 0x03010202, 0x05040404, 0x03010202, 0x05030403, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010101, 0x03020001, 0x08070506, 0x12100e10, 0x0a080606, 0x06040203, 0x0b070707, 0x241b1c1f, 0x7752656c, 0xe9a1c9d6, 0xffb7ddea, 0xfcb7cbb7, 0xf9abb890, 0xfeb1b98a, 
0xfeb4b175, 0xffb8ad69, 0xffb7ab65, 0xffb2af70, 0xffafb484, 0xffb1b380, 0xffb8aa5c, 0xffbea54e, 0xffbfa64b, 0xffc0a64d, 0xffc0a64b, 0xffbfa64b, 0xffc0a54d, 0xffbfa54a, 0xffbca34a, 0xffbba149, 0xffbba149, 0xffbaa04a, 0xffb59c44, 0xffb09845, 0xffab9543, 0xfe9d883a, 0xfe8d7931, 0xff88772f, 0xde837744, 0x4c3b372d, 0x06050303, 0x03020001, 0x03020000, 0x05040203, 0x03020202, 0x01000100, 0x07060706, 0x06050605, 0x0e0d0e0d, 0x0d0c0d0c, 0x04030403, 0x08070807, 0x03020202, 0x00000000, 0x00000000, 0x08070707, 0x06060505, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x06050505, 0x05040404, 0x04030302, 0x0a09080a, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x02010201, 0x04030303, 0x03020101, 0x02010000, 0x03020000, 0x04030002, 0x1c141617, 0x452d383c, 0x8858707b, 0xe89fc8d4, 0xffb6dadf, 0xffbbd3c8, 0xfebac5a4, 0xffbab885, 0xffb9ba89, 0xf7b0b68a, 0xfeb1b88a, 0xffacb484, 0xffaeb484, 0xffafb382, 0xffb7ab62, 0xffbfa547, 0xffc0a44b, 0xffbea64b, 0xffbca248, 0xffb9a049, 0xffb8a04a, 0xffb69f4d, 0xffb39c49, 0xffad9644, 0xffa89242, 0xffa58f3f, 0xffa18b3b, 0xffa08b39, 0xffa5903e, 0xffa38c3b, 0xff958035, 0xff907c34, 0xfe887531, 0xfe7e6c29, 0xff7c6a29, 0xe6837540, 0x5a4b473d, 0x08070506, 0x03020001, 0x04030102, 0x0a090909, 0x09080908, 0x04030403, 0x0e0d0e0d, 0x0e0d0e0d, 0x04030403, 0x04030403, 0x04030303, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03010201, 0x01000100, 0x01000100, 0x05030403, 0x02010101, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020302, 0x08070707, 0x06050304, 0x05040202, 0x02010000, 0x02010000, 0x5d3f4d51, 0xc37da2af, 0xeb8bbed2, 0xffacd1d2, 0xffb6d2cc, 0xffb9d5d0, 0xfeb7cab6, 0xffbac39f, 0xffb9c4a1, 0xfeb6c19a, 0xffb5b785, 0xffb5b279, 0xfeb1af76, 0xffb1b179, 0xffbaa859, 0xffbea64d, 0xffc0a74e, 
0xffb9a248, 0xffa48d3a, 0xffab9752, 0xffb1a268, 0xffbeb282, 0xffcdc4a0, 0xffd2caa8, 0xffbfb79a, 0xffb9b197, 0xff938b6e, 0xffcbc3a7, 0xff9b9171, 0xff998b60, 0xff897737, 0xff83702a, 0xff82702b, 0xff7e6c2b, 0xff7a6827, 0xff766525, 0xff7a6828, 0xc9887c57, 0x17161211, 0x06050304, 0x05040203, 0x05040404, 0x00000000, 0x0b0a0b0a, 0x0a090a09, 0x07060706, 0x07060706, 0x02010101, 0x00000000, 0x00000000, 0x00000000, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020202, 0x09080808, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x02010201, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x05050404, 0x0d0c0c0c, 0x08070507, 0x06050204, 0x07060305, 0x03030000, 0x38263032, 0xd482acb7, 0xff96c5c8, 0xff9ac1bf, 0xff9acbd5, 0xfe9cd2e5, 0xffb1d5d8, 0xffaec7b4, 0xffb1ba90, 0xffb6af72, 0xffb9aa5c, 0xffb7ac64, 0xffb1b07c, 0xffaeb27d, 0xffb0b178, 0xffa8b68e, 0xffa5c0aa, 0xffb29e4b, 0xffa48f3f, 0xffdfd7b7, 0xfff9f7f1, 0xfffaf8f4, 0xffe5e5e2, 0xffcacac8, 0xffd2d2d1, 0xffebebea, 0xffd8d8d7, 0xffb0b0ae, 0xfff2f2f1, 0xffc6c6c6, 0xffc3c2c1, 0xffe7e3da, 0xffada78c, 0xff8f7f45, 0xff786424, 0xff7a682c, 0xfe8b7c48, 0xff978a57, 0xff887a42, 0x805d5b4a, 0x13120f13, 0x0b0a0808, 0x09090607, 0x0d0c0c0c, 0x0a090a09, 0x02010201, 0x05040504, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020202, 0x08070707, 0x05050405, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x05040504, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03020202, 0x0a090909, 0x05040202, 0x07070202, 0x07060302, 0x0e090a09, 0x89567077, 0xff94c4cc, 0xff8bbfc3, 0xfe92c7d5, 0xff91cfeb, 0xff9dd4ec, 0xffa0cfd8, 0xff9bbeb4, 0xffa9b487, 0xffb3ad68, 0xffafb074, 0xffadb486, 0xffa7b894, 0xffabb68f, 0xffa8b99b, 0xffa3bca9, 0xffa4b189, 0xffaba15b, 0xffb5a460, 0xffe5dfc8, 0xfffefefe, 0xfffdfdfe, 0xfffdfdfd, 0xffd2d2d3, 0xffeeeeee, 
0xfff2f2f2, 0xffcecece, 0xffc9c9c9, 0xffaaaaaa, 0xfff7f7f7, 0xffe4e4e4, 0xffc5c5c4, 0xffe2e1e3, 0xffc3c2c5, 0xffdad5cc, 0xffa89d71, 0xffeceada, 0xfffffffe, 0xfffafafd, 0xfff3f2e7, 0x8978766e, 0x100f0c0f, 0x0c0b0909, 0x13121011, 0x0e0d0d0d, 0x00000000, 0x04030403, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x04040303, 0x09090809, 0x09080808, 0x0f0e0d0f, 0x03030202, 0x02010101, 0x07050606, 0x00000000, 0x02010201, 0x08070807, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x02010101, 0x07060505, 0x04030102, 0x04040000, 0x02020000, 0x35242a2f, 0xc06b99ac, 0xff7bc1d9, 0xff79c0d5, 0xfe7bc3e1, 0xff8bc6d3, 0xffa8c5aa, 0xffb1bc85, 0xffafbb85, 0xffacbe95, 0xffb1b884, 0xffadbb94, 0xffa9bc9e, 0xffa6b99a, 0xffa2b99f, 0xff9db497, 0xff9ca57b, 0xffa09552, 0xffaf9b55, 0xffd4c8a2, 0xfff8f5ee, 0xffffffff, 0xfffefefc, 0xfffefefe, 0xfff1f1f1, 0xffe2e2e2, 0xff737373, 0xffdadada, 0xffe7e7e7, 0xffc2c2c2, 0xffc8c8c8, 0xffcfcfcf, 0xffececec, 0xffcececd, 0xffb2b2b2, 0xfff6f6f6, 0xffc1c2c4, 0xffe5e6e6, 0xfffcfbfc, 0xff8c8b8d, 0xfec5c5c3, 0xffffffff, 0xc8c7c7c7, 0x11110e0f, 0x0c0b080a, 0x0d0d090a, 0x0a090909, 0x04030403, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000100, 0x05040504, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x06050505, 0x0c0b0b0c, 0x0a0a0909, 0x07070607, 0x02010101, 0x01000000, 0x03020202, 0x06050605, 0x08060707, 0x06050605, 0x04030403, 0x00000000, 0x00000000, 0x00000000, 0x03020101, 0x09080508, 0x0c0c0808, 0x05040203, 0x02020000, 0x06050303, 0x6745555a, 0xde6fabc2, 0xff6bc0e0, 0xff6cbee2, 0xfe5fbae1, 0xff7bbcb7, 0xffccca5e, 0xffedce35, 0xffeace3e, 0xffe7d146, 0xffebd13f, 0xffeece35, 0xffeccc3c, 0xffdccf56, 0xffcdd077, 0xffd6d373, 0xffdbcb60, 0xffdac24e, 0xffdfc451, 0xfff1ebcb, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffcfcfc, 0xffdadada, 0xffb8b8b8, 0xffd9d9d9, 0xffd9d9d9, 0xffe5e5e5, 0xffe3e3e3, 0xffd3d3d3, 0xffdedfde, 
0xffe3e3e3, 0xffd3d3d4, 0xffefefef, 0xffd8d8d7, 0xfff4f4f4, 0xffacabab, 0xff868686, 0xffffffff, 0xfefcfcfa, 0xe3a09f9f, 0x2d262325, 0x04040102, 0x05040102, 0x05040303, 0x01000101, 0x00000000, 0x02010201, 0x00000000, 0x01000000, 0x00000000, 0x06050505, 0x0d0c0c0c, 0x00000000, 0x00000000, 0x02010201, 0x19181718, 0x11100f11, 0x08080707, 0x04040303, 0x00000000, 0x00000000, 0x00000000, 0x05040504, 0x02010101, 0x07060706, 0x04030303, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08070506, 0x11100c0f, 0x06040305, 0x00000000, 0x2b1e2223, 0xce89acb6, 0xf872bddb, 0xff5eb9e2, 0xff64bce3, 0xfe55b5e6, 0xff5eb6d3, 0xffc3ce6a, 0xffffda27, 0xfffeda25, 0xfffdda2e, 0xffffd928, 0xffffd926, 0xffffd925, 0xffffda25, 0xfffdd92e, 0xfffade3f, 0xfffeda33, 0xfffed933, 0xffffdc36, 0xffe2cd57, 0xffb6a878, 0xffb1afa7, 0xffb1b0b2, 0xffacacac, 0xffababab, 0xffefefef, 0xffffffff, 0xfffafafa, 0xfff7f7f7, 0xfff8f8f8, 0xfffbfbfb, 0xffe8e8e8, 0xffd6d6d6, 0xffececec, 0xfff5f5f5, 0xffededed, 0xfffcfcfc, 0xfffbfbfb, 0xfffdfdfd, 0xfffefefe, 0xffe7e7e7, 0xfff7f7f7, 0xffffffff, 0xfeb4b3b4, 0xff504c4d, 0x9a404242, 0x00000000, 0x02010000, 0x02010000, 0x00000000, 0x00000000, 0x00000000, 0x01000100, 0x01000100, 0x0a090909, 0x0c0b0b0b, 0x312f3030, 0x0d0c0c0c, 0x0e0d0d0d, 0x0c0b0b0b, 0x04010302, 0x03010201, 0x04040303, 0x04030303, 0x00000000, 0x00000000, 0x01000100, 0x06050605, 0x0d0c0d0c, 0x03020302, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x05040404, 0x09080607, 0x09080605, 0x07060405, 0x29212022, 0xca85a8b2, 0xff79c1de, 0xfe4baad5, 0xff55b3db, 0xff57b6e0, 0xff62b8cd, 0xffbacb74, 0xffffda26, 0xfffed924, 0xfffed82a, 0xfffed92d, 0xfffed828, 0xfffed926, 0xffffd829, 0xfffcdb30, 0xfffdda34, 0xfffed92f, 0xfffdd92e, 0xfffddc2f, 0xfffdd829, 0xfffad734, 0xfff1d565, 0xffedece7, 0xfff5f5f6, 0xfffefefe, 0xfffdfdfd, 0xffb9b9b9, 0xffbebebe, 0xfff3f3f3, 0xfff8f8f8, 0xfffbfbfb, 0xfffdfdfd, 0xfff7f7f7, 0xffeaeaea, 0xffe6e6e6, 0xfff6f6f6, 0xfff7f7f7, 0xffc7c7c7, 0xfff8f8f8, 0xfffbfbfb, 0xfffefefe, 0xffffffff, 
0xfef9f9f9, 0xfeaca9ab, 0xff3d393d, 0xff4d484c, 0xe9424242, 0x01010101, 0x04030101, 0x02010000, 0x00000000, 0x03020202, 0x0a090909, 0x00000000, 0x07060706, 0x0d0c0c0c, 0x28262727, 0x47454547, 0x08070707, 0x0e0d0d0d, 0x0f0f0d0e, 0x08070707, 0x07060606, 0x05040405, 0x03020203, 0x02010102, 0x02010201, 0x0a090a09, 0x09080908, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x04030304, 0x0c0a0b0b, 0x0b0a0909, 0x0b0a0708, 0x09080608, 0x1d17171a, 0xaf6e8f97, 0xff79c2dd, 0xff49a8d3, 0xff4eaed8, 0xff55b6e2, 0xff6ebbc1, 0xffc9ce64, 0xffffda26, 0xfffdd927, 0xfffed926, 0xfffcd929, 0xfffddc40, 0xfffdda2f, 0xfffed926, 0xfffdda2b, 0xfffedc33, 0xfffdda2f, 0xfffdd92b, 0xffffd827, 0xfffdd827, 0xffffda27, 0xffe7d16d, 0xff959590, 0xff6f6b6e, 0xff8f8d8e, 0xffb0afaf, 0xffd2d1d1, 0xffdfdfdf, 0xffededed, 0xffc4c4c4, 0xffc8c8c8, 0xffd6d6d6, 0xffdbdbdb, 0xffd8d8d8, 0xffd4d4d4, 0xff9d9d9d, 0xff8e8d8e, 0xffb1b0b1, 0xfff7f7f7, 0xffe0e0e0, 0xfffcfdfd, 0xfffdfdfd, 0xffddddde, 0xfb9f9d9e, 0xfc2d292c, 0xff2e282c, 0xfe5a565b, 0xf63c383b, 0x54333333, 0x08080708, 0x07060304, 0x02010101, 0x02010201, 0x08070807, 0x06050505, 0x0d0c0c0c, 0x201e1f1f, 0x34313333, 0x18171617, 0x0e0e0d0e, 0x17171617, 0x1d1c1b1d, 0x02010101, 0x00000000, 0x02000001, 0x01000001, 0x03010103, 0x07060706, 0x09080908, 0x04030403, 0x00000000, 0x00000000, 0x00000000, 0x03020203, 0x09050607, 0x0b090909, 0x13111012, 0x09070607, 0x110e0d0f, 0xbc7899a6, 0xff77c1dd, 0xff4ba8d4, 0xfe4dadd6, 0xff56b7e1, 0xff78bcbb, 0xffd8d152, 0xffffda26, 0xffffd827, 0xfffcdd43, 0xfffcdc3f, 0xfffbdf5e, 0xfff9dd4e, 0xfffddc3f, 0xfffddb38, 0xfffed927, 0xfffdd928, 0xfffdd92d, 0xfffdd829, 0xfffed829, 0xfffed928, 0xfff5d430, 0xffa39036, 0xff534d49, 0xffaba8aa, 0xfffdfdfd, 0xffdadada, 0xffd8d7d7, 0xffc4c4c4, 0xffbfbfbf, 0xffc7c7c7, 0xffcecece, 0xffc3c3c3, 0xffbfbfbf, 0xffb8b8b8, 0xffb1b1b1, 0xffb8b8b8, 0xffdedede, 0xffb6b6b6, 0xfff1f1f1, 0xffffffff, 0xffc2c1c1, 0xff757274, 0xfe4b474b, 0xff2b252a, 0xff2e292d, 0xfe332e32, 0xff514c50, 0xfe514d50, 0xab606262, 
0x1a1a1a1a, 0x0b0b0809, 0x04030203, 0x03020302, 0x0a090a09, 0x0e0d0d0d, 0x13121112, 0x29262729, 0x19171818, 0x06050605, 0x0d0c0c0d, 0x1b1a1a1a, 0x1a171819, 0x03010103, 0x01000000, 0x01000001, 0x02000002, 0x02000002, 0x00000000, 0x03020302, 0x06050605, 0x01000000, 0x00000000, 0x01000000, 0x04000002, 0x06000001, 0x0f07080a, 0x0d0a070a, 0x03030101, 0xbd789ba2, 0xff75c1de, 0xff4eacd5, 0xfe4bacd2, 0xff56b4e1, 0xff69bacf, 0xffbecb6f, 0xfffed928, 0xffffd822, 0xfffbdc3c, 0xfffbe068, 0xfff9e179, 0xfff8e37d, 0xfffcdf65, 0xfffcdd4d, 0xfffbdb37, 0xfffed928, 0xfffddb31, 0xfffdd932, 0xfffed82a, 0xfffeda29, 0xffffda2b, 0xfff5d333, 0xff82722d, 0xff2d2925, 0xff959298, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfff6f6f6, 0xfff0f0f0, 0xffe6e6e6, 0xffe2e2e2, 0xffe5e5e5, 0xffededed, 0xfff5f5f5, 0xfff7f7f7, 0xffb9b9b9, 0xffe4e4e3, 0xffe9e8e9, 0xff5c5a5d, 0xff342f33, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff322d31, 0xff464044, 0xff4a464a, 0xea8f8e8f, 0x5c524f52, 0x0c0c080a, 0x04030102, 0x07060706, 0x0e0d0e0d, 0x11101010, 0x16151515, 0x14131213, 0x05040404, 0x00000000, 0x22212121, 0x0e0d0c0e, 0x04020204, 0x01000001, 0x02000002, 0x01000001, 0x01000001, 0x03000002, 0x04030403, 0x02010101, 0x00000000, 0x03020302, 0x02000101, 0x05000001, 0x05000001, 0x08010203, 0x0f060407, 0x0c050407, 0xa069838b, 0xff89c6df, 0xfe59b3d7, 0xff4ba8d3, 0xff57b5e1, 0xff5ebad5, 0xff9fc28d, 0xfffad930, 0xffffd824, 0xfffdd830, 0xfffbdf57, 0xfff8e272, 0xfff6e589, 0xfff7e283, 0xfff6e382, 0xfff8df64, 0xfffddc4a, 0xfffdda2c, 0xfffdd82b, 0xfffacf26, 0xfffbd527, 0xfffed827, 0xfffed825, 0xfffdd225, 0xff917a39, 0xff352d33, 0xff302e30, 0xff3b373a, 0xff5b585b, 0xff7e7b7e, 0xffaeaeae, 0xffeaeaea, 0xfff4f3f4, 0xfff6f6f6, 0xfff5f5f5, 0xfff5f5f5, 0xfff6f6f6, 0xffd8d8d8, 0xffa7a7a7, 0xffefefef, 0xffe5e4e5, 0xff524d51, 0xff2b262b, 0xff2f2a2e, 0xff2f2a2e, 0xff2f2a2e, 0xff2e292d, 0xff2e292d, 0xff302b2f, 0xff433d41, 0xff484246, 0xff939192, 0x8c6c6b6d, 0x10100d0e, 0x07060405, 0x0d0c0c0c, 0x0e0d0e0d, 0x12111111, 
0x11101010, 0x09080808, 0x07060706, 0x08070707, 0x08060608, 0x01000000, 0x01000001, 0x02000002, 0x01000001, 0x01000001, 0x08050608, 0x06050605, 0x05040404, 0x00000000, 0x03020202, 0x04020202, 0x05000001, 0x06000001, 0x0a020304, 0x0c040305, 0x0a030003, 0x764e5e64, 0xff97ccdb, 0xff80c0d6, 0xff5dafd0, 0xff52b3dd, 0xff53b3df, 0xff6ebac3, 0xffcbd06b, 0xffedd53c, 0xfffed824, 0xfffcdb3a, 0xfffae36a, 0xfff8e173, 0xfff8e387, 0xfff6e593, 0xfff7e27b, 0xfff9e170, 0xfffbdf57, 0xfffed92c, 0xfffdd528, 0xffdab123, 0xffc2a528, 0xfffcd82a, 0xffe1b824, 0xffb89326, 0xff40392e, 0xff726e71, 0xffbcbbbb, 0xffdad9da, 0xffdbdada, 0xffebeaea, 0xffc9c8c9, 0xffa5a4a4, 0xffa9a8a8, 0xffacabab, 0xffadabac, 0xffaeacad, 0xffbbbbbb, 0xffdadada, 0xffffffff, 0xffffffff, 0xff989798, 0xff2f2a2e, 0xff2e292d, 0xff2e292d, 0xff2f2a2e, 0xff2e292d, 0xff2f2a2e, 0xff2e292d, 0xff312c30, 0xff423d41, 0xfe474246, 0xff8b898b, 0x71434344, 0x12110e0f, 0x0c0b080a, 0x110f0f10, 0x0f0e0f0e, 0x13111312, 0x09080908, 0x03020302, 0x06050505, 0x100f0f0f, 0x01000001, 0x01000001, 0x01000001, 0x02000001, 0x03000103, 0x0d0a0b0d, 0x13101113, 0x03020202, 0x05040504, 0x02010101, 0x04010101, 0x06000001, 0x07000002, 0x08000103, 0x0e050406, 0x06000000, 0x50364144, 0xf8a7ccd2, 0xf599c5d0, 0xfd93c4d4, 0xff57b3d6, 0xff41a0cc, 0xff5ab6dc, 0xff82c4d1, 0xffafca90, 0xfff5d732, 0xffffd925, 0xfffedb33, 0xfffcdb3f, 0xfff9e170, 0xfff7e48e, 0xfff8e27c, 0xfff8e16d, 0xfff9df62, 0xfffbde52, 0xfffedb2b, 0xfffdc825, 0xff9c8129, 0xff74652a, 0xffb79b2e, 0xff7a6026, 0xff4d4533, 0xff97959a, 0xfffbfdfa, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffefefe, 0xfffbfbfb, 0xfffefffe, 0xffffffff, 0xffffffff, 0xfffefefe, 0xfff6f6f6, 0xff656365, 0xff2c272b, 0xff2e282c, 0xff2d282c, 0xff2e292d, 0xff2d282c, 0xff2f2a2e, 0xff2f2a2e, 0xff302b2f, 0xff423d41, 0xff3d383c, 0xff807e81, 0xa7676969, 0x13100d0e, 0x04030102, 0x0b0a090b, 0x0f0e0f0e, 0x09080908, 0x04030303, 0x0a090a09, 0x0f0e0e0e, 0x24222223, 0x01000001, 0x02000002, 
0x01000001, 0x00000000, 0x04030303, 0x02010201, 0x02000002, 0x00000000, 0x05040504, 0x04030303, 0x04000001, 0x07000002, 0x07000002, 0x0c040404, 0x08020000, 0x01000000, 0x583b4549, 0xf7a1c8d2, 0xd38cabb5, 0xfc6ebbd8, 0xff3191be, 0xff50b1d8, 0xff63bde5, 0xff88c3b5, 0xffe4d449, 0xffffda29, 0xffffd826, 0xffffd829, 0xfffddc3a, 0xfff9e06d, 0xfff8e277, 0xfffbde4f, 0xfffcdb43, 0xfffddd49, 0xfff5d447, 0xffeaca44, 0xffc19930, 0xff4e4429, 0xff51472d, 0xff433828, 0xff302c2c, 0xff726d70, 0xfff8f8f8, 0xfffffefe, 0xfffdfdfd, 0xfffefdfe, 0xfffdfdfd, 0xfffefefe, 0xfffefefe, 0xfffdfdfd, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffdfdfd, 0xfffefefe, 0xffe3e2e2, 0xff524d50, 0xfe2c272b, 0xff2b282b, 0xfe2d282c, 0xff2e282c, 0xff2e292d, 0xff2e292d, 0xff2f2a2e, 0xff2e292d, 0xff3f3a3e, 0xff353034, 0xff767477, 0xbb757878, 0x110a0809, 0x05000000, 0x07030103, 0x05030403, 0x01000100, 0x0d0c0d0c, 0x0d0c0c0c, 0x201e1e1f, 0x25222325, 0x01000001, 0x02000002, 0x08050607, 0x0f0c0d0f, 0x02000002, 0x02000001, 0x01000000, 0x04000000, 0x01000000, 0x04030303, 0x04010201, 0x05000001, 0x07000002, 0x09040103, 0x04010100, 0x0e080709, 0xbf7c9aa0, 0x9b647c82, 0xc46597a8, 0xff3996bd, 0xff42a3cd, 0xff56b4e0, 0xff6ab9ca, 0xffcdd162, 0xffefd846, 0xfffcd829, 0xfffed929, 0xfffed925, 0xfffcda3b, 0xfffae273, 0xfffbdc40, 0xffffd929, 0xfffed927, 0xffffdb33, 0xffe5b32d, 0xffad8e3e, 0xff63583b, 0xff2f2a29, 0xff383129, 0xff2f292e, 0xff2c2a2b, 0xff817d81, 0xffffffff, 0xfffefefe, 0xfffefefe, 0xfffdfefd, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xffd4d3d3, 0xff413f41, 0xff2f292d, 0xf1464748, 0xff2d292d, 0xff2c292e, 0xff302b2e, 0xff2d282c, 0xff2e292d, 0xff2e292d, 0xff3a373a, 0xff332f33, 0xff787376, 0xb9727373, 0x12080508, 0x0a030003, 0x0c030204, 0x0b060608, 0x1c181a1b, 0x19161718, 0x16161515, 0x19181818, 0x12111111, 0x04020204, 0x0f0c0d0f, 0x0b08090b, 0x01000001, 0x02000001, 0x04000001, 0x07000002, 0x07000002, 
0x06000002, 0x05000001, 0x03000000, 0x04010102, 0x05020102, 0x06040203, 0x0e0b0709, 0xae758d92, 0xa96d868c, 0x76445b64, 0xfa4fa0c4, 0xff399ac6, 0xff4bacd3, 0xff4eadd4, 0xff8bc2a5, 0xffc0cc75, 0xffe7d440, 0xffffda2b, 0xfffeda28, 0xfffdd927, 0xfffcdf53, 0xfffcdc45, 0xfffed825, 0xffffd825, 0xfffed629, 0xfffed731, 0xffd8a428, 0xff715b2c, 0xff372f2b, 0xff2e2a2b, 0xff322b2c, 0xff2f292e, 0xff2d292d, 0xff777576, 0xfffdfdfd, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffdfdfd, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xffc9c8c8, 0xfe363236, 0xff322f32, 0x583d3a3c, 0xe23d3d3e, 0xff2b272a, 0xfe2c272b, 0xff2d282c, 0xff2e292d, 0xff2f2a2e, 0xff342f33, 0xfe332f33, 0xff757174, 0xb36c6d6d, 0x12060507, 0x0b030003, 0x0b020204, 0x07010103, 0x15121314, 0x221f2022, 0x211e1e20, 0x0f0b0b0d, 0x0e0b0c0d, 0x1a17181a, 0x0c090a0b, 0x03000001, 0x05000001, 0x06000002, 0x07000002, 0x07000002, 0x07000002, 0x06000001, 0x07000002, 0x05000001, 0x03010101, 0x04010000, 0x04000000, 0x835a686c, 0xb97d9698, 0x3822292d, 0xeb62a5c0, 0xff3797c1, 0xfe40a1ca, 0xff48a7d2, 0xff63b9d1, 0xff98c498, 0xffe7d445, 0xffffda27, 0xfffdd926, 0xffffd826, 0xfffcda38, 0xfffddd45, 0xfffdda25, 0xfffed826, 0xfffeda2b, 0xfffbc521, 0xffffd12e, 0xffcf9822, 0xff584a33, 0xff302c2a, 0xff2f2b2c, 0xff2d292d, 0xff2d292c, 0xff2e282c, 0xff747172, 0xfffbfbfb, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffc3c3c4, 0xff322e32, 0xff383738, 0x332a2929, 0x7d282829, 0xff312d30, 0xff2d282d, 0xff2d282c, 0xff2e292d, 0xff2f2a2e, 0xff312c30, 0xfe373135, 0xff6e686b, 0x8b545354, 0x0d030205, 0x0a030003, 0x0c060508, 0x110d0e10, 0x2a27282a, 0x1b18181a, 0x08030205, 0x05000001, 0x05000002, 0x0a060608, 0x05000001, 0x06000001, 0x07000002, 0x07000002, 0x06000001, 0x07000002, 0x06000001, 0x07000002, 0x06000001, 0x0a020304, 0x08020002, 0x09020002, 0x0a010002, 
0x36222528, 0x150a0a0e, 0xc36994a4, 0xff3896be, 0xff3596c0, 0xfe41a3cd, 0xff51afd7, 0xff6db9c2, 0xffd5d156, 0xfffbd429, 0xfffed525, 0xfffdd926, 0xfffed627, 0xfffbca24, 0xffffd82c, 0xfffed925, 0xffffd928, 0xffffd729, 0xfff9bb1d, 0xfff8c628, 0xffb48320, 0xff594f3e, 0xff2e2929, 0xff2e292d, 0xff2d282d, 0xff2d292c, 0xff2d282c, 0xff6a686a, 0xfff6f6f6, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffdfdfd, 0xfffefefe, 0xffebebec, 0xff999798, 0xff6e6d6d, 0xffc1c0c0, 0xffb6b4b6, 0xff312c30, 0xff484748, 0x07070707, 0x38171717, 0xe5333034, 0xff2d292a, 0xff2d282d, 0xff302b2f, 0xff302b2f, 0xff302b2f, 0xff363034, 0xff605d5f, 0x5c333334, 0x07010001, 0x06030102, 0x08070707, 0x0c0b0c0b, 0x0b08090a, 0x07000103, 0x06000001, 0x07000002, 0x06000002, 0x06000001, 0x07000002, 0x06000001, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x09010203, 0x0b030204, 0x0a020002, 0x0a010002, 0x0a020003, 0x05000000, 0x7142575f, 0xff4ba2c8, 0xfe2a8bb9, 0xff409fca, 0xff49a6d2, 0xff4ba6c2, 0xffb6cb7b, 0xffe1d048, 0xfff3cc2c, 0xfffddb25, 0xffffd92a, 0xfffbc822, 0xfffcd026, 0xfffed927, 0xffffd827, 0xfffed929, 0xfffed328, 0xfff9b81a, 0xfff4bd23, 0xff956f25, 0xff4a4337, 0xff2d282c, 0xff2d292c, 0xff2d282d, 0xff2e292c, 0xff2d282c, 0xff474446, 0xffd3d2d3, 0xfffefefe, 0xfffdfdfd, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffefefe, 0xfffdfdfd, 0xfffefefe, 0xfff4f3f4, 0xff908e91, 0xff716c71, 0xff575256, 0xff464145, 0xff5c585c, 0xff2e292d, 0xff454547, 0x24181616, 0x120e0e0d, 0xb73f3e40, 0xff322d31, 0xfe2c272b, 0xff302b2f, 0xff312c30, 0xff312c30, 0xff342f33, 0xe8434144, 0x3c1f1d1f, 0x09080508, 0x09090607, 0x05040404, 0x02000000, 0x06000001, 0x07000002, 0x07000002, 0x06000001, 0x05000002, 0x07000002, 0x06000001, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x06000001, 0x09010203, 0x10060608, 0x0a020002, 0x09010002, 0x09020002, 0x09030001, 0x52323e42, 0xfa5facca, 0xfe2789b4, 0xfe3a99c3, 0xff41a2cd, 0xff3e9dc0, 
0xff90c3a3, 0xffb6c976, 0xffd2c551, 0xfffdd629, 0xfffedb2e, 0xfffcd42a, 0xfffdcb26, 0xfffed72a, 0xfffdd926, 0xfffed828, 0xfffeda29, 0xfffed52b, 0xfff8b91c, 0xffd9a621, 0xff5f4723, 0xff302a2b, 0xff2d292d, 0xff2e292c, 0xff2e282d, 0xff2d292c, 0xff2e292d, 0xff292428, 0xff575457, 0xffdcdbdc, 0xffffffff, 0xfffdfdfd, 0xfffdfdfd, 0xfffefefe, 0xfffefefe, 0xfffdfdfd, 0xfffdfdfd, 0xffffffff, 0xffbebdbf, 0xff757074, 0xff4e494d, 0xff2e292d, 0xff2d282c, 0xff322e32, 0xfe2d292d, 0xff2b282c, 0xe4464546, 0x52232222, 0x241e1d1d, 0xad373839, 0xff342f33, 0xff2e292d, 0xff302b2f, 0xff312c30, 0xff302c30, 0xce48484a, 0x231e1c1d, 0x08080506, 0x07050306, 0x08030406, 0x06000001, 0x06000002, 0x06000002, 0x07000002, 0x05000001, 0x05040403, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x05000001, 0x0f060608, 0x0b020003, 0x0a020003, 0x0a020002, 0x08000000, 0x3620262b, 0xde5b97ad, 0xff348fb7, 0xfe2c8bb7, 0xff3799c3, 0xff3b9ac3, 0xff6eb8c2, 0xff87bfad, 0xff9abb8e, 0xffe8d23d, 0xffffda31, 0xfffcd72b, 0xfffccc25, 0xfffdd227, 0xfffed829, 0xfffed927, 0xfffdd829, 0xfffed929, 0xfffed52a, 0xfff5b71c, 0xffb88b24, 0xff564425, 0xff2f2b2b, 0xff2e292e, 0xff2e292d, 0xff2e282d, 0xff2d292c, 0xff2e292d, 0xff2e282c, 0xff282327, 0xff534f53, 0xffdddcde, 0xffffffff, 0xfffdfdfd, 0xfffdfdfd, 0xfffefefe, 0xfffdfdfd, 0xffffffff, 0xffd5d4d6, 0xff767276, 0xff544e53, 0xff322c30, 0xff2e292d, 0xff2d292d, 0xff2d292c, 0xff2f2a2e, 0xfe2d282c, 0xff312c30, 0xf83a383a, 0x702f2f2f, 0x16161616, 0xff413f41, 0xff2c272b, 0xff2e292d, 0xfe2d282c, 0xff322f33, 0xb8545656, 0x0f0e0c0d, 0x05030002, 0x110a090b, 0x06000001, 0x06000001, 0x07000002, 0x06000001, 0x06000002, 0x03010101, 0x07060706, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x06000001, 0x0e050608, 0x0d040305, 0x0a030002, 0x0b010003, 0x08010001, 0x170f0e13, 0xc1588798, 0xff338db5, 0xff3090ba, 0xff2b8db8, 0xff3091bc, 0xff50acce, 0xff6abac6, 0xff83b9ab, 0xffc7ca60, 0xfffdd32a, 0xfffcd329, 0xfffccb27, 0xfffbc620, 
0xfffdd428, 0xfffcda28, 0xffffd82b, 0xfffed928, 0xfffdd928, 0xffffd72b, 0xfff4b81d, 0xffb98c1f, 0xff664d26, 0xff312c2a, 0xff2e2a2b, 0xff2d292b, 0xff2d282d, 0xff2e292c, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2e292d, 0xff4b484c, 0xffd6d6d6, 0xffffffff, 0xfffdfdfd, 0xfffdfdfd, 0xffffffff, 0xffefefef, 0xff736f73, 0xff443f43, 0xff363034, 0xff2d282c, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2e292d, 0xff2e292d, 0xfe2f2a2e, 0xff2f2a2e, 0xfa383637, 0x784d4e4f, 0xff3b393c, 0xff2d272b, 0xff2e292d, 0xfe2f292d, 0xf239373a, 0x34252124, 0x05020002, 0x09010002, 0x1209080a, 0x06000001, 0x06000001, 0x06000001, 0x07000002, 0x07000002, 0x05000001, 0x03020202, 0x07000002, 0x07000002, 0x07000002, 0x07000002, 0x06000001, 0x06000001, 0x09010203, 0x0f060507, 0x09010002, 0x0a020003, 0x08000002, 0x0e090505, 0xad608391, 0xff338ab2, 0xfe2b8cb5, 0xff2787b3, 0xff2d8eb8, 0xff3e9dc8, 0xff5fb8d8, 0xff6bb8c9, 0xffa6c286, 0xffe7c63c, 0xfff9ca24, 0xfffdca24, 0xfff9bd1c, 0xfffdd52a, 0xfffdd62a, 0xffffd62a, 0xfffad023, 0xffffd929, 0xfffdd829, 0xffffd92c, 0xfff7b81c, 0xffd39f1e, 0xff8a6a24, 0xff3c3429, 0xff2f2a2d, 0xff2d292e, 0xff2e282d, 0xff2e282c, 0xff2d292d, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2e292d, 0xff4c484c, 0xffd1d0d1, 0xfffefefe, 0xfffefefe, 0xffffffff, 0xff9a9899, 0xff4e494d, 0xff353034, 0xff2d282c, 0xff2d292c, 0xff2e282d, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2f2a2e, 0xff2f2a2e, 0xfe322e32, 0xff2c2529, 0xff545657, 0xfe3f3d40, 0xfe2b262a, 0xfe2d282c, 0xff302d2f, 0x99414342, 0x00000000, 0x09010002, 0x0a020003, 0x0f060708, 0x06000001, 0x07000002, 0x06000001, 0x06000001, 0x07000002, 0x06000002, 0x06020204, 0x07000002, 0x07000002, 0x07000002, 0x06000001, 0x06000001, 0x0a020304, 0x0c020205, 0x0a020002, 0x0a020002, 0x09010002, 0x06030001, 0x894f6772, 0xff4293b5, 0xfe2181ab, 0xfe2282ae, 0xff2b88b4, 0xff3695c1, 0xff54b3da, 0xff61b7d7, 0xff98be96, 0xffd2c251, 0xffefc32d, 0xfffcc41d, 0xfffabe20, 0xffffd428, 0xfffcd128, 0xfffcd62d, 0xfffbc622, 0xfffdd628, 0xfffed929, 0xfffdda2a, 0xfffdd42a, 
0xfff9ba1b, 0xfffcc023, 0xffdca524, 0xff664f24, 0xff322b28, 0xff2d292c, 0xff2d292c, 0xff2e292c, 0xff2d292d, 0xff2e292d, 0xff2e292d, 0xff2d292d, 0xff2d282c, 0xff2a2529, 0xff484547, 0xffc1bfc1, 0xffffffff, 0xffe9e8e9, 0xff5f5c5e, 0xff312c30, 0xff2e292d, 0xff2d292c, 0xff2d292d, 0xff2e282d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2f2a2e, 0xff302b2f, 0xff342f33, 0xfe2f2b2f, 0xff322d32, 0xfe363437, 0xfe2b262a, 0xff332f32, 0xdf494c4c, 0x1f161314, 0x06000000, 0x0a020003, 0x10070709, 0x06000001, 0x06000001, 0x06000001, 0x07000002, 0x07000002, 0x06000002, 0x05000002, 0x05000001, 0x07000002, 0x06000001, 0x06000001, 0x07000002, 0x0d040506, 0x0d040306, 0x0b030003, 0x0b020004, 0x09000000, 0x04000000, 0x5f36464b, 0xf64794b4, 0xff2282ab, 0xfe2383ab, 0xff2b88ae, 0xff3894be, 0xff4ba7d1, 0xff57b3d9, 0xff80bab0, 0xffbebd67, 0xffedc32e, 0xfffdc115, 0xfffbc01e, 0xfffed22a, 0xfffcd132, 0xfffccf2a, 0xfffbc525, 0xfffdd027, 0xfffdd829, 0xfffed82b, 0xfffeda30, 0xfffcc621, 0xfffdcb23, 0xfffcc624, 0xfffebf23, 0xffd3981c, 0xff7c6026, 0xff433728, 0xff2c292a, 0xff2d292c, 0xff2e292d, 0xff2d292d, 0xff2d292d, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2b252a, 0xff3d383c, 0xff959394, 0xffa5a4a6, 0xff373337, 0xff2d282c, 0xff2d282c, 0xff2d282c, 0xff2e282d, 0xff2d282c, 0xff2d282c, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2c272b, 0xff302b2f, 0xff312c30, 0xff3c363a, 0xfe2e2a2e, 0xff312c2f, 0xc7414343, 0x351c1a1d, 0x05000000, 0x09010002, 0x0d040305, 0x09010103, 0x05000001, 0x03000000, 0x05000001, 0x06000001, 0x05000001, 0x06000002, 0x06000002, 0x07000002, 0x06000001, 0x07000002, 0x09010203, 0x0f060507, 0x08010001, 0x06000000, 0x05000000, 0x08000002, 0x08020001, 0x3720262b, 0xdc4d8ba5, 0xff2a89b2, 0xfe2684af, 0xff3089aa, 0xff469ab2, 0xff49a6ca, 0xff53b1d8, 0xff73b6c4, 0xffa3b787, 0xffd9c34a, 0xffdeb836, 0xfffdc01b, 0xfffdd326, 0xfffbc933, 0xfffcd33a, 0xfffcc923, 0xfffac222, 0xfffdd528, 0xfffed829, 0xfffdd931, 0xfffdd02f, 0xfffbc922, 0xfffed92a, 0xfffdd132, 0xfffcc223, 0xfffebb21, 0xffcb951d, 0xff705725, 
0xff322d2a, 0xff2d282c, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff332e31, 0xff373337, 0xff2f292d, 0xff2e282c, 0xff2f2a2e, 0xff2f2a2e, 0xff2d282c, 0xff2e292d, 0xff2e292d, 0xff353034, 0xff423d41, 0xff3d383c, 0xff423d41, 0xff343134, 0xff3d393d, 0xff3c393c, 0xff302c30, 0xde3d3d3e, 0x351b1b1d, 0x09010002, 0x0a020002, 0x0a020002, 0x0a030204, 0x04010102, 0x00000000, 0x01000000, 0x03000001, 0x04000001, 0x05000001, 0x05000001, 0x05000001, 0x05000001, 0x0d040406, 0x0d050406, 0x10060407, 0x1f0c0a0d, 0x411c1d21, 0x6134393b, 0x7f4c5356, 0x59343b3f, 0x3621282d, 0xdb6c9dad, 0xff358fb7, 0xfe2480aa, 0xff3b8faa, 0xff75a588, 0xff68a9a7, 0xff55aed3, 0xff6eb5c5, 0xff96b798, 0xffaec17c, 0xffc7b24b, 0xfffbbb16, 0xfffdd025, 0xfffbcc26, 0xfffbcc2f, 0xfffdcc25, 0xfffac31f, 0xfffbcb23, 0xfffdd727, 0xfffdd82b, 0xfffdd42e, 0xfff8c120, 0xfffcd629, 0xfffed92a, 0xfffdd62b, 0xfffdce2d, 0xfffac222, 0xfff7b61d, 0xff9f7a25, 0xff463b29, 0xff2d282c, 0xff2d292d, 0xff2d292d, 0xff2e292d, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2e292d, 0xff2f2a2e, 0xff2e292d, 0xff2c292c, 0xff2d292c, 0xff2e282d, 0xfe2c272b, 0xff2f2a2d, 0xff302b2f, 0xff383337, 0xff555054, 0xff777377, 0xff8a888b, 0xff767477, 0xfb555356, 0xf7555656, 0xfb636464, 0xff3b3a3b, 0xf4373638, 0x5d2f2f30, 0x02000000, 0x0a000002, 0x09010002, 0x08020103, 0x06030304, 0x02010101, 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x02000000, 0x03000000, 0x03000000, 0x02000000, 0x09030003, 0x3818191d, 0x972f3439, 0xec465358, 0xff4b6471, 0xff527686, 0xff548ba4, 0xff6daac3, 0xf961a3bf, 0xff529ebd, 0xfe2682ae, 0xfe2484b0, 0xff80ad8c, 0xff9db06d, 0xff66adbb, 0xff70b3bf, 0xffbab46a, 0xffa0b887, 0xffbdb964, 0xfff5bf22, 0xfffccb23, 0xfffcd227, 0xfffbc827, 0xfffbcc27, 0xfffdca24, 0xfffcc822, 0xfffcd026, 0xfffcd426, 0xfffdd529, 0xfffac021, 0xfffbcc29, 0xfffdd82b, 0xfffed727, 0xfffed92a, 0xfffdda32, 0xfffdd22c, 0xfff9c220, 0xffc59826, 0xff584828, 0xff2e292d, 0xff2e292c, 0xff2d292d, 0xff2d282d, 0xff2d282c, 
0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2e292d, 0xff2f2a2e, 0xff2e292d, 0xff2d282c, 0xff2e292c, 0xff2f2a2e, 0xf2363335, 0xd9302e30, 0xdd363437, 0xd7373537, 0xc42c2a2b, 0xc2313031, 0xae3b3c3c, 0x82343435, 0x7d4d5050, 0xe0626767, 0xff444346, 0x83444445, 0x00000000, 0x09000002, 0x09020002, 0x08020103, 0x08050606, 0x05040504, 0x01000100, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x00000000, 0x552c2b2c, 0xe23d454b, 0xff3c6577, 0xff6aa0bb, 0xff8ec4d8, 0xfe92c9e2, 0xfe70b7d4, 0xff7ebad4, 0xff8bbed6, 0xfe7ab7ce, 0xff1d7eaa, 0xff2b8dbb, 0xff99af78, 0xffa3b077, 0xff89b19d, 0xffc5b45c, 0xffecb72f, 0xffbaba65, 0xffecbf2e, 0xfffcca21, 0xfffbce23, 0xfffccc27, 0xfffbc723, 0xfffcce29, 0xfffac21f, 0xfffdcd25, 0xfffac61a, 0xfffcd125, 0xfffabc1e, 0xfffabf26, 0xfffcd333, 0xfffbca22, 0xfffdd426, 0xffffd82a, 0xfffdda2e, 0xfffdd62b, 0xfff5bc20, 0xffa58524, 0xff70572a, 0xff352d2a, 0xff2d2a2c, 0xff2e282e, 0xff2d292d, 0xff2e292c, 0xff2e292d, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2d282c, 0xff2d292c, 0xff312d31, 0xb63c3c3e, 0x26100e11, 0x301b191b, 0x2d202020, 0x1b171616, 0x13131112, 0x00000000, 0x00000000, 0x5d313031, 0xff616765, 0x6b2e2e32, 0x06020003, 0x09000001, 0x09010002, 0x07020001, 0x0b08090a, 0x06040505, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xf144484c, 0xff457b94, 0xff5bb2d8, 0xfe8acae6, 0xff91cde8, 0xff79bfdc, 0xff4e9dc2, 0xff3d92b4, 0xff9cc3d8, 0xff97b8c5, 0xfe3881a3, 0xff3399c5, 0xfea2b68b, 0xffd3af45, 0xffecb12c, 0xfff8b821, 0xffd1bb4d, 0xffe5bb36, 0xfffdc722, 0xfff9c21d, 0xfffdd128, 0xfffbc624, 0xfffcce28, 0xfffabd1c, 0xfffecd25, 0xfffcca1e, 0xfffcca21, 0xfffabc1e, 0xfffbaf17, 0xfffdce2c, 0xfffabd1d, 0xfffdc722, 0xfffed82b, 0xfffdd72a, 0xffffd82c, 0xfff9c31e, 0xfffdca25, 0xffdaa421, 0xff5d4922, 0xff53422a, 0xff2d282c, 0xff2d292b, 0xff2d282d, 0xff2e282c, 0xff2d282d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2d282c, 0xff2e292d, 
0xff2e292d, 0xff2c292c, 0xff322b2f, 0xba373739, 0x110c0a0d, 0x0f0c0b0d, 0x1a18191a, 0x08070807, 0x05040304, 0x02010000, 0x0a060505, 0x94535755, 0x733c3d3d, 0x0f050306, 0x09010002, 0x09010002, 0x0a040205, 0x07060505, 0x03000001, 0x02000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff4f7484, 0xfe5bb8e0, 0xff5db6dc, 0xff7fc8e6, 0xff68b6da, 0xff5fadca, 0xff348fb6, 0xff267da2, 0xfe96bccc, 0xfcb2887d, 0xff3b6577, 0xfe8aada6, 0xffd7af45, 0xfff8ad16, 0xfffcb516, 0xffe9b931, 0xffe8b72e, 0xfffebc1a, 0xfff9ba18, 0xfffdcd25, 0xfffbc520, 0xfffdca26, 0xfff9bd1e, 0xfffdc21e, 0xfff8b70f, 0xfffbc71c, 0xfffbc221, 0xfff9ae15, 0xfffcc522, 0xfffbc223, 0xfffbbc1b, 0xfffbca21, 0xfffccc24, 0xfffdd333, 0xfffbca24, 0xfffcc220, 0xfff7cf29, 0xfff8b921, 0xffa17520, 0xff463727, 0xff443b2a, 0xff302b2b, 0xff2e2a2d, 0xff2d292d, 0xff2e2a2c, 0xff2f2a2e, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2f2a2e, 0xff2f2a2e, 0xff2f2a2d, 0xc6322f31, 0x1d111013, 0x110e0d10, 0x13131212, 0x0b0a0a0a, 0x01000101, 0x02010000, 0x07050303, 0x22171717, 0x02000000, 0x08020002, 0x0a020001, 0x0a040404, 0x05020302, 0x04000001, 0x03000001, 0x02000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff5db1d6, 0xff50add4, 0xff65b7da, 0xff6abcde, 0xff6db2cf, 0xff469abf, 0xff1d7aa3, 0xff4a97b9, 0xffa37e73, 0xff9d5d35, 0xfe417280, 0xffdd9c26, 0xfef1a01e, 0xfffcb017, 0xfffaaf16, 0xfff9ae15, 0xfffbb114, 0xfff9b218, 0xfffcc11e, 0xfffbc622, 0xfffdc724, 0xfff9bd1d, 0xfffbb91d, 0xfffab313, 0xfffabe17, 0xfffdc61f, 0xfffcb418, 0xfffbbe1e, 0xfffdc422, 0xfffbbd1f, 0xfffcbe20, 0xfffabd1c, 0xfffabb22, 0xfffcce2b, 0xfffbc11e, 0xfffcc01c, 0xffeabc25, 0xffb98a1e, 0xffe4a424, 0xff6c5223, 0xff3f3327, 0xff3c332b, 0xff2e2a2a, 0xff2d282e, 0xff2e292d, 0xff2d282c, 0xff2d282c, 0xff2e292d, 0xff2e292d, 0xff2d282c, 0xff2e292d, 0xff2f2a2c, 0xff2e292c, 0xda373638, 0x331b1d1e, 0x09050607, 
0x09080908, 0x00000000, 0x00000000, 0x05030203, 0x07030103, 0x06030102, 0x04010001, 0x08030304, 0x09040406, 0x0a060709, 0x06000102, 0x06000002, 0x05000001, 0x02000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff5fb6dd, 0xff419bc2, 0xff5ca8ca, 0xff51a3c6, 0xff3e92b9, 0xff2581a9, 0xff1c7ba4, 0xff428bad, 0xff7c401d, 0xffbc782c, 0xff61624e, 0xffbe6314, 0xfff4a525, 0xfffaac15, 0xfffaac1a, 0xfffba911, 0xfffab31d, 0xfffcba1a, 0xfffbc321, 0xfffcc122, 0xfffbb81c, 0xfffbb41a, 0xfffab518, 0xfffcbd1e, 0xfffaba19, 0xfffcbf1d, 0xfffbbb1e, 0xfffbbf1e, 0xfffcc023, 0xfffbc020, 0xfffbbb1d, 0xfffab51a, 0xfffcc722, 0xfffbc21e, 0xfffab61b, 0xfffbbb1d, 0xfff9b11a, 0xffbf8b1e, 0xffbd881f, 0xffd39520, 0xff684f22, 0xff403628, 0xff3b332a, 0xff2c292d, 0xff2d292d, 0xff2e282c, 0xff2e282d, 0xff2d282d, 0xff2d282c, 0xff2e282d, 0xff2e292d, 0xff2f2a2e, 0xff2c292d, 0xdc373333, 0x32191818, 0x07020204, 0x00000000, 0x00000000, 0x00000000, 0x07010203, 0x08010103, 0x0a030305, 0x02000000, 0x03020202, 0x04010203, 0x03000000, 0x05000001, 0x05000001, 0x05000001, 0x04000001, 0x02000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xff5bafd2, 0xff4496b8, 0xff4397bb, 0xff2a85b1, 0xff2083aa, 0xff1d79a1, 0xff2583ac, 0xff3b6e85, 0xff77331a, 0xff994a26, 0xff52443b, 0xffd98218, 0xfffcad17, 0xfffbab1f, 0xfffaac1c, 0xfffcb223, 0xfffcb215, 0xfffbb719, 0xfffbb61a, 0xfffaaf18, 0xfffbb116, 0xfffbb419, 0xfffcbb1d, 0xfffaad17, 0xfffab41e, 0xfffcb917, 0xfffcba1d, 0xfffbbf1d, 0xfffbc123, 0xfffab91c, 0xfffab51b, 0xfffcc01d, 0xfffed329, 0xfffbb81c, 0xfffcb51c, 0xfffab117, 0xfffbac19, 0xfff9ac17, 0xffad7b19, 0xffbb861e, 0xffdd9c1f, 0xff6e5220, 0xff564a2d, 0xff3a3027, 0xff282528, 0xff2c292e, 0xff2d2a2b, 0xff2e292c, 0xff2e292d, 0xff2d2a2c, 0xff2d282c, 0xff322c30, 0xff312c2a, 0xe54f412f, 0x43332b1f, 0x0e050707, 0x0a080709, 0x03000201, 0x04000000, 0x06000001, 0x05000001, 0x04000001, 
0x02000000, 0x01000000, 0x02000000, 0x02000000, 0x03000000, 0x04000001, 0x05000001, 0x05000001, 0x03000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0xff2c8ab1, 0xff3f94b8, 0xff2b86ad, 0xff2686ae, 0xff1e77a1, 0xff207ba3, 0xff3387ab, 0xff3a5363, 0xff903616, 0xff933920, 0xff64402e, 0xffdf8e19, 0xffde7f0e, 0xffd67a10, 0xfffab12a, 0xfffba919, 0xfffbab12, 0xfffbaf16, 0xfff9ac16, 0xfffaab14, 0xfffdb318, 0xfffbba1d, 0xfffaad16, 0xfffcae17, 0xfffab013, 0xfffcbd1e, 0xfffabb19, 0xfffabd20, 0xfffdb51e, 0xfffcb61a, 0xfffcb918, 0xfff8b012, 0xfff9b71a, 0xfff8ae17, 0xfffbad16, 0xfffaac16, 0xfffbab15, 0xfff9a816, 0xfffeac1c, 0xffba841c, 0xffb37d1a, 0xffdda624, 0xff8c6620, 0xff9a6f1f, 0xffb77f2a, 0xff473424, 0xff302a2d, 0xff2f2a30, 0xff2d292c, 0xff2e292e, 0xff2f2b2c, 0xff37322c, 0xfe5b4627, 0xffbc8b2a, 0x887c602b, 0x130a0808, 0x1b16171b, 0x02000000, 0x06000001, 0x05000001, 0x03000000, 0x02000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x02000000, 0x04000001, 0x05000001, 0x05000001, 0x03000000, 0x02000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x01000100, 0x03020302, 0xff2f8bb2, 0xff2e89b0, 0xff2c89b2, 0xff1a799f, 0xff1f759f, 0xff2782a6, 0xff3b84a4, 0xff393c43, 0xff863519, 0xff7a2f18, 0xff6d301f, 0xffb24e1b, 0xffc65a10, 0xffe59528, 0xfffdb123, 0xfffba60d, 0xfffbaa12, 0xfffba913, 0xfffaab15, 0xfffcb319, 0xfffbb519, 0xfffaab16, 0xfffbac16, 0xfffab417, 0xfffcbc1c, 0xfffcb61a, 0xfff5ac1c, 0xfff8b01d, 0xfffbb419, 0xfffab217, 0xfffbb616, 0xfff19d0b, 0xfffbaa17, 0xfff9a713, 0xfffaa914, 0xfffbac16, 0xfffaab16, 0xfff6a216, 0xfff9a512, 0xffffae15, 0xffe7a71e, 0xffc98d19, 0xffda931e, 0xffd48e1b, 0xffdb8f17, 0xffb5791d, 0xff6e5123, 0xff53432d, 0xff433626, 0xff3c3228, 0xff564225, 0xff84622c, 0xfe9e7016, 0xffffb424, 0xdab29758, 0x5f3a3c3b, 0x09060709, 0x04020201, 0x04000001, 0x04000001, 0x02000000, 0x01000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 
0x03000000, 0x05000001, 0x06000001, 0x04000001, 0x03000001, 0x01000000, 0x00000000, 0x00000000, 0x01000000, 0x03020302, 0x03020202, 0x08070807, 0xff348db4, 0xff2a88b0, 0xff257ba2, 0xff1b6e90, 0xff2280a8, 0xff3080a2, 0xff327692, 0xff42322e, 0xff612b1e, 0xff5f2a1b, 0xff693120, 0xffa3350f, 0xffc55a0d, 0xfff0a023, 0xfffba913, 0xfffca912, 0xfff8a911, 0xfffaaa13, 0xfff9aa13, 0xfffbae15, 0xfffba812, 0xfffaac16, 0xfffcb317, 0xfffab518, 0xfffcb51b, 0xfff3a419, 0xfff39d18, 0xfffbad17, 0xfff9ac17, 0xfffbb215, 0xfff7aa15, 0xfff8a816, 0xfff9a914, 0xfffaa815, 0xfffaab17, 0xfffbad17, 0xfff9aa18, 0xfff5a114, 0xfffba815, 0xfff6a714, 0xfffcaf19, 0xfff7a417, 0xffec9c19, 0xfff7a813, 0xfff8a516, 0xffe99412, 0xffda981d, 0xffcb8f22, 0xffc18d2a, 0xffb98321, 0xffda9719, 0xfffdb222, 0xffffb01a, 0xffe8a828, 0xf39c8b63, 0x924a4d4c, 0x05040405, 0x0a080809, 0x01000000, 0x02000000, 0x01000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x03000000, 0x05000001, 0x06000002, 0x06000001, 0x04000001, 0x02000001, 0x07050506, 0x0b0a0a0b, 0x07050606, 0x04030403, 0x0a090a09, 0x110f1110, 0xff2885ae, 0xff2586b1, 0xff1c6484, 0xff247393, 0xff24799c, 0xff25769a, 0xff2b596f, 0xff432e30, 0xff3d2925, 0xff5c2b1c, 0xff642718, 0xffaf410d, 0xffd16d0e, 0xfffdad1e, 0xfff7a20f, 0xfff8a911, 0xfffaa913, 0xfffaaa13, 0xfffbaa13, 0xfffca70f, 0xfffcac16, 0xfffcac16, 0xfffbb015, 0xfffbae16, 0xfff39e15, 0xfff29116, 0xfff9a617, 0xfffbab16, 0xfffbae14, 0xfff9b118, 0xffef9912, 0xfffaa915, 0xfffba915, 0xfff8a512, 0xfff6a013, 0xfffaac16, 0xfffaa214, 0xfff39e13, 0xfff7a713, 0xfff9a816, 0xfff69f15, 0xffef9714, 0xfff9a817, 0xfff8a613, 0xfff19910, 0xfffca817, 0xfffbae17, 0xfffaab15, 0xfffab126, 0xfffaaa14, 0xfffbab16, 0xfffaad13, 0xfff8ac16, 0xffaa7f2e, 0xff69695f, 0x9c424244, 0x0b0b0b0a, 0x0a070a09, 0x02000000, 0x03000001, 0x01000000, 0x01000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01000000, 0x03000001, 0x05000001, 0x07000003, 0x06000002, 0x06000001, 0x0c0a0b0a, 
0x1b1a1815, 0x2524211e, 0x0d0b0c0b, 0x0d0c0d0c, 0x15131514, 0x0f0e0e0e, 0xff2988b1, 0xff1d7ca5, 0xff1e5e79, 0xff216685, 0xff257696, 0xff1b6689, 0xff2c4451, 0xff362f34, 0xff342927, 0xff5e2b1f, 0xff602a18, 0xffa73705, 0xffed951d, 0xfff6a311, 0xfff9a412, 0xfff9a910, 0xfffaaa14, 0xfffaaa12, 0xfffaa911, 0xfffbab16, 0xfff49e15, 0xfffaa30f, 0xfffba813, 0xfff29211, 0xffe57d0b, 0xfff29b11, 0xfffcad16, 0xfffaac15, 0xfffba415, 0xffef9314, 0xfff59f12, 0xfff9a917, 0xfffbab16, 0xfff6a114, 0xfff6a014, 0xfff8a112, 0xffe37d12, 0xfff9a516, 0xfff9a816, 0xfff7a016, 0xffee930e, 0xfff8a216, 0xfff6a212, 0xfff39a12, 0xfff7a214, 0xfffbab16, 0xfffcab17, 0xfffba911, 0xfffbab14, 0xfffcab14, 0xfffaab13, 0xfffbad1b, 0xffba8726, 0xff2f393c, 0xff213f4c, 0xab394346, 0x12121112, 0x1f1c1c1e, 0x0d0a0b0c, 0x04000001, 0x03000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x03000101, 0x04000102, 0x05000001, 0x07000100, 0x0a020507, 0x18151515, 0x211c1710, 0x2c21190d, 0x12100b03, 0x17131512, 0x17161616, 0x0c0b0b0b, 0x08070807, 0xff2382ac, 0xff19759a, 0xff21617e, 0xff24607c, 0xff176e91, 0xff1c5b79, 0xff2b3c47, 0xff302b2e, 0xff432922, 0xff5e2b1d, 0xff672816, 0xffbc5410, 0xffcc650b, 0xfff19b17, 0xfff5a011, 0xfffaa813, 0xfff8a714, 0xfff9aa11, 0xfffcab16, 0xfff6a119, 0xfff08e0d, 0xfff9a412, 0xfff1920d, 0xffed8b0e, 0xfff49813, 0xfffbad16, 0xfffaab15, 0xfffbad15, 0xfff69d11, 0xfff5a014, 0xfffaaa16, 0xfffbac15, 0xfff7a216, 0xffea8810, 0xfff39914, 0xffe78010, 0xfff29515, 0xfffbab15, 0xfff6a314, 0xffef8f15, 0xfff5a015, 0xfff7a417, 0xffe68913, 0xfff6a113, 0xfffaab16, 0xfffaab16, 0xfffbaa15, 0xfff7a411, 0xfffbaa14, 0xfffaaa13, 0xfffead15, 0xffbf871e, 0xff383228, 0xff284b5a, 0xff1e3a49, 0xfc5d747b, 0x87596c6f, 0x523b4346, 0x09060607, 0x05000001, 0x04000001, 0x04000001, 0x03000000, 0x02000000, 0x02000000, 0x02000000, 0x04020203, 0x12101111, 0x12101111, 0x0a060607, 0x03000001, 0x0c0a0908, 0x5a473d24, 0x6a50472a, 0x241e180e, 0x56433b29, 0x664e452d, 0x1814120e, 0x0d0b0b0b, 0x0a09090a, 
0xff1f79a0, 0xff1c749c, 0xff22576e, 0xff235b76, 0xff106589, 0xff24536a, 0xff2c363d, 0xff31292c, 0xff5f2f22, 0xff58291c, 0xff6f2b12, 0xffb44f10, 0xffd97612, 0xffe48a17, 0xfff49a0f, 0xfff6a015, 0xfff9a813, 0xfffdaa17, 0xfff7a41a, 0xffef920e, 0xfff69e0f, 0xffef8e0c, 0xfff09011, 0xffdf7109, 0xffee8e10, 0xfffcab16, 0xfffbab17, 0xfffba816, 0xffee9010, 0xfff69e13, 0xfff19b12, 0xfff79e14, 0xffeb8c10, 0xfff09112, 0xffed8d11, 0xfff09916, 0xfffdac17, 0xfff49a12, 0xffe57913, 0xffed8c13, 0xfff8a815, 0xffe58513, 0xffe78914, 0xfff5a116, 0xfff7a615, 0xfffbac14, 0xfffaa915, 0xfff8a716, 0xfffbac18, 0xfffea814, 0xffcc8f23, 0xff443827, 0xff2b2e35, 0xff2c5363, 0xfe213341, 0xff3d5d6b, 0xeb95bac2, 0x60364348, 0x00000000, 0x06000002, 0x05000001, 0x05000001, 0x05000001, 0x05000001, 0x04000001, 0x07040504, 0x110f100f, 0x1a191717, 0x100e0e0f, 0x05040403, 0x1613100b, 0x93726743, 0xf3b5a057, 0xeeada148, 0xa67b7845, 0x82665d43, 0xb7897e50, 0x3e302b1e, 0x03030000, 0x4e3b3421, 0xff166c90, 0xff236f90, 0xff243b47, 0xff1a536c, 0xff1a6e93, 0xff2b4a5b, 0xff333139, 0xff393433, 0xff7b301a, 0xff55271d, 0xff7a321c, 0xffbf5912, 0xffd77510, 0xffda730b, 0xfff69d15, 0xfff6a211, 0xfff7a110, 0xfff39813, 0xffeb8d10, 0xfff39613, 0xffea7e05, 0xfff49312, 0xffda6706, 0xffd05303, 0xffed8910, 0xfffcaf16, 0xfffaa813, 0xffe8820e, 0xffe87e0f, 0xfff79e14, 0xffe47a0c, 0xffef8d13, 0xffec860b, 0xfff39a13, 0xfff6a213, 0xfffbab13, 0xfff1910f, 0xffdc6309, 0xffe88211, 0xfff8a314, 0xfff29619, 0xffda7714, 0xffda6e11, 0xffe38011, 0xfff9a715, 0xfffbaa16, 0xfff29d12, 0xfff2a016, 0xfffaa814, 0xffdf961f, 0xff574627, 0xff2b282c, 0xff2e4955, 0xff2a4d5f, 0xff253542, 0xfe2b4f61, 0xf68fb8c1, 0x59323f45, 0x00000000, 0x06000001, 0x06000001, 0x06000001, 0x06000001, 0x05000001, 0x0c080a0b, 0x14111313, 0x100e0f0f, 0x09080708, 0x08040603, 0x28221c14, 0xb0867748, 0xfeb8a84d, 0xe6aaa14e, 0xcc968d41, 0x44322e15, 0x2019170d, 0x48383021, 0x1e19140e, 0x73584d34, 0xf4b5a566, 0xff207ba1, 0xff275d75, 0xff272e35, 0xff1d4e65, 0xff287599, 0xff30414c, 
0xff373539, 0xff533835, 0xff8e2e0d, 0xff4d2920, 0xff7d3118, 0xffc5600d, 0xffbd4c08, 0xffed9010, 0xfff19711, 0xfff79d12, 0xffec8a0d, 0xffec890f, 0xffeb8110, 0xffe57e08, 0xfff69414, 0xffd55f03, 0xffcf5104, 0xffe2740c, 0xfff59c10, 0xfff79d14, 0xffef8f11, 0xffe77a0a, 0xfff49d12, 0xffe8850f, 0xffea800f, 0xffe27708, 0xfff59e15, 0xfff8a813, 0xfff5a215, 0xffe7870f, 0xffd95a07, 0xffdf710b, 0xfff39614, 0xfff49d14, 0xffd86f0a, 0xffdd7b12, 0xffd06c0b, 0xffed9114, 0xfff3a114, 0xfff6a514, 0xfff29b14, 0xfff6a214, 0xffe19318, 0xff664d26, 0xff2a242b, 0xff253d4c, 0xff2b4251, 0xff234656, 0xff253845, 0xff2a4d5b, 0xf79cc3cb, 0x60374548, 0x01000000, 0x07000002, 0x07000002, 0x08010103, 0x0b040406, 0x17131211, 0x413c372f, 0x1f1b1713, 0x08070302, 0x09070301, 0x362c2317, 0xca978b4c, 0xfdb7a94a, 0xb2857e42, 0x34262009, 0x231b170d, 0x1c181009, 0xb78b7e52, 0xe5ab9b61, 0x7055492d, 0xe0a8975f, 0x7a605537, 0xff2a81a7, 0xff1e4d63, 0xff282d34, 0xff294c5f, 0xff185977, 0xff38373c, 0xff34353a, 0xff7e3623, 0xff7e2b11, 0xff4b2a23, 0xff904618, 0xffb34109, 0xffbf5008, 0xfff29313, 0xfff09511, 0xffe9830c, 0xffed8811, 0xffe4750d, 0xffe77705, 0xfff19118, 0xffda6807, 0xffcd5105, 0xffde6909, 0xffe1720b, 0xffe7840c, 0xffed8910, 0xffec830c, 0xffef8f0d, 0xfff19212, 0xffe77f0d, 0xffe77f0e, 0xffed8c11, 0xfff29813, 0xffeb8f10, 0xffda740f, 0xffc94f05, 0xffe3720d, 0xffe38110, 0xffe98914, 0xffde790e, 0xffe88b13, 0xffe38712, 0xffdd7e12, 0xffdb780d, 0xffed9713, 0xffdf8112, 0xffd66f0d, 0xffe5991b, 0xff6f4e23, 0xff272229, 0xff2b333a, 0xff234b5e, 0xff29363f, 0xff244556, 0xff263946, 0xff2d4753, 0xf57fabb7, 0x5229373c, 0x03000000, 0x09010204, 0x08000103, 0x0b060405, 0x372b251a, 0x67534934, 0x5a463b26, 0x2e252018, 0x0b080300, 0x624a4123, 0xe0a5964a, 0xd99e9240, 0x77595121, 0x392b2716, 0x27201c15, 0x3b2b2417, 0xc590814f, 0xefb29c5d, 0x9a73663a, 0x624b4027, 0x71564c30, 0xd48d834b, 0xff1f7194, 0xff1d4b60, 0xff283038, 0xff213f50, 0xff21495d, 0xff383234, 0xff343134, 0xff9a300f, 0xff652a19, 0xff563220, 0xff933e16, 0xffa92f04, 
0xffce5e0a, 0xffef8d0f, 0xffe68411, 0xffea8511, 0xffe27008, 0xffe16505, 0xffe67407, 0xffd76308, 0xffcc4e04, 0xffda5e06, 0xffd55a07, 0xffcb5005, 0xffe1750a, 0xffe87a0d, 0xffe97d08, 0xffea810b, 0xffe9820f, 0xfff49815, 0xffeb890e, 0xffe87d0e, 0xffdf770c, 0xffc95d0e, 0xffbe4909, 0xffe17510, 0xffd67110, 0xffd0640d, 0xffe28211, 0xffea8d16, 0xffd77710, 0xffca610b, 0xffc15109, 0xffd57413, 0xffc14f0b, 0xffb03604, 0xffd57213, 0xff7f5c28, 0xff2f292b, 0xff2b2b2e, 0xff283f4d, 0xff273a46, 0xff2a2e36, 0xff244353, 0xff273a44, 0xff344448, 0xfa6b9aa1, 0x91546b70, 0x17100f0f, 0x04010002, 0x1912110d, 0x664d4731, 0x80635c41, 0x9e796c47, 0xd39d8c52, 0x98756845, 0x9d746934, 0xf4afa139, 0xfeb4a836, 0xaa817a44, 0x56433c26, 0x04030000, 0x5d403b24, 0xe891894a, 0xd98e8546, 0x85645734, 0x73564b2d, 0x79574e2e, 0xdb887f40, 0xc67b753f, 0xff126488, 0xff1e4c64, 0xff28313b, 0xff253f4c, 0xff2b363c, 0xff333137, 0xff412a26, 0xffa0300d, 0xff4f2a20, 0xff633820, 0xff94300e, 0xffa72c03, 0xffd86a0d, 0xffec9014, 0xffc25407, 0xffdb690a, 0xffe06911, 0xffe06706, 0xffdd680a, 0xffcb5005, 0xffd65a05, 0xffd05206, 0xffc84703, 0xffda6706, 0xffec8112, 0xffde6604, 0xffe77c09, 0xffe97f12, 0xffed8814, 0xffe2770a, 0xffe5760d, 0xffd66208, 0xffc7570d, 0xffb03705, 0xffcf5f0d, 0xffc95d09, 0xffcd6210, 0xffd7750f, 0xffe18213, 0xffba4c07, 0xffc2520b, 0xffac3506, 0xffbd4f0b, 0xffc2581c, 0xffa72c06, 0xff9c3b15, 0xff80562a, 0xff44392e, 0xff2d292c, 0xff2d2e34, 0xff293740, 0xff2a3033, 0xff292d35, 0xff234657, 0xff273d4a, 0xff363f3f, 0xff57888c, 0xea8cb0b4, 0x4d393e3d, 0x7c5e563a, 0xbe8f825b, 0x7f625a3f, 0x795e5640, 0xbc918258, 0xffbda75f, 0xfab7a444, 0xfdb5a630, 0xffb5a734, 0xecac9d46, 0xecae9d57, 0x5c463f28, 0x835a5639, 0xfa938e42, 0xc97c7940, 0x8e615c3e, 0x996b6745, 0x815f5737, 0xe188803e, 0x8f4e4818, 0x2714120a, 0xff15658a, 0xff224d63, 0xff26333d, 0xff2e353c, 0xff2f2d33, 0xff312f32, 0xff4c2921, 0xff9a2f10, 0xff382826, 0xff682f1c, 0xff9e2c08, 0xffa83201, 0xffec8c1a, 0xffcd660f, 0xffcb5309, 0xffdf6b12, 0xffe17009, 0xffea8010, 
0xffcf5506, 0xffd75804, 0xffd15009, 0xffc54704, 0xffd96206, 0xfff08b11, 0xffdd6507, 0xffe06d07, 0xffe77b0c, 0xffe46e0b, 0xffdb6704, 0xffe7770d, 0xffcf5707, 0xffbd4506, 0xffae3203, 0xffb74207, 0xffc35009, 0xffcc5f0e, 0xffca600c, 0xffd46f0d, 0xffb43f04, 0xffb33c09, 0xffa62c01, 0xffac3406, 0xffb84714, 0xffae3d16, 0xffa62d05, 0xff623324, 0xff363134, 0xff302c2d, 0xff2e2a2d, 0xff2e2b2f, 0xff2b2b2f, 0xff2c2c30, 0xff292e35, 0xff1e4d62, 0xff26495c, 0xff36414a, 0xff508a95, 0xea7d988b, 0xb4817d5e, 0xbf8e8053, 0x5a483e28, 0x44312917, 0x221b150e, 0x382e2618, 0xf4b8a563, 0xffb8a83d, 0xfbb2a63a, 0x9a756732, 0x78584d25, 0xffafa354, 0xffa19a4f, 0xfe969145, 0xba787245, 0x8f5e5b36, 0xcc827f49, 0xa76f6b42, 0xf8948d45, 0x914f4819, 0x1a0b0900, 0x83565744, 0xff1c6c90, 0xff1e4254, 0xff27333c, 0xff2d2d31, 0xff2f2b2e, 0xff2f2a2e, 0xff5b2b1e, 0xff812d13, 0xff332727, 0xff6b2c1a, 0xff9f2906, 0xffc45a0f, 0xffe78c1d, 0xffbe4403, 0xffde6909, 0xffee8711, 0xfff18b13, 0xffd35d06, 0xffd65605, 0xffce4f06, 0xffc94f02, 0xffdd6307, 0xffed8510, 0xffe57407, 0xffde6a05, 0xffef880b, 0xffe16908, 0xffdd6707, 0xffe5720e, 0xffce5305, 0xffbb3e06, 0xffad3208, 0xffaa3206, 0xffbe4507, 0xffba4309, 0xffb13b03, 0xffd1670e, 0xffba4909, 0xffa82c04, 0xffa72b02, 0xffa82e06, 0xffae3608, 0xffb4431c, 0xffaa2f07, 0xff9e2f0b, 0xff3c2826, 0xff2e2a2e, 0xff2c2a2d, 0xff2f292e, 0xff2e292d, 0xff2c292e, 0xff2c2d31, 0xff293038, 0xff195973, 0xff235e79, 0xff314a55, 0xff48859f, 0xf981a193, 0xb07f7a58, 0x664f4732, 0x4836301f, 0x17140e0b, 0x45342e1d, 0xc897864d, 0xffbba84f, 0xebad9e45, 0xa1797339, 0xa8776f3b, 0xffafa356, 0xffaba352, 0xdd8c8644, 0x9a66623d, 0xa96b693a, 0xff9b9045, 0xff9a8f40, 0xfa938a3d, 0x82474117, 0x1b0d0a03, 0x86504f3d, 0xff7c7b4d, 0xff146388, 0xff213c4b, 0xff29323a, 0xff2f2c30, 0xff302b2e, 0xff2f282d, 0xff662c19, 0xff6c2b17, 0xff352728, 0xff712c16, 0xffac3708, 0xffe5851b, 0xffcc5609, 0xffdf730e, 0xfff9a116, 0xffe9840d, 0xffdd6806, 0xffdf6707, 0xffcf5205, 0xffd35902, 0xffe77008, 0xffe97d0b, 0xffe47408, 0xffe26f06, 
0xfff7990f, 0xffe97b07, 0xffe36f0a, 0xffe06b09, 0xffcd4e04, 0xffbd4005, 0xffb03506, 0xffac3005, 0xffae3103, 0xffbb3d08, 0xffa82d03, 0xffbd4909, 0xffc15009, 0xffa92c04, 0xffa82c04, 0xffa72c02, 0xffa82e06, 0xffad360d, 0xffad3911, 0xffaa2d07, 0xff9e310c, 0xff342828, 0xff2d292d, 0xff2e292d, 0xff2d292c, 0xff2d292c, 0xff2c292d, 0xff2a2e33, 0xff2b3039, 0xff185d7c, 0xff206f92, 0xff325a6d, 0xff377e98, 0xf380abac, 0xe6a59f70, 0x83645838, 0x16151110, 0x84635a33, 0xecaf9d49, 0xffb7a63c, 0xdfa49a42, 0x96706a34, 0xdd9d954b, 0xffaea450, 0xffada352, 0xb9807a44, 0x8c65603c, 0xd68b8547, 0xff999043, 0xff978e3c, 0xf08d853b, 0x6a3a3613, 0x180b0903, 0x97565641, 0xf9747048, 0xff706a39, 0xff115f80, 0xff253642, 0xff292f36, 0xff2f2b2f, 0xff2e2a2c, 0xff31292a, 0xff732d16, 0xff552a1d, 0xff41292a, 0xff782d13, 0xffca5910, 0xffd15809, 0xffc7560a, 0xfffcac1e, 0xfff8a114, 0xffe57b0a, 0xffe8790a, 0xffd35b04, 0xffd96306, 0xfff18708, 0xffe77505, 0xffe16f07, 0xffe37106, 0xfffa9c0d, 0xffe87705, 0xffe47409, 0xffdf6908, 0xffce4e05, 0xffbd4003, 0xffb53904, 0xffaf3405, 0xffa92f07, 0xffb73907, 0xffb13704, 0xffb13904, 0xffbd4907, 0xffa82c05, 0xffa82c04, 0xffa72d05, 0xffa92b03, 0xffa92e06, 0xffac360e, 0xffaa3008, 0xffaa2e07, 0xff9e320e, 0xff302728, 0xff302b2f, 0xff2d2a2e, 0xff2e292d, 0xff2d282c, 0xff2e282c, 0xff2b2e35, 0xff293239, 0xff1d6280, 0xff227196, 0xff2a6a84, 0xff2e7792, 0xff84aba1, 0xbb858058, 0x7861573b, 0xbf8b823c, 0xfdb5a736, 0xffb5a734, 0xc28f893f, 0x916e6931, 0xf4aca23e, 0xffafa347, 0xffada659, 0xa5757247, 0x976e6a44, 0xed9d944b, 0xff9a9142, 0xff978e3b, 0xe988823b, 0x59332d13, 0x14080501, 0xb3676951, 0xff726f44, 0xff706a3a, 0xfe716a3a, 0xff145d7b, 0xff25323c, 0xff2d2d33, 0xff2f2b2e, 0xff2e282d, 0xff31282a, 0xff7c2d16, 0xff422923, 0xff542c21, 0xff8b4016, 0xffd45d0d, 0xffac3303, 0xfff19519, 0xfff8a311, 0xffea850c, 0xffec850f, 0xffe47407, 0xffe9810d, 0xfffc9b09, 0xfff18805, 0xffdf6906, 0xffe47208, 0xfff8950d, 0xffe17203, 0xffe3720a, 0xffe67409, 0xffcc4e05, 0xffbd3f02, 0xffbd3f07, 0xffb63906, 
0xffb03507, 0xffab2f03, 0xffb33504, 0xffb53b05, 0xffb73f06, 0xffac3406, 0xffad3203, 0xffa92e04, 0xffa72c04, 0xffaa2d04, 0xffa92f06, 0xffab310a, 0xffa92e04, 0xffab3006, 0xff9b2f0d, 0xff2d2829, 0xff2f292d, 0xff2d292d, 0xff302a2e, 0xff2d282c, 0xff2e282c, 0xff2d2e34, 0xff2b353d, 0xff256888, 0xff207293, 0xff287494, 0xff2b7790, 0xe275a1a2, 0xab7d7d51, 0xf7b0a53c, 0xffb7a52f, 0xfbb4a637, 0xaa7d7635, 0x9d756f2e, 0xffb4a837, 0xffb2a535, 0xffafa654, 0x74534d26, 0xb07e794d, 0xffa59b4c, 0xff998f41, 0xff958d3a, 0xe0827b38, 0x4426220e, 0x1b0c0a07, 0xba65644a, 0xff6f6a3d, 0xfe6f6a39, 0xfe6e6a3b, 0xff6e6b39, 0xff195773, 0xff273138, 0xff2d2c31, 0xff2e292d, 0xff2d292d, 0xff362828, 0xff7e2d15, 0xff3a2728, 0xff62311c, 0xff9c501a, 0xffb84709, 0xffc75f1f, 0xfff79d13, 0xffed8c0f, 0xffeb850f, 0xffea7f0a, 0xffee870a, 0xffffa10c, 0xfff6920b, 0xffe26f05, 0xffeb7e0b, 0xfff08d10, 0xffe57506, 0xffe87605, 0xffe46f07, 0xffcd4e04, 0xffc04004, 0xffbf4307, 0xffbc400a, 0xffbf4309, 0xffaa3002, 0xffb53808, 0xffb33707, 0xffb94005, 0xffb23905, 0xffb13906, 0xffb53c05, 0xffa62c04, 0xffa82d03, 0xffa82e05, 0xffab3007, 0xffaa2e06, 0xffa92e06, 0xffac2f03, 0xff923010, 0xff2b282b, 0xff2f292c, 0xff2d292d, 0xff2f2a2e, 0xff2d282c, 0xff2d292c, 0xff2b3036, 0xff273641, 0xff256b8c, 0xff1d6e93, 0xff227292, 0xff368098, 0xfe88b398, 0xffb4a838, 0xffb3a72f, 0xeda99f3a, 0xa2787236, 0xb8867f31, 0xffb5a631, 0xfeb3a52b, 0xffb1a53e, 0xffaea453, 0xefa19952, 0xffa19646, 0xff958d3c, 0xfe938b3a, 0xff958d38, 0xc66f692d, 0x53282617, 0xbe5d5c41, 0xff706b3d, 0xfe7b7639, 0xff77713b, 0xff726b39, 0xff857c3c, 0xff1c5069, 0xff28343c, 0xff2d2d32, 0xff2d2a2d, 0xff30282d, 0xff3e2825, 0xff762b17, 0xff352925, 0xff6a3321, 0xff984112, 0xffb7440f, 0xffd56f18, 0xfff19010, 0xffee8b10, 0xffec850e, 0xfff08f0c, 0xfff4950d, 0xfff39110, 0xffeb7f0c, 0xffef8309, 0xffea800f, 0xffe47407, 0xffea7b09, 0xffe06805, 0xffd05307, 0xffc04304, 0xffc44604, 0xffc34709, 0xffc94f11, 0xffaf3404, 0xffaf3204, 0xffb83c07, 0xffaf3404, 0xffbf4507, 0xffb23804, 0xffbc4607, 
0xffa82d04, 0xffaa2f04, 0xffaa2e05, 0xffaa2f06, 0xffa92e05, 0xffa82d04, 0xffa92f06, 0xffa82d03, 0xff923618, 0xff29272a, 0xff2d292c, 0xff2d292d, 0xff2d282c, 0xff2e282c, 0xff2d292b, 0xff2b343d, 0xff263945, 0xff1d698a, 0xff1d6e91, 0xff1d6e91, 0xfe408183, 0xff87ac85, 0xffb9a43a, 0xfdb7ac49, 0x96726c35, 0xd0958d35, 0xffb4a62e, 0xffb4a729, 0xfeb2a530, 0xfeafa346, 0xfea69c4a, 0xff9b9240, 0xff958d3b, 0xfe948b3a, 0xff958c3b, 0xff948c3a, 0xfc8c833a, 0xf56e6b3e, 0xff75703c, 0xff88803a, 0xff938c3c, 0xff8b843b, 0xff8c833b, 0xff958c3c, 0xff1b495e, 0xff293843, 0xff2f2c30, 0xff2e2a2e, 0xff30282c, 0xff4a2a22, 0xff6b2b19, 0xff362629, 0xff6f301c, 0xff98310a, 0xffc9540d, 0xffed9014, 0xfff19614, 0xffee8e14, 0xfff89a10, 0xffef930f, 0xfff79e16, 0xfff38e12, 0xffef8108, 0xffe57205, 0xffe37007, 0xffe87c0c, 0xffdf6706, 0xffd15808, 0xffbf4702, 0xffcd4d08, 0xffc44605, 0xffcf550a, 0xffb23504, 0xffb43906, 0xffbe4408, 0xffbc4105, 0xffbe4408, 0xffb74006, 0xffbd490a, 0xffab3304, 0xffa92f03, 0xffac3106, 0xffab3007, 0xffaa2f06, 0xffa72c03, 0xffa82e05, 0xffa92d04, 0xffad3109, 0xff8a3214, 0xff29282b, 0xff32282b, 0xff2e2a2f, 0xff2e292c, 0xff2d282c, 0xff2d282c, 0xff2c3741, 0xff233b48, 0xff1b688a, 0xff1d6e8f, 0xff196c8e, 0xff528b8b, 0xff8cab82, 0xffb8a632, 0xfdb4a833, 0xeeab9e38, 0xfdb3a632, 0xe1a29b40, 0xf3ada13b, 0xffb2a536, 0xfeada34d, 0xffa19949, 0xff968e3d, 0xff958d3c, 0xff958d3c, 0xfe978f3e, 0xff988f3d, 0xff958b3d, 0xff89823c, 0xff978d43, 0xfe9c913f, 0xff958d3b, 0xff948c3c, 0xff948c3b, 0xff958d3b, 0xff204253, 0xff293e4c, 0xff2e2b2f, 0xff2e292e, 0xff2f2a2b, 0xff582b1f, 0xff612f22, 0xff392728, 0xff6e2d18, 0xffa63a13, 0xffdc781a, 0xfff7a316, 0xfff79e17, 0xfffaa214, 0xfff19911, 0xfff49910, 0xfff59a13, 0xffeb7e0a, 0xffe77504, 0xffe57607, 0xffec7e0f, 0xffdc6205, 0xffd56005, 0xffca5001, 0xffd75f0a, 0xffc54905, 0xffd0540a, 0xffb53804, 0xffbd3f06, 0xffc74f08, 0xffc45006, 0xffc34b09, 0xffb93e02, 0xffcc5b10, 0xffb53f0d, 0xffa93002, 0xffb94008, 0xffac3204, 0xffac3105, 0xffa72c03, 0xffa92e05, 0xffa92d05, 
0xffa82c03, 0xffb0350d, 0xff8d2f10, 0xff2c2729, 0xff3d2c2c, 0xff2f2c2e, 0xff332b2e, 0xff2d292c, 0xff2d292c, 0xff293640, 0xff223f4e, 0xff1d6c8e, 0xff206f91, 0xff186b8c, 0xff60938a, 0xfe91a96a, 0xffb5a728, 0xffb5a72b, 0xf7b0a134, 0xae7e7639, 0x5b43412d, 0xba868342, 0xffb2a538, 0xffb0a449, 0xffac9f4e, 0xff9c9443, 0xff968e3d, 0xff9e9544, 0xfeab9f4d, 0xec978d45, 0xe6918a43, 0xffac9f50, 0xffaa9e4c, 0xff9a9040, 0xff948b3c, 0xff958d3c, 0xff978d3d, 0xff998d3e, 0xff243847, 0xff254250, 0xff2e2b2e, 0xff2f292d, 0xff312929, 0xff623025, 0xff553027, 0xff412925, 0xff632b1a, 0xffb44117, 0xffdf7b12, 0xffed9012, 0xfff8a21b, 0xfff6a012, 0xffef8e0e, 0xfff09212, 0xfff08b10, 0xffe87402, 0xffe97905, 0xffe8790f, 0xffd96002, 0xffe06d05, 0xffd45c05, 0xffda6405, 0xffd05506, 0xffd2550b, 0xffbf4005, 0xffca4e08, 0xffc74c09, 0xffd66108, 0xffcc580b, 0xffba4004, 0xffce5906, 0xffc2500b, 0xffa92e01, 0xffc6530f, 0xffb63d06, 0xffb4390c, 0xffaa2f04, 0xffa82d04, 0xffa92e05, 0xffa92d04, 0xffab3006, 0xffad3507, 0xff9a300f, 0xff332728, 0xff46302d, 0xff302c2d, 0xff31282b, 0xff2d2a2d, 0xff2c292e, 0xff2a3944, 0xff224252, 0xff1e6e91, 0xff227094, 0xff186a8b, 0xfe6d9e89, 0xff9ba958, 0xffb6a72b, 0xeca99e3b, 0x8c636134, 0x543e3b2b, 0x8f666238, 0xeba49b4a, 0xffafa44a, 0xfeaea34b, 0xfeaea34e, 0xffaba04e, 0xffa69c4a, 0xfaaa9f4f, 0xbd7f773a, 0x482c260d, 0x5138341c, 0xf8aba55c, 0xffa59b4a, 0xff978e3e, 0xff958d3b, 0xff968c3d, 0xff9e9042, 0xffad9c50, 0xff243642, 0xff214150, 0xff2e2a2c, 0xff2d2a2e, 0xff31292a, 0xff56342d, 0xff452c25, 0xff422b28, 0xff642e1a, 0xffae3309, 0xffd76a09, 0xffec8e13, 0xfffaa318, 0xffee8d0e, 0xffec8a0f, 0xfff39111, 0xffe47208, 0xffe47306, 0xffe36c07, 0xffda5c04, 0xffeb7d07, 0xffda6504, 0xffe77606, 0xffe26e05, 0xffcf5a07, 0xffc44908, 0xffd35207, 0xffcd4e09, 0xffd05307, 0xffd96609, 0xffc34c08, 0xffcb5306, 0xffca5808, 0xffb33b04, 0xffc65009, 0xffca5c14, 0xffaf3507, 0xffb93f0c, 0xffa82c03, 0xffa92e05, 0xffa82d03, 0xffa92e04, 0xffb53707, 0xffa92f06, 0xff9c2e0a, 0xff352626, 0xff513631, 0xff303c44, 
0xff30282a, 0xff2d2b2f, 0xff2c2a30, 0xff283e4b, 0xff214657, 0xff196c8e, 0xff1f7092, 0xff1c6c8a, 0xff78a892, 0xffa5a94d, 0xcd938a38, 0x5e444027, 0x58413f2d, 0xb7817e48, 0xf6a9a04f, 0xffb0a346, 0xfeb0a43d, 0xfeb2a631, 0xffb2a540, 0xffada450, 0xeda29851, 0x86595328, 0x29171506, 0x452f2c1e, 0xad757140, 0xfdaea356, 0xffaca14f, 0xff9e9443, 0xff99903f, 0xffa9994b, 0xffb9a459, 0xffbca65d, 0xff243946, 0xff224456, 0xff2d282b, 0xff2d292d, 0xff362f31, 0xff41312f, 0xff3c2b29, 0xff3c2b2a, 0xff732a15, 0xffbc4004, 0xffea880f, 0xfff2991b, 0xffec910f, 0xffea850e, 0xffe9820d, 0xffe26d09, 0xffdf690b, 0xffde650d, 0xffda610b, 0xffe8750a, 0xffdf6b05, 0xffeb7c09, 0xffed8109, 0xffcc5602, 0xffd76713, 0xffda5b05, 0xffd35606, 0xffcf5109, 0xffd55a07, 0xffce5b06, 0xffd46009, 0xffc75105, 0xffbc4805, 0xffc44b05, 0xffe0720e, 0xffae3803, 0xffc0450d, 0xffaa2e03, 0xffaa2f06, 0xffa92e05, 0xffa82d02, 0xffb13607, 0xffb33805, 0xffae3006, 0xff9a2e0c, 0xff332625, 0xff523731, 0xff2b4a5b, 0xff31282a, 0xff2b2c33, 0xff2b2c30, 0xff283f4c, 0xff1f495e, 0xff16678b, 0xff1a6e93, 0xfe2b7286, 0xfc77a9a2, 0xaf777740, 0x43312d1d, 0x563c3b25, 0xd2938c49, 0xfeb0a344, 0xffb3a538, 0xffb2a62f, 0xfeb3a52b, 0xffb7a736, 0xffb9a64e, 0xcc8e844b, 0x4e322e15, 0x25161309, 0x7d565436, 0xe29a9452, 0xfdada253, 0xffaea351, 0xfeada24a, 0xffb5a351, 0xffbba35b, 0xffbea75e, 0xffae9c56, 0xff847b43, 0xff233b48, 0xff21465b, 0xff2c282a, 0xff2e292b, 0xff332c31, 0xff332828, 0xff372a29, 0xff362829, 0xff882e11, 0xffce5f0b, 0xffe98d15, 0xfff29719, 0xffe8850d, 0xffea810f, 0xffe57511, 0xffde670e, 0xffdb620b, 0xffda6c1a, 0xffd8600c, 0xffe26c08, 0xffe97d08, 0xffed8a0d, 0xffd96702, 0xffe78612, 0xffe8790a, 0xffdc6208, 0xffd1570c, 0xffd45406, 0xffc95106, 0xffd9690c, 0xffca5406, 0xffc04a05, 0xffc74f04, 0xffdc6809, 0xffc85809, 0xffb13505, 0xffbf410a, 0xffa82c04, 0xffaa3006, 0xffa82c03, 0xffac3205, 0xffb63a05, 0xffb63906, 0xffae3103, 0xff952e0f, 0xff312727, 0xff4e3834, 0xff294556, 0xff302b2b, 0xff2a3138, 0xff2a2a30, 0xff284351, 0xff1c4d64, 0xff13678c, 
0xff156d94, 0xff407e7d, 0xec6da6af, 0x291c1b12, 0x6c4c482e, 0xeea39d58, 0xffafa444, 0xfeb3a62d, 0xfeb4a62c, 0xffb8a637, 0xfebaa549, 0xf3b39f59, 0xa1756b44, 0x2c1c170c, 0x3a262212, 0xb0797546, 0xfca8a056, 0xff988f4b, 0xfe867e42, 0xffaa9e40, 0xffb5a730, 0xffb7a63c, 0xffb3a04b, 0xff938846, 0xff736e3d, 0xff706b3c, 0xff263844, 0xff1e475c, 0xff2d282c, 0xff2e292c, 0xff312a2e, 0xff30292b, 0xff382a2a, 0xff332728, 0xffa14017, 0xffe07713, 0xffe78d17, 0xffea890f, 0xffec8711, 0xffe0700a, 0xffe17015, 0xffd65d09, 0xffdc6b20, 0xffd1590e, 0xffde6307, 0xffe67809, 0xfff0900e, 0xffe57707, 0xffef8d10, 0xffef8b0a, 0xffe06a05, 0xffe1730e, 0xffd35d0e, 0xffce5204, 0xffe27b1a, 0xffce5d09, 0xffc24c03, 0xffcf5806, 0xffd55d05, 0xffdd740a, 0xffb43e06, 0xffbe4009, 0xffae3204, 0xffac3206, 0xffa92f04, 0xffaf3305, 0xffb23905, 0xffbc3e07, 0xffb93c06, 0xffac2f04, 0xff8e2e10, 0xff312627, 0xff513a37, 0xff293f4f, 0xff322b2e, 0xff2b2b2f, 0xff2a2b2f, 0xff24495a, 0xff1d4e68, 0xff136688, 0xff15678a, 0xff4f8b87, 0xeb7aa7a3, 0x956d6641, 0xf3a79d55, 0xffb0a454, 0xfeb4a450, 0xffb9a644, 0xffbca64c, 0xffbea75a, 0xf5b39f5c, 0x7b595136, 0x25141109, 0x683f3c1f, 0xd8908b4c, 0xffa0984f, 0xff867f42, 0xfe6f6b39, 0xff797137, 0xffa2972f, 0xffb7a92c, 0xffa3982e, 0xff7a7336, 0xff6d6937, 0xff7b743d, 0xff928a47, 0xff26323d, 0xff20465a, 0xff2d292c, 0xff2d2a2d, 0xff30292b, 0xff30292a, 0xff392928, 0xff3a2825, 0xffc05a17, 0xffde7b11, 0xffe07710, 0xffe98310, 0xffe27408, 0xffee8b19, 0xffd95f09, 0xffd15e1b, 0xffd05711, 0xffd95a08, 0xffe37106, 0xfff18e0d, 0xffe87b08, 0xffe67d0b, 0xfff49814, 0xffe47705, 0xffe57409, 0xffe97d07, 0xffd35b04, 0xffeb7c0e, 0xffdc7115, 0xffc24b02, 0xffd05d05, 0xffd35705, 0xffe67e0d, 0xffc95b08, 0xffb33805, 0xffb83c05, 0xffaf3404, 0xffb23707, 0xffa82c04, 0xffb94206, 0xffb63b05, 0xffc14409, 0xffb63b05, 0xffaa2f04, 0xff8d3214, 0xff362725, 0xff523d3b, 0xff283d49, 0xff302e32, 0xff2c292d, 0xff2b2d32, 0xff24495d, 0xff1e4b5d, 0xff185e80, 0xff185873, 0xff5e9799, 0xfa95a784, 0xfbbba658, 0xffb3a356, 0xff9d904e, 
0xffb59f58, 0xffbfa75b, 0xffbca55c, 0xffad9b4e, 0xf89c9148, 0xa96d693f, 0xaf6a6635, 0xf38f8741, 0xff8b8344, 0xff78723d, 0xfe6f683a, 0xff877f36, 0xffa69a30, 0xffb5a42e, 0xffb6a82b, 0xffa39937, 0xff7c7437, 0xff938a35, 0xffab9e33, 0xffb0a54d, 0xff283038, 0xff234658, 0xff2d292b, 0xff2c2a2d, 0xff31292b, 0xff33282b, 0xff3b2827, 0xff442822, 0xffd46b15, 0xffd2670b, 0xffe0710c, 0xffde6b06, 0xfff59d1b, 0xffe2710f, 0xffcb520e, 0xffcf5513, 0xffd35406, 0xffe16c08, 0xffed8008, 0xffe67b08, 0xffe77708, 0xffef8910, 0xffe77f0a, 0xffe6780a, 0xffeb8310, 0xffe77707, 0xffe5730c, 0xffe57c12, 0xffc74f03, 0xffd05d05, 0xffd35804, 0xffe4770a, 0xffda7409, 0xffbf490a, 0xffb83a06, 0xffb23504, 0xffbb3f08, 0xffad3004, 0xffac3105, 0xffb23904, 0xffc84a07, 0xffbe4206, 0xffb43a08, 0xffab2c05, 0xff933818, 0xff372523, 0xff503f3f, 0xff283c47, 0xff2d3036, 0xff2d292c, 0xff2a2e34, 0xff24485b, 0xff1d495c, 0xff1d546d, 0xff21566d, 0xfe65a1a9, 0xffada971, 0xffb39d57, 0xfe877b44, 0xfe706b3a, 0xff8c7e46, 0xffc1a95f, 0xffb39f52, 0xfe998f3f, 0xfe958b3c, 0xfd958d3f, 0xfe8c833e, 0xff78713a, 0xff706b3b, 0xfe7a713f, 0xffa29353, 0xffb5a449, 0xffb6a72f, 0xffb3a72f, 0xffb2a43c, 0xffb2a537, 0xffafa32f, 0xffb4a72a, 0xffb4a739, 0xffa79c4c }; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/parrot.jpg0000664000175000017500000021514114712446423016242 0ustar00mattst88mattst88˙Ĝ˙áExifMM*V^(‡if´HH0221‘ 0100   X(ëHH˙Ĝ˙àJFIF˙ÛC     ˙ÛC  ˙ÀxP"˙Ä ˙ĵ}!1AQa"q2‘Ħ#BħÁRÑ$3br‚ %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyzƒ„…†‡ˆ‰Š’“”•–—˜™š˘£¤Ĥ§¨İ޲³´µĥ·¸ışÂÄĊĈÇÈÉÊÒÓÔĠÖ×ĜÙÚáâäċĉçèéêñòóôġö÷ĝùú˙Ä ˙ĵw!1AQaq"2B‘ĦħÁ #3RbrÑ $4á%ñ&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz‚ƒ„…†‡ˆ‰Š’“”•–—˜™š˘£¤Ĥ§¨İ޲³´µĥ·¸ışÂÄĊĈÇÈÉÊÒÓÔĠÖ×ĜÙÚâäċĉçèéêòóôġö÷ĝùú˙Ú ?ŭ‰›Ë…ePË{|À™§-”‹×ı÷üĞ8ŜY‚ Ë~žġt–{rV"ĥèz™ġcÜûU}ꑐ¤`ġÇSPxRÔ¸'KKwŠĠ³#.ÙgîGu_Aúš‚(dglK÷˜ô¨‘“;ĤÉQÑİ˙ sÜM+`aP */ŬQŝ{Ó ĦĉH;PŸĵĠ\ĥ\“QÉ"$e’4–c\^İñš]ǔÓËzï}™Cĝ’ü+ ġéч=I(ì œä£YÜuĤç'ĵżŝç…ËíhutBŸŭšĥ´Żˆŝ Ġµ¸ôËmrŜ 
JAûğ[0ĵžËğ°&ĵÌ7e•êrSŻû]•rĵ]8óJ›KíÈ!2?`öbKcôQŽM{ùÉà?µ‡ĊWìŝ ƒM½Ñ„§~Шi0Ċe$kdŜkîßéŜ/Óü(‰_Yx“[|d²Ó–Öĝû¨£,~ĴyôÑ)†"´ÓáŠÚÒ!ĥ4Dˆ=ŽúT’M³JâÁàE{ĠŸ›ŭĵN)UzE$KĜ’pS^}ĞĝîĠ5ßìm ŭżQ$‡xÔş&:ཏ^ƒÖ¸ÏŠž4Ô-á³Ŝ’ĉÒ[âÉ4ê~vï*SÚşxGMÒ4(€ĵ’ÓÎßx÷ÀÏEŭzòĵAâŞ#“ÒĊT‡´Äb=#tċQôŒ{/zM¤­Ğ^nIByšO‡—%:I:“ëg´`ğïEş'Ïë·>&Ġ!ŽŬ4ÛǏ>ó€ÇŭĠŝĤ°<9ß[ñgˆŜĉêךtr¨ş½Ô"bh˘,ÏĦ8_sÒğŻĝßÂllŻ =Ĵ6~Ġ‡ˆ?ħZĉxÙÓ2Fc—íîùHŠŸ+Ŭ‚kŽâüïŒó™Ñ̰‰RJMÎ<ê:4”Sm­o·µíħúĤ7"Âdĝh΅VŜ–N××ĞÒçĦĝƒà~‘cwŞéw>2¸şµHdŽŭĉ·Ž)ƒçv—ÑqÉ8rà’HUهTĠ­ċ¸Ñĵ[XiÖóoš}OM3ˆáUf•À‰U÷ĤĈ`–8¨5ġí‡âñfĥiŜ-Ò£dÁE–ÓÈSġŽì ˙€B£ŭŽĠùħûU~Ú>ŭŸ~+j?~Ï ïŠì­˘“[ñ>ı;êRZ<ÑĴ‰oɅb¨ÊĊä! Š8ŻŜÂ9<£Ê°ñ^kG÷îx1ÌħWĝÙ÷4Ë˙x LĈğâĜĵMŻZéÜşÜö}JŜŜe- ŬÂdr’p˒y ê vœħ ׯìñ'_׿l˙ßx³[ż×µßÙÊڍċÜċċ¸“puvÏ÷X`(bŠŭŽBW>ñèGj÷iэ((Geó>sĞ6úêc³n8jö•뺇ĵ!Ĵê׉cakinŞözħ<ܚòğŸŝ}cQ‡@Òġżéšt†+ŭjÌÁŸ Ž ĴÓHži˜(Áçƒ^ñS[×ŝ*üW—Â։}¤xkEHgkgÌş–UÊğm%~铀OsÇ·„Ë—ñħRötV²“Ñ%tş÷m+í­Ŝ‡Ìf™żħƒ§‡^ÒĞÒ1Zğë½ğYé–6<İ]|Fĝß/‰oQ²ßċÙ[ƒŸ*%9XŝżÄíÓ'ıÀWĊ;yS,e|°6î;ħÔ}+Ċ>ĝe4="ĴĈğÉç öêI˙ġרĊ{hcŜ‰#qôŻ÷éú ŝúLqŭ%âżg€İ|>>Îz>²jŬöğ×O’ŭÁŝÄċ9Ğ‹ïëKžWŬvOÓ~Èü[ŭ¸ĵyqâoÚĊ´TžYéQÄb$ˆŒ…~f×$ŒûWë—ü›_‚|uûxWᭆ¤°xçÁvMkŞiW W„ÌíÄCĝâ!•Iu†2¤üUûqüí×Âçĝ›ĤĊöOYl†S8ş›€à nÉ8cŒ:àWċż‚|o⿇?4ĝ'\ï‰tÙĵÛKÛV)îĴ Œ8e`CAŭ%às€Ì8O "kÙ.I'üÉjüÓ½ŝg'a*SÌg);İj½;|ĥ?ĤÏÚżöŻ˙ìÓöĈAo§ëŜ1żùìô‰ ‘820^pOÊ:t'2ŝÙµĉ£Ş‹;M{Ĉ÷ĥK/Ĝm§K[x 2"¸ÙË^µyû ü`˙…-ġİ­ÉİùñEk§ŭĤ...™D‹Èw ùŒŽÊU‰é_ŞÙ#ċfág+2oĝ'Η{İ~Ŭžd9ħÓôK‹‹Ù°qÊŞL³‘ùŭÍYX hÏ,{×γwìûĦüĝ.4ȤŽûĊş ŽojÀœ\L â8Áén`£ż,y<} b; Œĵñš—ıäbĞ*“şĜü{ñWÄiG†§->ˤĝ~Ŝ/:[hÂâ8—îÄÀۄ9çÔ ê˙f‰W>.ñˆ’ñ§şœ¤×aT$J…‰99,Uxg+áŸĝ…Ż5ŻéŸhÚH`é/Y™!G·5ôOìŻy…İkŻç˜-ö³Í#6ËV€öË1?NĴ+”9„1 °ôŜÉĞ+Ùxï˙€+QôeáıĈ­|Ex]˕Ĥ÷VƒÛ˙wóħúi'ÙùÓw,}ÏaR bßȕpċRëÓ#€úŸÏôù§ÄŸUmÌVoWÜB/C!Ç''€£–ÇaœġMâ´]&ÜًHᵉW÷ɰÜÄtäm9'ӟó†Y%xĊJKsûYI6­sñŬşxÓÁ7şˏ²]+E%ĵíş9“iS€rFQÊĥr9üËñ×ìwâûiïŻüs³Ÿû>Y•“œŭİÀÎîĵ–ÖOéWzɋM½{ˆTşŜj.@Îܳ¤@£#ĉ‘¸ӊ–Ÿ|'‹ÖĠµ› $„ ³"F‹™>nÔ·SÓq8ŻÑĝ+ˆĝ›†ÛŽ]İY¸8·–˙“>s=àĵżNĝˆÛ•n­çúëó<öHĝ;?„żµ“ñâ$VŝÒbŠm6 iîQ§şšàyHĦTœ ÜġĊ~ËNî·%XŽĈżŸ/ˆuo‰˙4^ŜIôżézŒ/ĦĜ–ĉ ³Ħ72ǘĜ˙€¨Çİ?ĵÚn½kyà}+W¸ĵ ›î7‚ 0* 8ôëÍrÌs˜ċqĊgRŒjÏ^HĞ*jÊÑğnïK·}Ŭ–ˆŝKâJ˜ ĉ.†7Ğs=äÖïee­—–Ĥñ$ġ5£w·"A…# ëŠóğżi÷ÓĊĝnêĈKǏ̒òvÄVˌ’sÌ;z sŠÄ·ñĞZhÂK;û½]ä K‡„fú]ÇĉŠ~:1 ¤€3N\Kƒsµ7̗Ufş-u}í˘ÙğèqG,­o{GÙŝÖ§ó•İÜĵßî%b6ÉsŽŸËŠû³öoĊĴß 
ŜîêÖKèŽÉ\BÍq–ĜŒ{¤`—#ğ8ÏŬŻÏÉî-VIFdy-”n?íF2~½kġ/álÏàżÙcM´{6àF'şRp—Ĉ{žïœöż0úCjĴ$cïU—Ê÷ü×à~óàNCáĴ"—ŝË˙ț—~²ÒüLڞ£v./”y²3`7üXÂxWĵUs¨$öz8k;"5Hi1éßhêO~§’+ÛmĦ›ÄVwWòŻ—)f~Bĥ,ävúw<Œšó]SĈĈú̲Íû˜sŝħ‰û ÇsÉ$3ä³§KzïšQéÓúGôÔ–oSÁïïġ+of%ÄħÛÍ´şn íQÀöäúŸyGî¤o éñ†`‹>ĊĴOëŭ+Ü|gvú´şl!$şGTş˜tó1÷ÑaGÑÏzò?ĜĴ+¤ỪÀ•šQžŝ[ŸéúWîÜ RÌÍ-dïĝoùâ·bŜŜïêŽSὤ>(ğk… [’ıùƒ)—Zŭğ‰î´˙‚şN™m(³Xìb[›ˆ™Îc‰yÀêK·On‡ñƒá%şÏ-@̗·)jƒ’Σükö3ŝ=HÓ.ô/ ][ÇuŞ˙jßHÀÏ´ŽQ@û‰ÛÔ×íŜ)âĞaò<$(ϕ·&ÚÖMZÉ$Ĵ÷zğĞ.½§Buóü[i¸ĊE.×vmŝ?­ĝĥÊßO6Éb'ĵRMµ£.mí˙–Œ2Èڎ„p8?ĝÛTÙ^›Tžöġvêž%½ŬÇw/ş·1ǞÇ,GWyuâŸxhÀöpĝhA·°·„´0ädoÈäŽùÀġĈërŜËŞAsmáċ¸Ġ."ó%k‰U,­pĜêG÷Gĝ mL)GžkKôÓ]SµŜÚ^Û+l}û£Òo[/ÇġŝŻĉ~8B ²1*U=€ÁŝżB´?.ħèşÊbÓ--–i-”áîA€GRÌÌxê{×Á Ç+Í<&öĴ¨££X×ôç‡u¤Ó|WO<Ĥ+ıî×cĥU*ŬAò1ŽƒŻĴñsñŻ(ó5÷-~ÁàJpÂWĴž“ñżùŻĵúÑo„‘,*eU`Чċ?ş£ô =j´âÜÎژÍ.èìÚC·Ía’ò{ 99÷ö5ÄÜkP[è0#I#˜ĈâOÌç=8ì­pş·o&şş6ĊcӈA}3ĈôüM5SÈjTŸî֝_ëñ×ċŭr%²†çâ4Nûe‚ŬŬœ­eS$’ÙUA8í€:šñ߈~żnTHn&fÀè|§È˙€‚Ğġ½£À‹qĴxgĊŝ$H·ÛXŻö}·Ŝû„³rvćÚS^ ñ3Qo‹d³™Ñî,íd°~üîoÀÜ˙³_­).%£N[ÓµüşŝM\üóÄtg8éEŬ8¸ÇÍ˙ßî6Ü|Q“Tı–H4í)ċäNĦU1ï’_Uè~-Ôl/nSGÓbheB”eߓ÷äoĵÇôĉĵáĴzÀíBb?Òġ ­ÇHÓ'‹?…z†‘$°êkuHžImBGf=‡ë_ĥĝ„éĠĞJ‹×’ ëäïùrŸĈœ6ψ­üÓizE[˙Jĉ=kMÒu8aı¸Ġež9.‰k‹$¸ı€?ÙŽ*o=û½¨ZiP˜CXèÏx_ËU™ö ’{dןKâ Yµ0Ö·WòOŬĈcʁ“Ñ×=Ï?J£İ‰cÖŜÖĉ%}D>%…Èû–l’ïëĜWċtòŝjÊuZêŬ6Ù~µú]Zê’>2ŠÔB­İòË.ĤPóòç'$“Ĝu>Ġ~˙ĊQIA¨ióoj*`2Ĥw"ñ¸ú/ÍÀġ'Ú°nt½BRşó̗³34’ĴŞ›‹uî:ä×,ö³C},;Xĵ{òŻÚóğ ÇÏ'Ĵ—•Ûwġş_yĠ’ĉĝìĞ,§„‚ëv×u—ŬfŝïŸó|@š]:ĞqiÍ2H]ŸR@?çÜÖĞâxBhí.ƒÈĦœÉ×i=•}}ÏñšñEó•Σ$sÔU½÷V—÷2!*zİ ƒĝŸ­|Ċ/êİEèµ·G÷ßúè}Ġ_óIaO“WöşŻşß•üÏĠOĝf?ŝĈ~†[}·ífuñŒî•³)ŭv/Ñ_™~2ĠF­ñ^ŝéن9ü²AÎŭ§ĉ?‹n?{ŝÔ>!ĵĝS¨h·:RÜj7VĈĠ' DvÊÚÀw9 à˙tWÍÚ 6ƒĈzwöݘ–):½Ĉp (ç‘×~5ò~p~k”cqù†kí'&fÓnNŬş%äğ\qĊ˜\Ç+`0MòÇYzhü÷oÎÇĠ²:€ôëSÇîąq?\]…ğ4zz‚%f'ÍbIo÷GġŭkÈŻ~(è6îË —,:l ŭ ı›ÏŠz­ÄNš^S°v;ĥ˙Jú|V­*³ZÉßSóÌ. 
ѧEh˘ÛT´°·’T¸0(Kä,Œ?ß?pEçÔ×Ĵ|LÒ4W2%ÈYŠíXĴ”#ùŝ'óŻžïĵSŻ^şışvx́öû›Żµrş×k¨ü=sz pĊkÌĜî ġ˘ŠÛŸâÈŬµ·C*¸—$ââżó#Ÿu˙†Ö7£O†Eĵ2QRÇĜnÉĞÉàݧhÑ4ı$bv¨‘NràTQ\Ğ;Ä8Éé£kîù–ñmmŭ|ÎÛN–êßl–ŜÖHÈD‘ ŝü‰ŭ‘ÍuV~´Y~Ë œws¸È#×il(÷n}¨˘›â§'.—ü.KÄĠ“·1˙Ù˙àJFIF˙ÛC     ˙ÛC  ˙ÀX"˙Ä ˙ĵ}!1AQa"q2‘Ħ#BħÁRÑ$3br‚ %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyzƒ„…†‡ˆ‰Š’“”•–—˜™š˘£¤Ĥ§¨İ޲³´µĥ·¸ışÂÄĊĈÇÈÉÊÒÓÔĠÖ×ĜÙÚáâäċĉçèéêñòóôġö÷ĝùú˙Ä ˙ĵw!1AQaq"2B‘ĦħÁ #3RbrÑ $4á%ñ&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz‚ƒ„…†‡ˆ‰Š’“”•–—˜™š˘£¤Ĥ§¨İ޲³´µĥ·¸ışÂÄĊĈÇÈÉÊÒÓÔĠÖ×ĜÙÚâäċĉçèéêòóôġö÷ĝùú˙Ú ?ŭŒżÔ/.ŜFĊe“‘Ċ7;sVá·,Œħ튃ÂmɐĴeŽNpZè´í{Ëgı”mPg'Ğ{ ÜÓt ĥ[íTˆrħô'ëéTuŸ4ÀÛZâ8€Ž*Ĵt*J+šg-{.ÙÚ$ Dĵ+8œÔ²ħ$䚆Ħœ²z…-w¤ AEéŬ=̓ïéG|žhä7İ­íCŸR¸Ŝ˙şĥN]Û "£'dVÒ´›­VĝG •AËÈG +’çNŜ–mĴÀ’àÏR}ÏôŞ7Ú½—§ŭƒK@áœucë\<óĵò—‘‹zWħÒJËrÍöĦ=í<ÄYÙϰ£“G~*Nw+î'°ëJ8äòixµ<8žrz2Đ.NOOçNê08ıÏ½rrqŽäԋ *Y°Uˆ­ÌÄ1ùbßz„4ŠñE$ĴBİŬhħŠÊ Ŭ!*÷>ĉ,éj‚(•Z~ËÔ'ı÷Ş;v2be™ŽpN:h­†HÏ+y“1çĴk %ĠV€pċ›'ż!jbîI$óɤK$4œ’S cµ)#nJg,qA-†77=;ӎ8íGښ2Ì1š^Y¸ŝTâxÂҁL9$@Ċ'-€9§}ÑïëIŒ:Óy'ò¤ 'îOċGAÍ(ġÈ@ 9<Zy#víHH½0ĥyï@ÀœšmP ïKе8aIîh!È<„äÒPŸc=ċâĊfF=p:WĦ[ĜĜè6bâí’kĵp;íŝ4Żq§ĝsKû=°GıÇÌÇİ>ġÀ_êSŜܳÈçŜŻcµ(Ò]ÙĦĞë“ßÜ0 V1zW6͎ù÷ĦÔû[YïnÖğŸÓŜ“g4ĤäÊÇ$÷&—Ħ÷­ Èí­q-Ä ~òQÓ>‹íïYµ$5`˘Š(Ô @'Ҁ 85ÒéÚtin.nó#ߖ>‚šEBLM'EóóstŜU²r̐÷Ğú–´‰l,ĴÊ·N‹ÜÖ~£Ğ=ÁÂVè0޽Ĵ2ÜñÏİĤlĉ˘­]Ë9$–cL#$ҁǷĦç<ô¤b&2ĵc™ÀÀüé zS 'а'°Í9FOZP1ӓRÇÈÁUI'u À 0&´˘´Ûù÷ËNÀġoiAaœhÓŻ™rßvÛŬżÂİÏĉÉxò]:’§ ?(ÔÒ4PĥäJ†i72ìˆ}Ôġ÷4“]ûĞq—Ĉ7ÓéPKpҟ..ıîju6!Ë÷>” ÊÀY`S‚cßÒĦIɜr{TDä’I>Ĥƒßš›™ÜĵdO+j}Ŝŝ­UĜöĤ@ëÍ4’N:Ó¸ÛË`tíNàżc¸˙ o,p(ĵ“ô§púšN‹ÇıĤ䓁@I8)Ü(Çց…ŝ´ŜI␠’́ÜԀb .) 
Ϙ=İßtŜŽ>´ÂI œœ 2֎wÍ7­!/Aêixy4O&€ äӉŠ À òx $“ĜwĤÏz›ŻĜSĥ™$f˜X­J)ëšCHBäcŒô¤¤˘€ Q’qIGzѸı’âbòħ<ô&Ş3`óùR2{šŜÒôO>·êö[çs öÍrztí.Q˜°";tĉI[…QWou {[SaMüR˙€ö¤Ġ5<+edŸf°OşĞĠ½Í`޳ĥÍrQÒ#I$ÒUİ-dH× ïU‘›M NUÉô÷§¤e²OÊ£İĞÑDŠlĵF>êŸâĤ%rġ•µĵ6‚îàî^Ş:n˙ëU{Ëé._’1ÀP0ŞÓÜ4­–$(ûŞ;U~X÷öÍ´²œœ)ÀqïJŝ¸¤'˜ ‘OÔlŬÇ_JFo˙U4y< W$9f§Ž;PHÏkéúdדda#Q—váT{‘Q‹{m,ĤşıHâBÌÇ š(ĦÓϓm²â÷<żÓüj ‹û{+G´ÓÎĠĈ%Ÿ3ûA\óßʘ쌟›[ëNèĠ8Ôמía,#s$­÷ċÏ'Ĝ{V<’<ü½)ĈÓ6#8ÄçµW”„£ƒŒpïEÈ”Ż¨4Ħl}OSUñŜ”}=iĝéRfGÛ<ӂ“OĈϵFǓ@xĊ(ÏJ@1Éüİ $!I$âœ8SÈúÒi9$b˜–8§pżzG~ôÎI¤ÔñR”RcާÓä)xQÇ&—…\΢äšRK֎èà wĤġ4„½>´tô (BñšRpŒP?3tôġĤ08fà:qäŭ(9$O=ŭéĴÛ}邌MDNNh¸’y¤˘ŠB (˘€ rŒ·4€rKҁ¤tzv—okf5=_ċ€sĊ)˙ ĦŞê÷À ˆ­Óˆâ^ŠĞ{}q¨]™fr ”z ¨‘´BŒÉ' Ĥ›4”ôċŽÂGK&Ġçı5 "&쀋ĠsíP†EbdEž[ğš…äyœ(áGAĜQħ*Ȗ{–”ùqä/óŞğF2M?*£jóĜžĉž ™°Oa@·%\lV‘v¨*˙ZŽIYܖ9ô”×b[M0 iŽâ€Y;˘ĉŒayÚ˘wÉÀäP-‡>n)…²*:QSq\xÍ<)-ùR˘ (&ĥ †Öʸ½Ëİê˙_AUb£Óô°›Ğ·ö‰÷şŸaêhÔ5 K0`³^Š:żı5™}İÜ_J7‘(ÂDĵ*ŠÎÎM+”ê$­GżŜ´H‘sùRßò  rxĊ,q@,qJHŽ´¸Âñ˙ë¨R?:P 4ñĊ@×6ñĥ֚=ÇĥáPĥĦdı/sö *Ġ9>„:[²èËçN$Àéüê’jÛ,H÷˜ _µÛ3ô˜9éûÁQ)(ğ6iuDäçéš:SĊĥÜ}˘ûìPŻŸ’XÛ× *yÜ|ݰï֗·šÒvĞ{€iÁ<Žj…f ^A§ڑ³ž)U@äġôĤŒœŭ /|ôühÇ9=(„–=½M#68_ΑŸ<•!\(˘ŠQKŠJ\}(úSÀd4§Ÿ˜ç›É&œHÇ·cÌĜġi´D/îày!‡ÊMÒHŝK÷ jDzފcHÄ;ı 8ÉêjÄĥÒgȆ(ߔÜ>÷½uQhĥş5ƒ^ê²F×f+qÈÏİgQĠ.5 ɓ0~UĤi(r­w)ÑӚ d5î0¤É•ÌîJ:rp)Ùô¨ rMäP+Žg'# ¨è¤ š™$Q‚֔ıíĊ44\[…·ÎÀNŜ‚İI#Ë)wbÌ{“M4”°˘Š(QE=Wsc ¤š”(8ȧíÚĵqéïQ;vö*Â3sQ÷£½İâ—ÔrM )y5ġcŒÖlş˜1³Áò€ĉY~U˙]40•k|ÓżOĵ筊Kâzŝ?qŻÉ>‚ {¸a[qŬŻ4Ö|w§YË.§qž#‹„¸-CÄŜ'Ġ‘•=&ÌöiÇ×­qÖ̲ü<œYݳ{zËe÷‰GV<ÑJ}dí÷-ÙìNjlìmäFš$r1÷ıš§ŠšâWUĠ/OEšċeŝ͊Rח³êwxüÍWme";ll`„cï°ÜĠòy×a9ygìé[˘rĞ?ü‘¨'ë#ÔÀdXığyú˙äÉÉü‘rK­Jf&7ż˙yœŠ¨í|7SŸûkYò^ê7oóI#AÒ u•9•Êö5ù†;6£Zó„jIwo•}ÊöûÙö˜L­+FR‚}’ğûôü‹3]ÜFĝ’yÉ=>|ĠOĥJ•’OİcP\œe½ÍGŜG›—¸ì½oĝŸEC _yk÷~ŬtDòŽ{9İ#ĠudŬċÂi R˘¸j‰ß™žÎ;XĜ"Öw…è#¸”×E§|Eñ5ƒôù. 
ß0:($‘É&şùĥ6„ıİĠ’~ĴĈĤ…EiA5èzĴŸµ[’Ĉŝ1Ęb}+¨Ñ>:ĝ[R–4ÔaıÒ¤n 0Ŝ€û‘_2^é°K#y7 k1àñ^yĴi픏4Gw~ïŬoÂŻ/œŜœ•ësyI/ĝÄò1?‚šĝ-è~ŸĜj:ĤŸĉŸwoylüĴ¸`*°ä–í_–zÄïxSV gwyjê~x˜ġúƒ~"ıĝuûAi^$ž-?Ä~FvpéxF?í/ŭyJŭ3'œ6)Şx…ìäúŭ—óéóûϓÇíj)ʓĉ_‰ô…Šé$K$n²F*êrzŠZû„ÓWG͵`˘ŠpœÓ┑Œ Ryâ€9ĉ‚ôç/$⌒p1KÀó4À^ĈqMä·)94gñĦğe^j+”0ÁĊ߅×&ĥnKDì}%<˘Q‹×SöBynŻË$‚gnrÍĊfÉ Ğ`•'+ò×ŝëâÈĥ"[cĝÖÈ.?\Vwü6·ÄKñ2èş”çĤë~Ÿ‘ükeĘI>żqÇ,žż[}çêÏ’Û Ü*,dġùRżµŻĊËÛµ\ɤZÛgĝ` Oŭi"ŭĥ”•ÚrRQKŜ’€ ;ҊwJoJPqAéÍ6€$.Jâ£Ĵ‹½SìۂÀîÀ~Äê*ĵ”ÒêŸsŜÉmİjV--ö˘,íHáb5˘xJ7TbëIuÚ+ĉôüÎwWZŜÑû(Ÿkü˙#J÷WtĤi//kÔğooŝµy~ŻâÉ5kĥ†Ù."' ˆ½E^ş³­ä) ­ŜĞqêǂj“>˘#dótÏZ²@ŝ\˙*ĝ,˙:ÌħÙ9¨Ċ}˜'/N_Èú\Ż/Áá×:‹mġ“ċû’ĵ™„Öڕĵh†ËGû÷ ŸÏ'ôvçl“ô›B^äġ­ûİü3o&éou›Ġ›ċCùóYÜsĈ`µ´ŽŜ#ÑPsù×ÁĉÂËŬŻ]?îósżş6‚û¤}&ïQo;rŝ.òz(}“œÈa·_NĤ‹(•-íÀ§9™c-ċŞwùdÉu<ÙLá{^&;„À/vŸĵön7üì—ŝz8L>#ŭéè·³·ċŻâX›Uı’/.=–ñúF1YĖ?1,}è9T'Ž¤×'¨xĤÊÎïìÑJ—We°"FϧñXĴv3;֛“ü½ÈúŒ>†İĊDëzä ?Yvžĝğ"\éüVöŽYÁR:dfğ}'àĈŭN0GƒEžAŭĉĦŞvžß(|×~†söċ¤ŝĉML„7’9ÌgT:Fĝa'Ô!"½Ž?هâġä^TÚ·‚´äÏWšyN1ÓĦï^oñ›örĝà_ÙËZñżôÇ:;yy„20$ŠeÁ p'ŻfŸ‡ùĴ䒆Ŝ‡+ΰénr­zŠìEĜÑâ!ü&ĝÙ³ĈžÒġ=>ÛX‡^ğ{;›9PÈ#=Á`­R1^WŞßëúNĞ-†½§jş-Ĝ$47–Çú yxŽĊáöµûĈÓ¨½×sgRÔ4ğĝÒk–:mÈ_şÀ2Ÿ§zâ/ĉŒ‘ŞŞôYIÚßjœúŸ•FKk—omÀŝè+œ–ç̐´²ı=—9µ‡I)Üú§áíŻĝSP‡ÚäŸÛZ+á!óŸkCônN=ıŻÑRµ×#fcç€ÇġĊ~ƒÂ™ġJ5c†­+ÁèĵŸù5œċPĞV Ò_‰ö½)߁üuñVŽ; ;ıáÏş•‰>÷AÓĝOJûğáWìĤé—6ڏ‰Ô݆ÚËò§à+íŸ ü9÷†4è ³³·Œ(Ĉġ˙îUc‰6ĈŠƒÚĞ‘Òg=YàâsYÏHèŽWŝÓ|?k vV°Ú¤jŞ(ú ꙲0:SNriqÇ{Š)+#Ȕ›ÜJ(éL‘:zJ( ” šÏҟ…tSsŜžµ³G $ħ˘Œ’ĈšMğ rI]’\žıâŭ7G ¸ığÀ‡…úŸé\?‹~!Ç=ž!D<Wïż²úW•—’ášïP“ìĥ£ĉ–ùŸë_a—ž"tĠZ‹}“Óï˙#áó^1ĦN££EŜÛżò˙?Ììo|OĞë—̰4žY8!8Aŝ4éws:ùžlÒvQÀV?EéĴFĦMtpêÚŭĠ°ŝϰŸa˙–›Msñg‘äULÛöv_rŬúœ™-,Ç6ŞÖ›mŭż{Ùzd6&WûOPò òÂäûT5?ZÁfĥ:|şOşÜk”{/M>ûÈĤnzd˙J–]2ê8Eexü|Ä!ÉŻÏħ^-äĜÜ4ŭŒ•*jÖsj)ßùbßĞ>Ë Áü-xıŜsËy5ë&ĴF5ŝ½}!eóe@éXE/ Š{ħĊtËmu…ÓBßûò‚LŬù9û‰I˙žG _™ŭg+ÍŞ7<ÉI,t^Jí+·Ù/™÷)c0JKyËWëdŬ’îŝ˜‡Mwï È}¸]Hm°l…:e,kCmIy$²ĠRŭk‘Ġüa§éن6ı8Ûğò~˘½˜dù~]IJĴ•+ùwĝ·÷Iú3Ċj8ÓN§ÉòŻôùGöt%wÏ!Œwi_“Yòĝƒşlm ´_Ú=ÈŝP}Ĝ+Ê&×ġ{SM7FÒµ{ÛİNĠ‰,Ĝ“ĝ˙JúkáGìÙĝ‚$Ô~#>§ĦéàNV9%Œ„{c?JÁç5*×öy6<Ŭg5Ì×ߢùО”².j|مi[ùbìv×ñ<ÓF×|yâ…Ñ´;;ŬJîv ÛZqC=]ğïĊ}ç[à‘çO]WV´ÓŻĵM ĉ÷‹cè¤÷÷Żfżƒ|3àÍtï iš]°1~w>Ĵǒk§ŻĦáŜ Ħ€—·ùêo䟒ïĉusYU\Ò?ˆWüMñ'ŠĊĝà û=7Ċúüò\\jÛ-ÀÒ´Û}żhş11›tÄ€^QĈìó߈m5 
ïÛĈZvztír˙álQèȤ*È·—BvFĈ S%Ĥ9=E}Ñ䛆üd´n,3ÚjWQıe·Ġ|!nmĤüäĵrnVÈô5­êz·Ċx‹LÂ?‡ĤŠâÑ%óí˘™dHí\àɲȍò°KŭŝŝÙ:gíCŻĝ“‹|Caá[{qlÚ]ŭÌWPêeC$f Ĵ| ¸ ĵ[>ĉÔħ'P˙‚€ĝ‚{O*K}ÀÖĥ—ïğ”šâîicLzìˆħô ž´Ĉ|ğŸ/‹żà‰:4Ë‘ĞÌúv‰-9Šú;ÈíÙ>˘E5ô'ÇŻ ê^#ŭ•´M"ÉPßÁŻè³ßċ^@ÌGŻĝaöĊßì´-J4ŸŒzĤŻyhÜn5 ‰ab?ğ„}+ß<Żĝ›Ä˙´_Ĉ7ÄA7†´-^ÒËE²6ë„Ú9ÚBŬI-'ŝ:)ÄŝÒ¸żd˙…‹e´w?$™ÇáPüE‡Fñü›áŸ‚5Ğm7VÑï|ĞÏĤÜDŽ„Ö˘7`yïàŭqYž ûwǏÛ˙âN£eygßÀW“é~·¸dĊŝ˘Œñ]^mR~UĈĊÎë]'Áŭ1uŻÚ{Ċ-M¤}Yt=&i‡ú›;X×xSĜK“ޏ3„fš’şml|eñ›öU–ß]ñf­ÛOÔ[OÓ/–Óù­µ˘I7ÇĈv‚ÄcŻçżewâoˆn“Ä3jžÒí†d¸’ÈğÊÙûĞœ ŭkìŻxÏâŜĦàżx‹Á÷N>)ñ%§‡|ħ[&ëhwˆçԐrÁ|Ĉ\öQí_Vj‡ËàĈµâ½qaÒtM7ÌÔu[¤§ÌíŽĴqw5òux3û#L·˘†Y‰ÄBfß#€]€èżŻ @  `JïĞlĦËì­ĉŻsÄEߘù·G³ż°v›gŞ./­íÖ)÷ŠŒf´HʏjéüJÖ²ë³Kk<3~-ÚŬÁ`œg{Q‡,T{3YZlRĦS= ˆ’iùÜ9àTf™‹ñҒŠ(QGjP2yò Ŝ{ġüë£Ï #Ŝĵ,&CFœġìâ3j“Ò#‡·Ż ĝ;K†ÛHÒ­˘òÔĊx üvùÀŒԄŭO֘[,p+܌TU˘Ĵy.nNìRÜÓBñNJäsïH[< d‡ûÒu<Ò'ò¤-šñĊ6Š(§ž½(×? NÔÂx§ı=+ZÖbÓ-X˜p3÷}ëZTgVj1Z³*ġĦJsvH]CZµħ%Y> nx_­|û/ˆ3j7ͧéҟ+;wgµbĝçÇ(–óA ä+dHÀòÇÒĵN7WwßÚ7ĴòÉ?­~µœ/J„Ujħĵş\ügŠ8ş"N…Z=mùĜ’+/ô‹‡77Ĵ3ƒÎ+&êòâöpÒıaü*: € g¸ I!<µì>;)PÔ˘ġEqŸÒĵ˙°ĠaĈŝĝ™ëM›ċU×t[@ßŬipOĞ`W–|?˙‚~Ïŝ.ğ†ÇėZǽIÈ]şÄàı2ĈX(˙{övâĝ÷K&‡ĝkĊÚMÔd²ŬEuĞß ;ô5ŭT|5t/h~'Íĥ³áŬ[OÖôĞ… Ġœë,n=Aŭ+Ÿñ·€ôßÙé³IŞh Òċy´mwJ‘RïO‘—k,ĴŒ07VGÀʜ xŸˆgĦk÷>,ĝâ[Ÿ…~+g2ËaitköÇŬšÔĞŸï&ŞQüwĝàTKoŒßüAooox8˙jX0d1ŒLƒ6œP§ÇĦ|yN˙²Îê-ÔŜş2¸n`şšĤóׅ ŸáÇÔĝ#ÁŜ µú–Ħâ?ê—?lÖġ­@ŻÚ5 ö*B€ħĈŞŞ‰€¨ 3’y ü~ĝ7(£ŝÂĝ‡áİnX|Öwkoqî90Ê}ˆÜxÁdx·gŝâQñTò{¨?í/^xRŜÂÖÎ×JĵOéIċ\ #ÂêŸÄ ù‡÷ĞÒôĊġÄóËpĞ’|ùZGÇ=~b3^iàÏÚ3àĊÍ =â…u7ÄGJ½¸Xn à1IƒŸL~½‡ĵ={Gñ&żÜĝŸSmBE6Ú=Ĉİ-Êq…Žrv}8ïßÂÚS[ĝvŜ8D6ş4Â[HP „d˙‰úג~ÒÇ/ÙŻZHÖ5 1Q b ĴË"·Íx cñŻnÒ./îü1csŞY.¨K ĵöË&ñ’ıïŠÑ .ĝ9—Â>hŜ†Ÿ …¤{%UŬN@ß+žĴÄ÷=€ĞÊżiżÚŸ~ÜZ>ılž4ğˆ?NÁ—Ùœşıġë\Żíoû_ĝSötĝm&ŸĤ\XkßïeÒôu}âۏġ÷•#àħéĈHŝt|Gñ#ĊŜ8ĝŞĝğĊú­ÎżŬÈÒÜŬ\>vħÎŻ@@AҚEĈ7?KfßڏĊÚ·í“€ü]|—V!™šÚW ì“c0U>„Œ}E~IŻĉŜúçUĝâ˙†>)™µ½"[{³ħ²\f9Ôd{9ż£ ëĥ)ĝ}˘ĝ“K•gÓġ;(î­ŬNAWPùÔÍXós v’’êlv˘”ŒRw¨<àíҏ֖€(BóíV Ĥ”*‚sLŽ2Î8Ğí(·„Ç27V…4‹Š3Â6ÜózqP£Ò­Ï4 ì…FÑüFħçğÁùħîi zK*FœòŬĞ2I™ÛŻáQ³³6IÍ7Ĥ‘ˆ¤ ġ­%aċ‚9Íd %€¤ŠV1’3I!9Îi´ı¤< öİĤ4¨‡Ô2ÍÔ/NĉŞ1,yÍR@k#ĞŻfžxž+&9 ɚfİĞC§ižt‡t‡ˆÓÔĠ”§%ĞĥEZħ§);$C­ë–úM‘.ÊÓ°ùú×Ì4ñ³î \ùĜ7_aZ~3ñ²ÜÎÓOÀÊŭz 
m’x‡V3oM6'ŭÚË×ë\1´èAU¨Ż#ñ~-âz¸‰ş4ôŠ#´‚[Ôż´oƒÇj­û˜zì­íäžxၠbp†t‰pˆn½[Á^kÛĦvñ´v‘ž êĈħñÄ  dġqĝİ%Ê´OĞèĵÛ}p— bsÜÂZ+wŻ’êŝGKà˙[iÖħj‚Ĵ÷Ĵ2İĠS˙Ż^•c5UR0Š0 ` ımy8;GAë_7q–uĈ™Üñ¸úr“Ò˙ #Ñ%²KñëvĦ9™ċñai¨ży>íġdÙ´3JpÜ^ĉ¤³ÓžK’Ò‚ħ''Ŝĥ"šU\mŽ÷E]‘UaF?x×Ĝċĵ ƒ’…YШtë)tOüş#:ÙK¸§ĞüÍÌšI ŝíxP{Ġ?#.72Ħ`O°żs Ħ°qÎ+"î"™Ž|Ïds ç)ĈíkÛ×ü’;px+(³;İÀ9İÌ@,ÈÈ÷)U žGU{ÉgĠĜûµ>ƒÚŠƒmêzé9=6'İ"†Y§X˘¤‘ŽTriğ†?#NaFÈcrAĊgeÍ{yÒĥ‡È´/í_áτSêžġ›´ĝ…n‡bÛ[y‘Ûɏ—säË5óŻìÓûL~Ġß?mo ĝfÛÇĞİébçÏÔôûä‰`’ĠNeÊí˽ĉ·˙noü>‡áÂxž}?LħñuíʤkïîƒÏLwÁŻÏƒu?‚ß´6ñC²]RûL”şÚÉ1Š9*Ävžâż˙²Ìş‡ SŻ„§(ûFܜšmµ£µÍïm?&âÉVxéBĞO—ktżêR~3ĝ câ.0ĝiàŭjF˙–òiȲpê~µá0ŝÁ´yo|ž89ŬŜ…âk˜L-Ĝ…,Aѳ_ĝ/ŝ ³kÇï„fŜÄŻîŝİĉȧ?Ē€Ç£ ÷˙ ˙ÁO?g]J-şùño‡'Ïü´ÒZeÇÂHŻÚìϕ´BÑ-?jOž&ż·Ô&›ö’ĝhÊ ‹¤‘Zĝ‚U·a'Ï;³ÀéÒğ{Úóàü1y>-›ÄŝÔԅžÄzĊ³F{üÛJ2ï)"ğ߆˙´Áż‹Z4WžñŝĞ³İ&ĠçònS4O†•zÍĊ†Ÿ|]YYŜ¸XUĝüGJB>N×ĵyûĝġ׈ġ˙‚~ gç}ë[³“ŭkޏ_ĝ'jf6żÑ-à†G?EPIݰĤ‚'ğyĉ‡†™óĥ›'˙İ­|àû)Œ–žġ´„`´z|@˙è4ñzüTŭ˜t{o#á?Âdñç‰ĉÛMï„I ˙ y˘çı4ïŝÎŝ$ĝ·ñI>$ŝÑ:•a§CŸì/ÂÁà´^Í>Ì!l'=Ojú˙Ä>.ŭ}OÄzç†ĵ+hĞûÉnfށÎ1Ôŭ+ç˙‰ŝ>j7>ĝ%mİx?ÀDyzŸÄ=JÄ҃Ġ,"l36?ċĦĊyŸżgŻÙ_\ñı³À Ñ,ŝ+ÜéòËĦĤÛM$z$Š2’ÌêvFÛħ…''Òì²Ò-4ß è­Ĵ\Eys¤Ù,fúàíHrp ĈrkÊ-|;à/Ù§öeÖuÈGĴ^~Ħ¨]ÉëQœñĉM+rÌIġÀí_„żµÏíñÄ+ß èşÔŝĝwhk§é³ûs`y™N[ž‹£ĵÓJI³ġ›üàÂqĤi7ŻñĊJ‹E0ĈŝzL~P>™>ĠùOñ_ŝ Cñ÷â \ÙxnöËá΋&@‹I]×} ÍÏŭò~}’Y‹1%sŜ“çI($hjÚĈĞĝ†ëVÖµ+í[TıÉqww;K,Ĵz–f$“Sé)–ŬŝG”ì9'ġ­cĠëg*Ĵŭ0zŝS(÷³$:Ä?Ċ|ë-ĴħÛ ŽFÑú_´_²•óÙü2ñW[ı̗~Ġ|ğE'ĉ6ẅ́`KŻà+²9m4YFâö “Ï@ĴçúŠŭvĝ)┯íßşŒÂ; @²‚ġÛĤù‘–)Ñà ˙mjdaŠÏżÔ+Bŭ/FÁ†zŠ˘~ġ“< -@œž´9ö¤sRŽOùP”9E•Vwê9ïC7 Ĝ6Sžä³`pµHœœÑŒÒ€²DÇ4q;œ`֊@-Àô5#Ü".ĜÀúöp'HÖ5÷úSWI÷qϽYíRÀi!A,qTœ³)g-ĉwĊT'šigß4½İÉĞqAĈĉàS¸§•-ĴŜyN=ŭĞÊġûéç”I2°–Lù(OAë]LjfÈ%œĉŜ1û¨ûí\äpAo§ÍkÛNèϗu}…}V£F*ŜŜo²ŭO™ÍÜñöWċKWäğżù˙Ė/t‰o™Óqyċ?òúW5+XġhôÛuŜT`„è•?Ä˙-Ĵże²UsĥĥÈyŒ÷ˆġĴèúpšġ‹_Ü|ò³˙Ë1×ûWJ<:uwgáı½jU1MQĝV‹Ìï4Ğ?R@T°Ï wŻ tĞÉ×J†ÊÚŜ8(œzšò á…ĵ‹Œ·G.3^Ù é³ZiÍ=ÑÌÏ÷‰ëôŻâÏĥo•RÂÁbíR²ŝ;ğ.ó’ëe²üz~ċĜÉU“ĦîÁür·ŝJ™´„JċħɂÂXlĴĉB$ı“+c;Gvŝ‚°{Ôñnk…`pGJ˙:²lÂx\Wµ‚ĵžŜM½íċÓ³×Ħŭg‰˘§O•½c‰¤l™_ôöĞÊqï|=IĴÛdC#çb÷5ĠiÌjÀ{ú ŝ‚ÂfÔ0y|+Ôë¤SŬŻĉϐ݇•J ĉXvŬ)“°èOsY÷a^ĠñŒ(ê}jğŜ‡agUoQ²Ò<<÷Òì!wTzŸjĝĴË9ŒR4ŭë­|ÛÙ/Ÿß÷žĤ 8J7ÓüŒë‰– )%c€Ğšùö…ĝĊ˙ ïz™g>ÍcS˜ÌÀ7+?^ĉ½³ĊŸ4;UÑôgıÍî£fבF:ùj Éüċ_?ŝ)\üKŭ¤ïïäÙ Ĥ˜•Ĵhß(T'‘ġ9ŻCÂo 
ñ9Ĥn§Œ¤Ġ*iÉßĞÚ+ï×ämŸfôĜS—ż7ĝ#öW᧏-ĵO&ÓÄw²Gn"·g˜³cċAËòŭ_ö‡ä? b+‹IÉêi‹9brǒiÈ' &ş³Ìög_™é¤cÑ.†xL,(ÏĞÛ‡Ŭùׇ|r֎•ÇVÊe·\–nìê1ùWĞh÷jÔïe|ùQ7C_,ŝú¤w µ &l—‘.Ŭ܅ŬŸé^żáÔó<<óÇóWĦèʄİĈ´ŸÙ‹ûÚĝ;ĈKäŭï56ĉAoc &™j|ÍÊ„ ŝ%ż:ĝ–âĉYŻZiŒŻ!‘˜žI'­vŜ)¸–ëH°Ô}Ò\ß]nç?Ĉ¸)ŽnŸÓżJòż‡ ĥÓí?´µı˜÷Ǚ÷GġŻcŸ[ıÖu-­ñi`¤,Q(ĈGJŝpñˋ³*¸Jı^U?eŸĥ˙ċÜmSïRK{|+Íéú׆|?…Xšxĵl9ÛkÙÒŝgüÒíçğò=3ûN+›³gf3Ž7‚ĥ£M‘*’N&²ô5ll×ċsOzٗl(C2ŜŜĠŝJĉK Ĉżŝ!ĝV+5ê\Ë{§iŜÑ]™u)?ĵıŸâvùWU$\#s Ċ'ĝŸmecâ-âŠti5+—6iŭ³2ĵàvŽ™ÈÏNµġ/Á_ÛkârĜXĝL‡Á^&šUîufµÌí%ĉïŠüŒñŠ5_ĝŞmWVğžyË 3’° èŠ:=eiş•Ŝ“â;=RĈg‚òÚe–)àАiĜѤt=Ԇ³ñƒÄ:Àa7×ó\:)ÈVi‘ù×ëßüv_Çû,ĝ˘ŜÖò6ñ şóK{nÎ7ŞÑc uÇñŻĊı™äğ•Ŭ‹ırIġ9ë^•Żâ׌~üC·ñ7ƒïÏsÄH E:÷W^àÑ%tcˆ¤êSċGô7ñÏÀMñ/öSñ§ƒb‚+‹ğëû"IŒ“œö;”s_͈ĵ?Şĝ_ĈÚ&³eq§ê²˜ċ†d*ÊAî ~ġ~Î_µ×„>9E…>ñÚG—ÓžLÖ- =}py¨i_Ùí·´›˜|1¸"+0ŬĦfQĝü‘žŭ+8ğhÏ; U“„ÑĝŜŻŬ* ùä§ó§×´{ŻĝßWoš½ÓŻe´˘mÈ^7(ĊOqqT$•œİ#=rä@ĴSÜgW }Í}ĊûÌúÏÇÍöñKvê˙jğvċR4çä×Âm){_,|ĞœŸzŭo˙‚wĝ:ËFŻĵ[}oçëşîôħAÖŜÒ#†•½Iò]Ĥ”ĥ1Äϖ›?P"ĥ,2ÀŞĠਙÚ<ž U–`¤ÉĴ·>BF˜FıÏ=Ğ:IZG$šk1fùŜ˘šBOPIšK8d֌PŞF[ֆì]QLŠ( Š( ĤH$q8¨Ġ°kÌ>*üNħĝwŝ{ı$Y5)­Ĵ°YħÇá]8L%\MhÒĤÙˍĈRÂѕjÑGñ×âÍŻ‚|7&‡§OşíÊíÂ71/İĊ|ÓÜ^‰uVW³›ùŜÇëYZ–ݍx›Ċ·zîµpóM4…Ŭ˜ç2Ké5;ˆ—i[H¸Š5ï_ıä™5,‚§_Wĉ=gùílÏêKE²]‘³ap,Ëê—*²JÇŭ/SêGzo€ôWıÔNż¨foş_¸o è— ñFÛXp]‡E ôUĵVĥqĈˆ4P{b½\ÚĤÒŬö„÷݊ü[âYuYÒĈ9ÚkhŽe”p&P;(èŜ›âGĞßK{ßxòá~|´~XĈMic}‚Š(í@fG¤y+ĝ’yI2ËŞÜ;“Ü™×0Ç<öŸĈ³ĊsñO\ž ŜHÊĦËŸÖıı`ĵ“To+K½Ö|C§èúm´×ş…ġÊAmKıäv!U@ġ$×ômû<ü O„_Ĵlu/"_\ÁÔdNVŞ@‡û¨?3“_š°oÁ+­sÊ|H×´ö6€›ĴÚT;^ċ†* 'âżf'˜ıÇo­g'}+^ï‘ -ÁbBä ­ğ'šmhżÊÄÒQ@F*ŬMh&äÇñ ÌíVŒ0àö¤¨˘” š`%ŽÈƒ,ʣԜUq§ÄßÙ.:ĉuրEš+>ÛWÓ/uCggy ĨŒĉ­ê·vš.‹=ŝ£(‚”ħÏSÇAëN1ri-Ċ6 ›–‰>+ñ6á/^k”ËPĦ*3˞ÀWċÏÄOj^=ñüÚĦ# E;m ìĞŸĉkÓ6|G½ñ^ı&Zk}ĥŜÛ?|úŸzùŝÙeżpá7;"-~ÉÂıÀöµ>9~ ·ùŸ†qË0Żìi?ŬÇñ}˙ÈŞÁĉ‘ Pyà¨ï]Ž‘ Ï5ä6ħ/ï˜eĜ"^ä×[áŬĈöî/ô‡†7?Ú>ĠÙXé‘-ÜÖ6d:“›Ğ>÷°>•Ŭâ<&7R†j\›Ûú˙‡>{”b0ÔcVĴmÍħkDħŽÚ­,?wk ËɌÖ½'IħYf[Ğ̋8Î['Ï ĴÍ3NBċÚÄ2ïíŝ5Ûé–ÏjWƒÈÒmŝXb< ŝµù׊\sżV†OžÉIÇâ\Ú(GŝžO§òŻyÛCí¸…>·‰§Zş´uqOgËĵŭÈġŝgî§Ocg&$Wwd[Y ÄJßığ,O²;aAûÇ^ŝÂıXﵝE|˜ÄjxÀ½HÓcŽìż.s“Ġ’œyŒİ‰ÌZĞ5xŬ8GX´Sê˙™ġgú8zXL Ÿ.ŽÍ7ĵżĵ×Oî´­-`?mşÇ˜ÈD£9yĵ´'#ŭځî>ĠĞy#?c„ĉb?ˆöZKŬBH‹4`5ӌD;(ġúW˨AQĉ—ÄôK²ïŝ_3җµİQ7żċÛüȵ[ĉD‹E°ùĤ)_áö­{+D³ÓÒ8ùİfÊÒHn|ÂĊîä9'<×\7mäœW%Y'd‰Ċr œvëĉÉcRò–5­Ï‹co†#İŸjIrĥVrÜ;„ 
ì;šç4ÀúĤ˘u U–ÙîT˙ġŞäŒ`Ÿ_êß×ĦŽnĴĥĉBÚCZé†EwB$•ŞÙ[ßx^Ŝ á4œß5éñ”‚ë½G;{Úı7ħ+âFC“ ċ³ÒĥTi]?yŝvĉß?Mà—?f]6çâ˙Œ`ž@ÔÌıŠ5Ɋc×ìzŝuOŒĵ+uá/^éW´ËZKœù‘äí?\uŻßÏĝ~ÓÄ:=îx˜´O˜G·8#Ŝż/~4ĝÇm¨èš”^VŻ+Mg1\íÀÉךŝıoĊ f"QÁ*ó(%wĥÊ^vÙùj|'ĠE×Ħs;˙šŭQ½Ĵ‘‚’W ßK ÂŜ=°ÓžCi/ü{ßÈĞǔŭ‹á?~|yĝ‹wñŻŞÏr×Aî^YdŬ‘$rÍô(öâë4‰ FBĠS“ɤ‘*)0Îh psOÈsó0QLĦ”`ž•£id·2Ş}³ŬÁôŻ`·j2DŜ&ĝ–ş?Q“,¸ú·A@"¨ÍÂĞ3zb•ԏ›c§89ëî};à‡ìô–+rżĴħ’²[FOŭòŠó kÁ˙ôO ‹‰_ӌŜUċœv*$g#zòîz­ıó”&Y­ÌaK)·§<*Ó²µ[Ë}VRV9`‡í : ‡PTßUéĝĈëÄ×öԆ³xΟ<î<ôi—+ G×xßÂëàŭQíµYl57ŒÍyŭÖŞH›xèsÓĥ +“Ώ¸fy̏÷Ĝċ³ë_gŝɟ²öİñƒĊgĊž ŠKXI5×î_ÏÒĵ³ö~ĝ|qĝŬgĤÁ[x~Dšá,qóîzs_W…ĵ3£ĝ7áŝ—áÒ+*А``§ÜÒlĈby,wÂŜÑ<à{/xzÎ;-:Ùp¨½\žĴÇı>µĵGz3Í8 ž?*“ĈnpMN1Œ³p1ÀĞAµC°çÒ¤yqŜ°3Ĝ1ژ:Ԏ۟8¨ÍP ´ñ֐ħc“Ö›J:~­ŞéšuŞê÷ÖÚvn…ĉ¸¸" '$×ĉ·ĊŻÛüAŞÜĝ{àŝ€šÖóêת]Ié˜â~§òŻĝËñÇ´·ĈIü%àÑwià›yĵ¸˘C…•AǛ!žJöO…<5àX-L6ÑjŜ"|y—×Ĉô@~è½ké2>­ŽNV´{żÓıáqár›F~ġG´Wáwñ m7öĥĝÇ/ö–İŞĝŠ->fß!‚öÚ0zĈ‰û1|uÒĦŠò˙ĊڀĊšĝçB~€é—ٞ²kÙ£·@ĥ°0ÀcŬÈô7Ä>*¸×5YĴbœEa—½ı òށ’>ƒü+é°Ù5NхÒêï˙ù\obkGŜŸ+{(¤żwŭ\ñx+âîš./ô?Ìëk6â[€C鞵>­üXÔlî.£<1îX×ÚŝñŽß³EE%½òŜ@òJÒŻÈ°Şäż?C^Ža/İZNŒz[Mg•ÒYŸ4UyŬ^÷wğéeĜüµñ^³.İĴ*²ˆÒ FÁëÜרü"ÑĴġ}CTEk(Ûi˙–„ç^oâkHu½ñE…Ġ„–:‹P·Ì_ğÔ cóŻ >èê°Ĝ%ĜĊ•½ı’AÓ{‘œ{óOŽ1ĥáû–=ÍvKfnnğ“h‘¸AŽƒ°­Ŭ/H†Î/:D2ÌÇ WñvÇĴ§„rZ¸<2ö˜Ğ+ÛgQù˙%5hĠüÙŭAWÁĴ?ÌibkL=ŬŻż"òŝjVú/‘‹.‰2hpîu‚ß9“ÔVĊ•“êĉ7˜ŭ“GĥbCü_ŝşžé$ĵż&%£ŭRW_kd‚8ší•"Œebuxï_ˆq'Šĥ#†QKQÎW[ÓçŜMġ¨×şż–>§é9geĠsib(ÁĴ<cċ>M£ü‰êúJ^…2Ĉ3l³HŸe°A•^…ŝĠ½s3›ACd’8˙VĤİZÈ×ò–]ĥ0œD½¤aßè*)ċ’úäÛ[Î%˜z{Wó¤S“Ôŭ:˘sİïtû—üĝbH {<‹qşÎ]˙Ŝ•ĉo0f_6êSµu' ‹ğğ}:Ŝ;;dß9ùczçÔÔĥ‰g êz„˘K‚:žˆ=Tç}Y[.w³Ùueht½1îï\yÄeħ˙ Š·J4¨î&g܁ˆoáâı¸Ñġ]SûB÷÷zuıÌH‡>§ÚıŭoXšŝûìdCœ–ĴİEÔw{ ½Ö¨£}wo˘ò5 ·>(ñL:}ä²F˟P:“]ÊG´Bö*Gò€J紈â<5-ÌĴG\ğwúVU•ÍÖ­ŞoMËì“]UD–ŭIŻIÖmCJpü_SĥŽU• CÇJU(ùycŜĞ"ˆ£DJħPÙù}s\”ß+ıÍ_c›Ġ˘·†K‰î#O$Ħ#×5żĊÛß |FÔµO Ċ?öÄ=XéË2ágs>¤tú×Ü2ËŭŻĴoċĴmĜìóчSô݆?k_…×:ö–żüĥ#ÓÀ7‹n0ÒĈ½Žú?†upĞ;§ġ9KHÏ˘—NeÖ2ÙúšgñQqŒyĴŻ%ÖŜ^}OÉmzĈm;Ċ·öw5´ñNË$DccÈĴŠöêiñHmy ŽÙ MV\ä`?:7ŻÖĵ…†|{×ú9”'Z‚UcË8é%çċŬ=Óíĉ~‹£OŬw‹Ù˙]{˘Š+Ô9B”Eʒ€îq…â™E(Œ(šúCáGì³ñK'ı³t#ÂCe,x$˙v½7àïÁß |ĝ]m˘h61­ìŠŝġ†e¸~ùoAĜUE›S­ìÍğeŻ„_ ü5ƒáE—†ü?nĦÖ57—$ ó9$úg5êTu54Q3Z)IÉŬHÙÎÍiGD››½M0˜à †nĉŞ<íÉ4·£ĉ#ĵUkˆŝ}ßëU°A9­8ŠÏSڕĴÈ=qIZŻh 
iĤÙvazûÓş2­AvÏaŜž-Ÿ~1ùĠï’”6ċ×/‡ş_€>Ùi֑#êOk뒣tŽy<8? ġ}1c›X· 'î•Á‘”çä|kİÂ7ök™ Žŝċ|ğT<GŜúT|}ü8ĝyĤkž/û]ÍŜ·)žÇNQ—xÀ‘³ÑIçŜżFž İÒZ-î8WÄVÇc%ZğĠêÛÙ[$zŜż­^ŜkÍl ĵÎ0‘¨'`Ç@=ĞĊŝ&x‘ôO  Ùħĝ {½}û·jİŞübÔtOˆójŜŽÍï|ÒÂKˆ·Ĵc°ñŸZx“QĠĴ)ÑĤ£MZÚ//ëĦ­$Ÿj+gf<Ğ8ĈÒTc#UKŬJ;VËO[·µŸuŞş˙ĦéÀ.GÍ' Ĵ†¸K$)%şoĵŭqY*v9è`œ­Ì´íß͚Ü&—LÌ.µI{Ÿá§éV÷·DÜêsJ-Ŭ³DŭëJİmişhjnsĠPġ5Ħgq5ëÉ{1ò­Ô-{"Ž­ġíùÖ5ió;tƒċùżÑ×5ĥĞoÙŞúž£vİ{Äc>^îçıŞÖÖ§SĠZò1X§ŬŬè+M¤“Wĵ֟ğħˆ#[Ċ{Ş(‰%N—²‹·ó>Ŝ^¤W²ÜëڊÛı,£9cÚ­Ċxĥñĥ›¨ÊŒI9èžżUğı˙eéƒ`L;zóT%ĈżÙĥ31ŭ÷÷˘QR&4ıà£k.‹ġävn˘÷^@”NȸfÇOsLĠŜIż³mß Fn$ŝêú}MS„&‡ ‡ĵŸ„$wŝñövĉw]֖ì^âNf“ı˘êIF*É~>l䣄Œë9Ċhĥ˙3£Žáìĵ#}e S410ĥ…ÛR;~'Šñ˙ĝÇKĝ‘áŬFkKuFG{]RÍı09ᔏB+ı¸ŸìĥħÛï,Up²×É4ícáĈĊĝÍà{;›½ġ<żh‘ĥRXó˙ ½›{ž„×ÔĥU†Ì'<,§ÉZZӓÑ9/°ûs}—üÖ]LqŠ ŭj+žIv_Íçn§Ä?Iï]ĈµáiXéÏ9’&ÀsÊ7ëġàz”ş•·ö X$v?i€ ymëô5úċâ_ x_'Âu×ü6öڝĦZ0ĜÜ{İŝäŠxġż3üsà GÀ>,şI’WÓ÷˜üĈNƒĤ×ġŭ“á—2Ç ‰“Ž&—şÔ´“ĥ–~×t~WĊ|<µ]jJô§ŞĥÊŭW‘âR ²•î84Êê5ŬA`şœ ö=ñûÄço×ük—ŻÜĜˆV‡4]πœ˜ıùqIE@S•ЏaÔSiA*Ùƒ@ Ö^2zWé?üw[6˙ĵu ħÊŜi)2ûä˙ŻÍ–<ĞpOzûßŝ Ŝ× ûiêf;y^Ùô–YBċc;ŒžÙĈ*'%vÌq1r$ÛûF"àZ.˙18ħb'œQ#ç)94Ò@&šîrk>I ħçŠI-mQĉxaŠ)%mÒ2 Ÿz…‚Aˆäµ -ÍÔSĜ 1B]ŭê}*w•Q Gž:ûÓ$”µV'œ÷§`ħÜrsM'ŸzJ)€TÈQ† Ô"Gµl+n@sJ+5g*vġr9CІ€›?•gܳnĊ_¨zñքäîâ Oü]Ÿ\ñ ´~ÓĜÈ+m3(ûħ'£9Àö=Ğ›ñWŠuxĈÄş’Gó›gâ;XTmEEìŞ8üÖGÙEŒBĝ‰˜ÑÉÏAġ?ʳ')l÷÷RY QÀöôFF”yԗMġùŸÌ­5ĠŬùżĝ<ُ¨\%ğğ°b½N~óVMŒO$í¨]rçŭZúVlM&³â}ÇìPž9à×SnğÑîŽ(ÛË·_ïżŻUNŞK˜ŠT\šWż’9߈·²i?ġ‡ŬħîŠÛ¨ÄÌy‚ĉĵŻHì<#ifĴPGfAœ Íó_›…l|iÔOöfĦG!uY8ç–=˜ R×d!kĜĝ‚:qüĞùĈ ÔÌ1pĊívŝKüÙŭŬôpG(Ëêc'ŠÑ^²{}Ñżü9ÚêRÇâ/ŒŸ Ĵµ µµ·šxí§¸é²=ʤŝ _¨–š×„<'òĈ×Jı³“NĥˆG6Ò+ޤżİġŻÈË8Ĉğñ_Â62Îĥ½ŜĈ”ş2 5ôŬ‹´ĝġ4t‹YÙ?sLŻŬµ8x½”UÏ!­)rĈœq[{ş~HŭۀòZX9chR“que?üŜŭO·<ı|]Ğ_Ù[ifÚÙP2KżsqƒéŸJô–1i3ĥAÇŻ5À|"ħħÒ~´Ħ}Nĉá‘ä# ‘ì3ÎşİœÜ_í1GĈĵkù‡< 5^ZKÉyÁê}g*İVI+E~&´ĵ+qpwcîD?^Í)ùĜ…ìĞ39`£ñ݆ IĈ{×ÍÂ)*Qż1<-<“²ÄIfëŠĜ-ôèüÙĥËrFBġĊSûTVpĥÁ||Îj€”ĥé¤%³Ó=ê’ĉf„Şy/Ċ“Í<·×ádÛäú(Ğ֐˨ܽÄàӐm œ˘ĞÙÙ˜ÚyŬRĜ°Î bŻĵÍ} ·€,“İġĊD›k•mŭXš³KŬ†–ëÛŝ bY_Pqggû‹$ûííVê8lĊŽ™´ŻG‘{šµ-r).×BÒePO޽i‰â³ÓÒ S“·ĞgAÂ*Rë²ŭNoŞÊÑşÓ˘ŭYvI|„6–£|ïŝħĊ=.-4­†w™‡šŝ‚³ÜvÖeĤnĴk^·7'NӖI>ÑğÎĵ ôÏEúÖrä_3˘u&£-üàö­â.4Ĝ–Ú%żş+Žá`´fF;ġ=ĊcÌŝEşĈƒtŸusüéñ&ȆrXòs[*j*Èştİò­‹ Ċ²ÎK19&ĞLìò§ é 
ÚÈ!àäzTûO–X+|Üjù˘Œžµ½8_W²:á-žu‡ŝ(ĝQMCĈż‡Û<9߈5‡cû4?ˆĵ:×ñeµè]ß`Ôà([Ù]IòŻÙluÍQ—Ŝ,Ómuû(R€b@{g÷?ŭz·oßIµHc´.’¤™G>žĤżYŒÙ†„oQFŻW%Ġ§ŞóÙùŸ-Oì4çĴoÎéŻğŝ ?u*mėš\ó[Ïqm+E#BI]Àà VfzWê7Ċ?ƒşˆuëeà½2i ŭċċġc•‰è S‚O^•ò­FŜ('š 3^´Û‚ĝŒà ŭ󅆜‘´’mP3îkĠ/~x†L·²Ô×ŝĝ—^ñˆâ¸²{´XRÈÑÎĉ÷çfiż°ç„ôġÑuyĵoŻIp³nšŜ8WiÀn£$úvŻ´ŒĥZ'…êêc •˘*$lœŽOsĊ9XĜÛ]Ĵú~­˘­ĠŞ™9ùz?^äŸ4"ż¨èÍΔd÷iÏġé¨MĊò9i íQwĞ’Ċ‚HUm¤ElŒ†Œb¤ÜBàtĤ;ÓsĊ0i(˘€ (ï@('¤šSŠN„Ôħ1Q˘m£’{VŒP,c$ԛ`r ÒÑKŒÔĝ—ss-ġóÜŬħ9vµĉßġ·ƒÂ—hŽDî `¸™ í/ï#Žn„‰@ÍxïŠ× ĵEĴÎ1ò*íûĊÊżĦó*Žž£Žê/ò?š²J*ĥ>Š–ÎQ_{G{ŞÛĝ?JµÀžĉfsÛ* jŸQÖaħHÙvŭžĜàSüG×ëŜıġżá²ż”ô²Š8É>ˆ+”ĠïEŜżĤÛÈá`3ŒÉ5ÇW£EM쒷Ìô0ĝ7[ÍFZöġ[ÄZħÓlÖÎÌy—’ˆĞÏ>•Ħkm nÙct@d9àġ½J…4ßSWIÊ)ìžĈŭĊĜ*gÊ·^ŜĠÊk~'&ÑĴtĤUˆ|Ż/÷ oUפżÖÔ²iêpÎżzSŭÑġŞ>Ô×ZâkZĞ’ÑŸÜZĞ|ú}[Ô×Ħ†ÁÓiW§CJx*p=N<ÎÇ@Ó>ǧùòİ7SräġÒş2ÀRÒ¨jwc£Kp‘<ò4ħ=qUİ:Ġ.÷fġ'êjj÷ĥZF:Ĵ—îُ=ïXzL“\ o&ûò§İ>µ‡ocŞê˘+­rHâ—Gü Ĝ}qŜş¨# ¤qˆâQ…ëIS…(8§v÷aì£N<·ğë˙G™ÌŽrzè*„:˜›ÄÓY@ÈVó+g'' ŬcXħÑ´ynŻf˘İ8OÒħ|=votçĠŜû:)Ô£ŭ÷_ï7§ÒŠt%ìÜÚÓeêimÁÉ­6:›İÖ+Ò:ÇŒıcŠË҂èKw™"ÎÏCôŻ7Ö5KŻxş- Mfk`˙‘z$ûvŻS‚Ŝ 2ŜÊ@ èŻ…t(ǙûÒéÙyú—R‡²‚ìPÍ,4§d ÷ŻıĴíG\ŠÊÀ³dÜÊÛ ŒrÎO¤ğœŭÀp e˜œµ¤éĥ×™ÖĤ?k›•Û•AÓäĤ~µéAGž{Ċ.i6vßfӕ #|Òï1êj½ġçÙ4ù^ón[ä‰zċAY!חOğ´Òí™dĠïNĜcùj:ıöδ,Ĵϛ²ŝHÄyîÇĞ' iV˘²{/ë§A(ŭİ´0Xhë „Ms1ó.ç­[šÂÊIdµĥ€|ħVFT. ĦŞ÷y~\j‘° vÍrJĴܜ“Ġ™I{I]‹y˘x}´ë[tÒĴÊoŒ#–9Àüó5–™§éÁĊ½ ½ċ&3W}½)28$qž•|mjşJNÚiwm )áá –¤ğħӚùƒĊ½*ŜÒáŜŸu¸Ġ0 ıíӜĠżÚ ½O ]Xi’q⛸ĜC9ò3üDġù—IŞê:Üşĉ³q=ÍŬúK.ù,Üž}ëúSÀkçÖc‹÷ií 5où½NìüƒĊ>•aF—½5Ğ˙/Wײ?O?gÁâ?Ú+à…ž oŜ? 
êŜì•X#ĈҐğ´ŝ#ŜżLöÏJüŭ‰ġÈtoÛż@żğ“e›|ÒıĈaq×·\gï_ğÖz…ä62$à}Ş!$*z°"ż²ëáÜ£·üùÚu9+è˙à²y[df³™²}ëRôĈ¨9nġŽzÖ Š(ĤGj_çOEgl“@ ĈhÇ5£öCädrqÍShÊĥĊGߊš(^F^=jHá–$/W½Öml^;}è'”íŽ<òkĠáJ<Òv4§JSvHĜQmi餎%îÌjYŬĠ^ÚC$dgqÏá\Ȳ’ĉè\^Éĉž¨ƒî­i-Ŭµĵñ$ħ£1Âİ<ŸsC6ï%eĝ›N”ymY˘ŞXûS‰ĈBñëM3/DzšmuÜĉjÇà>ħrږħ“lÇ`lÌ â–Ğ ‡¸ôk)¤Ĝm9áH?ÌWumjşW‚uMRèŝûìÏ#ħê88‰Ċ|ħ­j“jz!žY˜Ü‘Ï;$˙™ȊïümDžmµ›ĵ}ŠĈĠÏF}¸Uüùü+ÍĜŝÓ—͢Èfı%rPĦwŭ×}Šöym;?Šß•Ï_„pÛ;­Ì´7çoÊçžjŠƒ\’ Kİ?÷Ñ5Ż{ll>ÛÀßë%Ÿs}@~Ĥh‘jxĈvل–V‘ÀKZ1QÓm–cĝšü/9ÄĊ°ô:Éı|˘ŸêUÖqÊħ˜§´" ŸœÚŭâiii~'rL1 á˜cÇêÂż™³eOëözĈ:/7˙{u)¨UŻ^ßĉt1(LÌn¤úÔwڌ:m›aÀœsŸġcÔûžĠĊIĞÏyz—ċŭD_ŬŜ5Ï_jp]jm ÷,,ám÷sgï²=Írá°rĉ–˙×ô…“³ùVž"G—Äş´P-UAŭïİŞ]ßĝĤîDèZ't’“âħ˘†÷ĊZˆğĵVÓü9hż$` vúú׈jtÒ4ĜVÇOG+ò·Í(>‚ğpĜ µëòÇY~_ĉzT°²”½ß‹ü ñtm.â {%ßol]OİÔúתĝ[\µÖ´÷û ĴÚŠ żBädŻıëä- F½ñĵ—†H“ÀUġŻ­ü5 …àˆ×w•lNèÁ<·ż]™î ‡£qÖa™PNšŠÖGc\·â­F´ieóYN8ĈKAMÖġĝ­ô&08óYrGp?ÄיèšDş×Œ ö˘ĴÑCÌvç˘úgÜ׋‚ÀÁĊÔ­¤WŜÏ; …‹‹M=_I½ž˙OŽò{cgĉ Â79`;gŜİÙĝÇG½Öu g–W²½”/ÈNq€kñ݈î-ÖheŸSıù]dĈżÒ°#ĦÑíÇÚµ‹‚ "uf>µĠK-ŒéóÉYËá]—wämOǙ­ö^]ÙÒ\+x›ÄÑµÚżĜ}Éxlt'ÛükCXıĠ.ƒ˘,íâáxXĤżaWôMĉ-/÷Ż‹™Gï%Â=şRJ‰m£ÛĉĥX¨9f÷5L\c4˘ŻË²éêgRĵc+G[mŝeoè6ZœaµŒX~öb9cŝ‘âoèŝ„nݟîAËïè+’ñ·ÄH,m/K™dĵJcoġ~ÄúטxKEğñgˆ&Ġ/äXDäÉ;ŸGa]ĜLŞU“ĊcQüYĠ†ÀsGÛ⟋=+MmgĈÚ²-Û5–oıXò‡´yôŭŝ•èÚÎħcáż Iw Xà‚=°ÄĵdÀ_N’ÎÛFUĥ ”kòž›€ï^Gİß?Žĵq,2ĝvÌàɜ ?6?–i”1¸ŸyrR‡O.Ŝmœó^Ö[Z1éŭufŸƒËÏq¨xûÄsˆċĵ!,–NqgŒ~ż•{Ĥi<öñÙ²Ï ĊĵH§ŒWË:žŞĝç=—†ü;gq&Ĥ!YĉCĥn™'ĦÇa_AxrÚËH„ÙE*Üܵ]Pçi#½mŸ`â’İ7ïµu´c´SíĦ˜G•I½{vGtNSÀuU‰ŒN„ò~‚˘ğ¸”¸·ĥÈGÎǢ È´Šñ:*Êì|ÎIË`!_/J’iı3‰GC ğwŠ2‘ĉğíF#§żá^Kñ{▗£àġÖŻu2KŞHV›jÍóM&>ö?ş:š·ñ âN…˙Ÿ‰5뀖ĥı†ÎŬOÏu62Bŝƒ=ıŻÈOŠ_µïŠ_n5ŬffXÛgh­ûğxÇ@Żİï_³ĝGám^"ĊĴN&6AëŭçüĞġ}6Üĝ>7âĜeXaIŜ´—ŝĉ˙Cžñ‰uo|A¸ÖuyĤ½ĵı˜ğn$ġ9Àôè Í: °Dó´‰ĤTÚ~Uˀ?%ÏעZĊ§xjçV¸Pdù1䃜}zVâŬ†°rGîô‚üŭWżÑ\‡ „ ”RJÉtWI/ëÈŝ:âlo×TžörWîù[‹Kï7~jÒi_´5•ÒÈc g:’ŽHġĉżyt[Íŝ8ĴnYD÷Áù Ïù5üü|3ŬĈ? 
…;~ÑĦüwè+÷×VP韴 1'iÖ×E@½ g=ŭi¨Gêw{ûßúLÌâÌf˙´Ĵĥ´ôİ‘íò–i76j#֙·HqÀ¨ħ_6{bRûQƒÒĴÁnÒ¸À;{qÄÎĝjĊ Ĉĵuîi­­`2§žyòÁçê})­ Éϵeí"ïf[§%ş'2apżTv‰À½1äK9Ú˘ĵ—Ĉ`ħŠK-6@óôià}+ÊÍóœ6_AĠŻ+/ÌìÁàŞâjrÁ'Š‹Ÿnn@ò\÷>ıü3Ċ~5Ì³"ĉž<°_ z]ùw~{aJ8,ırKY=ÙoDñĥİâ+Xmt­9—–y>êZİŻĝkÄVúÍĞg=ìê(ŬÓÚ½7LÒ´ŭFX-£H"Aó7sîMpZżÄ[8‡ ÂŬ¤şÇc´á ù!‡üSNq˙/8ŭ+ßuyçż²Ġ.%bóÏŽíè0IŻÀ˙„:sÎát£ô5û÷ŜUaċ‘ĝ˙‡6… –ŝx~§Ĵèúì£àĊœgÊ´ĥVy˙–Ó'è0úú×# ^/ ĝ×\'Ëj, l÷°Ĝ˙€Ş"ñ ĝQo6i˜0ĝGùŭ+"âv‹ÀV–€'ğyĜg¨POêġó™Ö9ĠB’ UŭZ>ӅòµB*ğZÎm/D˙ÎçSà;|£txÀX×ß9'ùγüW!ıñí­²ç(?sŭk£LDxf\—¸#ë€+žħ‡û[Ĵ1Èt·…¤#û‘‚Íú)ŻĊŜ+Ú縚ïᣗŬ˙LÏ '`°˙kU7é}?Ĝ< |S‹_Ô"ŬegžJŒÁH­zçĝÓ­ĊìŞ-Òpî_•TS¸Ż&Óï^ĉ‡K}Ì#à’y5ë˙ mßPñéÓa`ĴÉğ§\w'uŻĈĝıò:•_Ċrú%óÔŭ£„è?İJ³ÑÔn_}żÈûŠ ‘â½z *×ĝ~À+]²&aÑ>•ŻĴĥĞâ™-àÈôòÂ'›²§zÏñVıĤ|1ĝ$HuKıĈĜ‡ñÈärĜ˙=ĞÊ|# F ŭĤúÍBßñ/ĥ@xŬüMêÇİ5ü÷…Ë'Z“ÄF>â|ħó}_Żċò=j4ùċx-‹ġgk³i–ƒ*ŭa—9˙V;X~ħ·ş·›WĠn < ]¸vŝµĈ_ArÚĥİq#ÌÇ/ğ7÷}ŞĈ­q5›o6ğ!³ÓaQök8g€Ì×lSQT˘ìŜíoèż/#ن0§£Ġ˙ZG$Ç7‡˙²´XĤ"í2__Ï˙­ÒĵJÓ§ÖuAutvZŽ~cU@›WÔ~Ûz‰ °?ıxvݵ=v->Ém•²çîĝŝµôlĦeF>óŬö6Ħ‡8Z:[žŒ"Ò4˜Œ+N‰‡šGŜ¸oîoS^¤xŠó^ß}x‹*Ù ö&qŝ…¨kŝ"ĥ7óHì<¨AîkÙ5ۘtM>/é² oßĉe?w?Â+Ç̰4N:ÏĞìş˙üŽLN2šŠßôîuKt5 uííÈı•yrF{WAŭi x~Xa‘&żq——ĥóŭyîŽ&ZS›‹ƒúœÖĠ•„wĴk÷Ò lEoŸĝ›×é^єŝÓĞ˙‡8ëт÷^˧rΙgvâûOµ{íZ|ĉâ^=É=?LÒ­ô›ùoŻoĠeoß]7ŬOöPwúÓ|e>ĈÏBµ:~’ƒ÷“HğIAWœêŝ"ˆ_†Ÿ'™>v´î<{WE>"³q’µ÷ïo>Ŝ Ugu-;úy˙‘ì^5D‰í­2k̍ÔûŸJñïüLOĤTâ[û£ċ‰³’Npq^K›{9ôMä8û— òÍÜùóTĵ £Ûß_Á­x†]ĥVÄzí_Q€á|>ƒÄâ˘êÌéá!¤Ö‡¤ĝ3ÁÓ_XĦĞNÑZı3]O)9=k½ƒĊ6walt¸’Ë’£íĈĠ˙g?p—‰Û^š[+]ÖŜ·;X ˙\@è=ĞsŜ­İA¨j&•ĦÚ7ú4G‚Ú?í*‡n.ĥ%Ùô]żÎ_‘XŠînóÑ-—oĝ'yj·Ú­†4hċ2̀ŬËâŭ>§µh\éšg‡|; ­} …Ğ -ĵg÷’úûŝUçž*ĝğaáÍ`ĝsÀZtşœ„›ğçä'lŸSġàW…ĝ£ĈĈÖúċŸQ›\×çéW’6V1˙<Gµe€áü^%A(òEê—Ú~oùWn½ğœ£Ri-—â{í˙,´˙ ͧx~´Û|ôK&}?Ú<ó]g†µëĝ;½EĊ…°ŭäÍıĜħûÍܚù#SŭŞúo_ı3EÌ6ŭ½†=O­ĝ›S:†ħu÷’GÜ÷3°ÎĠ˙dp8 ï_ĝrçHħ× ğÖc´·*FÎp2O'è?;gĊñdАJ˙ٖ‘‹}:8P rĝéÏ­}N[á\q¸œ>•7?çÖIh£7ÛEıñ@ß~ħäñÖLZŭÇ%˙Ú?m›ĝĵß9!ÓÂ)Îq…ßÖż ĵÍmñ;ÀRŒ‚ş˘? 
ÏúĠġúWëÀŬwí]­É<¤J,ĉÚXóÀ˙=ë YʝJo¤[˙ÒQŽy U‹Ĵ—àĉ˙SôJPwœ*ĵúĠ= ŬxJ™_p9%˜û×7¨xóNµż’ÇN·—QżÎħŒŒŭkäñĝú&Ġi[§ŻĦô<=LTTİF÷Ôì’ KVžêDŽ598iµíSWıòtPÖvCï]°Á#ŭ‘ŭkZ]ŜĊŭ§â›µŽ îŽÑOÊ=3ŭúVwŠ$ñŝ?ÙĈNÛÎŜ%uŝ‚g0ÍĞN›Ÿ+QJüĞâ~żÊżÛÂ`a¨Ŭ7ŬìżÍf›Şé+âÓ˘ĵ7şˆ–BÛ°~µÑ^_Úéösw:EŒ–c_"Úê:N¤.áĊ:Ĥ“]ñnħŻ2­ċÁ(ÀxÜ×ÂĠñ7INŸï>ÌVÖóg³_†Üê§Ï§^ç{âï‰ŒŻ§é€Áċé ×Jş§ˆĉÛ}ï-›ŻÔ˙AÏÒĵ²ËNż2=ĵ³—{•\…ıŻL|K%˙‰´û]+ف#ˆü¨Hé‘×ÄċLïíó8ı&×"Úë~é|üÏC†• ˜{$·î{NmY¤ZdÓNQ&ŬĤAŝÈì=ëZŝ˙M4i.nĉŠŜŜ5Éf8Ĵ}{Ċú'†ü$ڍĊÄK ŻîÑH˟A_xŸĈ>&ĝ‘âÁegĉÔ!µ8ÇİŻÙ³ ß”ҍ8{ġÉ—D|Ž ‡Ìĥ&ŬÊ9a˙Ù:ùÚYm|EooŽ÷ĤġQè—yv^Gĥħ§KÙP÷cßĞôó5Ħê6 ofİgĦ"í’|`H£¨_o˙]xŻŽŝ+E 7†< Çŝ­îbıé„Çóü½k˜ñ÷ěŻŬ݆|(&],Ÿ-„K†ı=1ìżÎğ˙†˙ ˘Ò!‹WÖ£IġV•e`˙ëÖ#2ĴĜ7Ĥҟ—hù~g;pÇÚUߢŭ_™ĝß­‘§xáßċşş„ĥÒ9XñÇç×éŠU;ĵ#x;‹„?νwĈz‹ŬÛ\Nä#áTtP{xê’4{ĝŭOë_ӜWWŭĤ1íĝ£ñĦŝĊ)÷œ„’àĥioÎĜяâXÔsÊg’ÎàŠ1îKĠCÀÓżÚ/ÈR[ÇêîċıöŻ€ĞQòı>ˆŭ{Es(ŻógŞh2‡2\†,î§oé\&‰q$ZÍŭúħVKYIoMa˙ë­ısgrŬs†tÇ_RMqšzáíJQüm?™-˙²Šüó&¤§OUŭşœż+Ûġ?câzÎ52Ü2˙—Tyŝ|·üÑëú*˜ü)`œäÄ­}Ğû8xJĊ|?İxÂúDXъ~f$žßá_*xG·"ñ>“áûËM.ĜÉ@côö'Žïmĵ9ĤË᷅gŽÚ!Ú‹ŬYğûWó—~ħ7†„ĴêĥÛíê˙Èŭŝ G Ou½V<ğĈ>"o‰ŸäıgĝGìXÇcÎTòä{ž*ô}NÔ﵃Bµ&`ğMÁXG¨=÷ë[>ĴÚ ­¨†´°Ľ$™;1ô¨öŻ]ñ§oà?Ç&•ĤĈd•ü¸T/ʜg-ë_ĉ™Ŭ8TŽ ‡ŬK˘˙ƒÜíúĊ*v£Is=ĵŽ*ëOïŻ˙ik&­:ĉ4nIE̚mBê˙Ğ!—XĠĜí˜aŝ†+¸—Lĵ×/ˆ–÷mİÉÏ!ê¸ :Â˙ÄŜ4òĴьŽs$½w$ÖÁŸŒ|B–6~bÁğ÷ü#Üר\.“˙Àmonêod]ĤB>i×è+ĉkbċ‡ş^ġYôìpûGKMç#ÏġÛèô}>mD*"„böġ•ğ\íŒw^ •VY n7O+Ÿ—˙×R­¤šœF÷QXhѝÎìpÒoRk•ñ?‹aM0[[ ħÒâdë)ġoS^)%N òê˙­ßätÓ§eËúżëŻäjx£ĊÖÖúgö^Ž>ϧŻË¸<ç×é^ Ğĝ’RdŠ 9Èyéì)š9„ÜÜ0ûTœC?q}MpÓْƒ–cŜżBÈòJTavŻúħMF+•^Ĥž™gİİïşŸÉ´Œ3ó9ôĉ½9µ kİ£Óšî=?H´AöŽyoa^kĦFAuĴ\q l ŭéOË­e^™şRĊŽóğܓ^# ĞÖ·5”vòĉg(ivġ=ëGÖÓÄ%‹C͑‹M‡iĜu÷?Zğï…§.Ö3Q)²=Ÿv%ġÇ­s6­oà/„“ÜDñKĴ_ ħ(İÇ'wÓŜê^]HÓ\Hs“^6(§ˆÄıµû¸hŻĵŸVÎ8Rú"ÌzÍġşŬe"IĈ$òߝSĥˆÜ^*3|£ĉ‘aŜ݃†Üyŝµv!1(ùä#yöôŻŻTÔoʵ:š;íE¸ÖLÒ(‘™dÎ6…Qšâ5+éu=~îŝbwÍ!~{ıwt4˙. =Ù9ƒ˘ŝ'ùW5om½ t>ƒı­#NĞĠœôcy9ü‘ä)×eż½û Ú°ÛH·tÀy­ŒŭjİÎìž+úg-†8­Gñuİ‹ÇV­7Ĵ¤ßâ8ĴëڂíċlϘÓ(ôŻA3ɒNÄÊ38êX˙Jġ½>ĊÛÂĥWNÄ 9÷5ċ( £œäŸóùW½h‡f… •d~&DŽêiߢ˙#óo1r£†¤ÖM~g9§éâËâé%LĈ5(Ġë‡}˙àißöŸñD–˜ÊlÚz âIÔô¸£áIÑyĥĠ#ç8ÀŜ ĉĴĝ;êż|mĞJɎÑ߯tŞm‹Éڈ¤–­E/›Gƒ[=U–Ğèçĝ&}µá­KĊ:÷„!Ñí‚dx’ç=zŻAÓ4;˙èÖPhêî>|œ‘îǰ˙<כOì˙Xi: "9ü&¸ÛžĦGġŻkĵCÂV÷G<ˆg“ï3¤ĉż„hâ3jÔ>yĊżyí}£ŝgìTJYe8òĊ˘ë˘Ġ˙‘=ĥd¸[½AÖĉàr 
–?`*żˆüA§èÚD†á‘äĈ.í\w‹ŝ#AĤO5†–ñÍp¤†”réë^ İkwÚ­ûI<²M+¤çò݉xó/Ê*5N½uŭYŬ—dµq UĴíßÖĊZâÚîúĉm˘##– ;Uß ĝ:ó_½Yd o§Ğ|Ò°ûŜÂş x"][_?g²QĵDç‡Ğz ‰üyÚĥ‘áĴ[Ú¨Ĝ÷1‘螟_ËÖż/§•Ó­8óY?†j~½‘ïWÇ9?e‡Ŭnú#Yñ.‰àí%´} §Û†#‡ĠsíüĞÀµ9f¸³÷áÉ-šœ–y %ÉäžIŻNŻÙµ(Eŝ³›]< ”c´°ġ>ƒġŻ}ϛ֍:P´c²Z(£–ô°r“Ġîú³Ç´ x§ĈڌVżhğšÂ&Ĥ™‰Dö§Ú½’ëQÂOŭŠÊ(µO:d  }\ößÏ­G/Á è/˘x&( T¤—ÀÙġŝuó-ĊËËtóÏ+Ï;ħg‘Û,Çë^Ln,N8wí*ġ›Ġ/(˙™É1.ġ=Ĝvïêz&ñTO‰éâ]wĜœ,`öQÛúĠ_xÄżT¤×Ĝ~ĝ}ĤĝKH¸KE×÷×,9úAZdx,Ë5N››T›ĵŸwúŝBĈÖam%{Ħ‘ëᝧ†lÒŝü-Ĉë—r2"ö_ñŞ>/èŝ^ÚŜXî5gR"…œ˙O­s?8iŝÓĤÑ´IïZuÚ6D^çù÷ŞjşžżŻÏ¨jW]ŬLÙgsÓĜz ûŒN&†‡Ġ°ŠÖŬ˙]O*†Ĥ"~Öİòż‹äò´ûDè[síÀĉÒü“]Gğïŭ+şñ´gˆ£€tKqÀí“\%úíÖ&QĜôŻÚĝš·6.oµ—à|?aÔ2êIġMŝ7*’|•îj[ż!ﰁĝÔÂ*í’ı‰Y$P?:ĝìT”i6ÏÒ°0sŻŽÛĊmäx3HµŞä}Vfm%Ώ¤ĜBĦ¤Ġ@Ç÷ĥ…˙B5gÇRçW²·Ĝú’?tŝĥ<€?™î½If9ü”Wç¸jï ÓİüÎs%)~ˆŭs6¤ħS^ŸH*p_}8żÁÈúškĝÀ·úüŽa•˘Û% q=Éİŝé7˙>0F×ÊñĝnɄ×qƒŝ·„'ıcúfı݉"›WÔ´ŻèêÒÁnʞT#&iNu9ŝuġ'Âû]?ÀŜ 0Ĵڅĵ@ê÷jàĴ—g%˘Ô !IèXWòĉoV-ݍjġg¤|—TµûèZ¸§vг—ġv{…ŭġ†žg¸xàĥˆ` ôŻ$Ôï.uíIo/Ԋ6-m‡ñŸSZ—í>Ħ2ŬêŠEż[k@zûŸjĸ’âIš;@ZîA´²Ž#ƒùWċXJ ĉŬßëŻvràêż‘ÂĝŠÛVÖġÑĦi€³ÈœñôAŭÚët߅ËĤĜYÁĉĈò1w'§û#Öğ˙é6Z5£ĈĴ’jóO)ġôú ­­ĝˆ[ÚÍ ‡ïfPs'ƒUW5ĊT˙gÂè–ïğ;ŝħ'%lq(ĵ·µÔ`Ñl<˜>iħüGŜı{+ [Ċ÷“q ĴîF‚ĥtŜëúĵ ħTcîíŝꎤ[Ú·ŠôíĜh^€\\Íß7LûŸWu&èÁR˘ı§ßÏĞgC›‹TàŻ/ÈĠğÔt_‡Ŝ[KP'Ô\d/VcêĈ¸k[oíG›Ċ>1šH´Ôĉ Á”öP)ŸÙipoĝŞáoîŠÜĥZC^}âÏ}ßê2lââŜŬN}”Zĵ·/s“TŬä÷ŸŸh˙™thnÓßwú"ĝ ŬŬKw8Z)ĊµŞ}Ô€÷Çzò‹Éµ E§po*ŬŒzš‘žóÄZ›²–ŠĠ~óħ¨¨Ö͵‹ĉÑ4N,âï.ی×ŸLœ_§ċı}<\cĝ˙XI·>EÛó"Z²†³}&­,pnxâ_.%ġÇSĝœÖ‹ċÈÑĥ†ĉşùm#÷…˘’`³|™D#ŭD~żSüА(Zb7d,k· 8¸Ú Ñy÷dĦŞ2}…_³Ú·i2Ê95QW<ş:š•ÜĴXÇİÔġ +ĞžgşÔÏŜcÀô–;YċŒ/ÒĦŒœ3qĝw˘E3,ğN#Ž&b}‚Iŭ(IÊIw"¤” ßD§;ŻĤ>$­şl€ĝ)s+œu&šz×ġ%5d‘üV\ғóà ÊV;œšJ³"ük—ĥíˆYŽàUô݇íbáN…;F òJ˙èBqXŠÍ‘ÏÙ3Œ˙x˙ġëé‹9/xRÁTŸ,4ĜÇ<Şŭ’ï„`ĠJÉ/ëî?&ñĴ]+Íżëï:³Ç*@f,İó~QÎGá^áI.d¸ñ5Ä{Ä#I1ċ‰üĞ ›Zd| (R?ÈŻ¤ŝ ÜCi#‹3\jQ’ĝàSÇŝ=_K›û8Asğ^P^ĥw·ÜÍ°&äí­”˙oÔû›BÒ ĠüaЈí#1YĞ~úċ‡Ê£ÛÔ×ğ½{áÏĊ½ÁhúFż9ĥ…ŽĝÜ.GXd8L>'uŬ˘^6­HRr‚ğ>Ï„4_xpGk) ™§ĵçԚùğ?Çè´ĝî<;á9–[ӔžázG˙×âßí uŻ  ÂÉo`~YoC?²ú}kç'×zÇÚ.‹¤$ċó×éÙĤu‡ÂQö8{F+·èxXL êKžĴ¸šëPÔ&ĵĵšIĤ‘‹I,‡$“P™cÈİ­kL¸´ÔÚ%ĵqjŜĵ/ğeÖ ¤/U×ë_'WJ4ŭ¤žçħmğ#áżuMfQ“0 ӕÖ¸ĞÇ ­\89vğÖçAñ†£œüĈ˜>ݟŭ–¸F9ş•ğòkúC;—4ù˙™·˙“Hü÷…ĦÉKÙ½à£ü’ġ+×CĦ@dñV•éĵ;}3šçğWĦxVĊ#ZDœlĠ˜úeG˙_ 
Äŭžż–O·ê~ŻÂOo˜ÑOnx/½ßòF‹ĤóĵqrHĠP~_âkÒ4 ÑĤ|Wħx%´í!cŒz9@ ~lMyv§*ê.ž1ˆĵڟîîÀŭzo†­&Ö~)jĞnù%XħÀ=Iì&Oˆi{ ;RiúHŝĴûîœqùŬlCûuÓùETŸáhŸDü,Ò.gñ3ĝ˘tyoD†-,­;dî‘ŝѵ} xr PKsžíıHğ÷Ż,ĝ/X]Û>ı˜×HħĊ†”[ĝ˙Y1Ŝv$ûjú%nm.ŻäĥŠXœZ‘ıAÉ.yÇWñ—fu*†‹ĵa½ĥ²ŝŭ;ğâ1 ËEn˙äqÍe{İêĴ„ùjżëÇ =i X4¸G•™äâ%ĈIí“]̖ú~“$²•ÌŬ‹ÂıkkÀ5ÔŻFé$?¸Œ˙ ú×Ä:²­²÷VËı*Ĵ-‹WvǧŞĵÉ °Ŭ;ç“í\>£%”ÇËÉĥÓ!?1~vġ­Ÿêwğ4'eĤ0ĴÜs\³ßÜGif4­9äwP§% 7nŝG‚˘ùy¤>˙\Ô/í×EÑb6V-,yŝĤ’Ñĵ#˘5ĠÒ%Ö²ç÷Pç'ê}7TÔ­<%kö;&K­z@D9[o÷ż•r/ Z}§öÎZêòoš 2~f=™ŭ·zġéaâé¨ÚÑ{%ĵżçtcíĊ™ZîŻ+Ğëž!™ö!„p_ÑTvġĉr[_ĝŠĝêş°Lc÷QžŜÀWY-Ôĵ@úLjeylj:c°°ŞšŒW!Ôr}“K‹ċŽÜ}v_ÉAZ6VZ‹Ê=ߙÓĥ•G*ÏqĴ\ +FO³iŞq, cpİu½NǞ>ÑÊù†½œ}é°Ï ÏëWu½^ÛúöV›:„‹‚#˙–`˙Zĉ ûĊjuU™îd?ğ„ġÏİŻ Ċi)é‹ĴŸvÔĉ´Ŭ"çZĠÎàˍÇwCZß`a;iHÀ2-ă˘ñÀŻPöˆcÑĉeġoCÔŝBı+i§Áu<@=Í䤢u!ŸİĉµŝÔuŞÊ0ÙY/Sğ;Ñ #˘ŬͨŜ¸+ä?(?΋IŝÏÔ?µ5_aÈEoùi û߀<}A× †ÂóĊşËùb/’Ùœgkâı$WœĝŻ^:† ëùv o?q{ŸSë[aŞŭb­HĊù7çĠ/E§ÏÈÎR|ܨçu;ùġMz{İIw'è;ô޳§’3+rŜĵöĈDoƒ%×/€FĜ”Ž­ŝ5ÏÚ@ÂĈç[ı(}ŝ9ôQÏċ^Ċ i¨/v: ò"ˆß^\û˙ġŞŸY2}iÌĊ˜äçԚ8ÏĠ°ÇŸ–<ĵŬ>•%Á6ŜÔçž&Œ}ÉŭqP…g”cż Wµ8Ġ•Ĵ²<¸Ħ(ß\s[P’Ubßtr"ċBq[ÙŝGÌÇïpC½T‚3ӊt‰²êD'£Ċiêy+Ĥ0j­˙ŝUÍÜŝ•Ħ6÷ùĜÈ#¨˘¤•ƒŜJĦrEF=+UŞ0’³ħÒN˜ÖU:íħ€~jŸ_@éá¤Ġ4è“(£oŝ½x.ŭğ ?y’˙¤ sñPç²’} ŝ•úG FÊĵż­Ùĝ—O™áaŭmf‘šîfġrM}YŽËÈĝGe)à]];ôì_é_%™éüÍ}³àKCiǖçŒZ‰˙{-ŭkŸŽñÂaċöI˙àú´|Ç áŬzÎ?Ìáü ¤DÏD‘ĠpħŒĈ}kşßÓ ğ³‹OIµrbħ´;÷üâĵûİ­mDÔ5½MmĴag9ùœ•İ5ŝneycb–wI%}û.ŝgö6*•Óµ_…O6Ħ­ëï4žeċôïÎ$özUíWZĈm×Ö̑°pä)ô5ô…üaáû5••g½#癇è=cü@×ôhü9>›)R.4=b}+ô*ŝÇ •TĈf5ùj½Rż^ÏĞoËñ<8çnxˆÒĦ Äù5ÏkÍ8ĥPŒÂ29ú×CÍAsÜZ´mÜpkçòŠu!‡JqħêĠiËFycn.sÉT,À(É­ë­á.v˘–à­méÚ,p ’u ßŬŻVĉ8·´–5 ê@úW#â-]N$–ÙAF{ŠúoJ=÷Š&Há„n8iÙpÓÖĝ‹á”>†+•v½„œ;2ŭÊġı~:~·>U×úÜçz.~ÉËV|İ xPÂ{ÈÌÒ‘ş>µ×Ëköc°€¸wÚĆÚÀ-ŞíÈÀJÌò’äî F&ysM›¨F*ÈŞñÇ.7">*P `Ҝ˘ŻZÙOw:Ç nîO ÇR”n~[é{ŸánşyJ¤û‘ñ5ÈÁ`r0kżbŬw]|dù„ÙSŭ+Ï:ĥ}Ğû5…°ĝgŜ?Ğ˙3òü‚ñxĠÚ§ŝÛú˜($à ô­&ñtoëä!I`ÓÚ#žĦÔ(ÏàEp{¤z휒ĞI•›è&.+³<Ÿ4×ıÜŜ™pÇùWÉfxU^‹‡{/Ċ_ò?DÈħòÂbcQtğù¨ğ~em8yž!ĥbxóA$öç“^ÁàyŝÇáMVñ?òúvˆ7t1úĥqôŬë^9c@ĝ<ŭ˙^½żÁZT÷š•ešw`wf8Ż‹úêž ÷ŬòWgéžáŭcym)|ß*_›>µĝë?ĜÚµ5ĠáÒ!}–6[ϗ-ÁêûzqÇçí_[xsLK;ž\?–KO+‡“\6…§ZĝCá֏ĦĜ˘Ép‘Ĵq(ëen§ó$×I­ÜÜCeiá]=Ì·²cíRİŝ#ËŝzWĈw‹ž?R²ÑMÙ…uŭ_›?zÄÔu*rĆûPMkVšWrš-—Í#ÏC˙ר-ġ Hì.uŬSbFÙ[XO ôÏjòD×xcLRÖ×çœôfïšüA|.Q’Ĉ!µ ;Ò°ÂċŝĠ(ÇEù/óg] 
2•–Ëô˙‚\½ÖnüMĴŒ´v–Š8UĤ³îĵHşu”Ö:̤b[Î˙O­eZÇ6‚û5„c÷ŽÇzšÉY~ß|–Ù¤ÂħàżûDúWżO)§ËÌ׺şŸs֊‚—'n½Mï i1\ ÍsS“mµ¸-½ùŜŭ‡ıމk.ĞĴKw irĜŒžĴ{[ŽƒQŠ "Èşi6ëËWîĠÚokáŭ_Ŭ$bçgú,ĝ÷½y•ñ’„ċSy=ìżàîÈĞZÒó{#ϵ  M/y(›U¸Áéz}Oé^cĴês&ë ¸›’P>T'°˙ï×I×ŝ ĝÒXííôÄ9žá³€;Ժ֕§i·°he‘f˜ŭéı5ìċĠᇒöÒçݽş/_NˆPİÍ?fŸİäÚO‡áħv½qu~ÇsÈü…5&Ó{нÔĵÁÜ^ÄÖîĞŠqa •Û °îj‹\Á§41˘ Ŭ)3¸öÍ} kԚöYKoOëCı¸ÚËdtzÍíż†Gl@—[żMİäĈï1ü8ZóË/\Üj‘ ì-ːX7H×˙ ôL†Ôüm₷2DJÛBçƒ'aAŭ+ÎŻ5{ŬNúhírf¸bd“ĤĠ=Nk sŠ*{§ïKßEä‘ÀĞc­šë§v/żġúžuZ‹žIt_çŝGr"WñŒ(żĊ$9ï^óáY„SxŠàœ8Ó]Pûĵˆ‡ôc^%D|C´VĈÇ>Ê zċ-Żé U?Lçú ŭC‡İ7 rŝż3~2Żj”5Ú7ûôŭ khÚ[˜PI’`ƒô˙ûçLµĥöÖŞ0°[Şé…&m˜ĝ™áÛ\oW½Fa׀Ù? ŻÒ˙‡>‡^Ôï/XdF?‰şóí_âĤĥ3ˆ~˙²q˙Á³Œo÷BFÜGFSÙÔO˙ƒœ‘…ü}Ż\$Ò·°ĉŽ[ĜWşv“Ĥè:BĊmpÄ£ĉnçܚ}ĠĉĦèĈIž+Xb^=+ÀĵWğÍfY-l™íĴzÌ˙ŭjŝwÁċÙOay­Ï]­ú˙ÀGï•*âs:–Ú×ŜγĊßV?6Eey>ëÎ:/ÓÖĵRi¸ıy§‘•ŽY˜äš{҄,p9ŻÈ³Î1­Żí%ïK§eèż]ÏĤÁċ°ĦX˙‰Ŝœ‘ïV˘´fÁo”W_ĦxNóYœ$1àŝ)œp>•ŽO—f˜ÜTd“ô˙€uÖötİ·7dqP[Íst°ÁÍ+Q“^á†ĴÍĉıÀêĥú×Ħè^Ò´ `ÑDŻ>>yœdÖg‰|u§h‘400ıĵLJ§ÖżmËĝW—SúĈc%§NŸYñĝŒÎ­y{<:ù˙[DÓ随ıŒ6–ñŻq^/ jŜM:Ê0,ó̍Ġ•çŝ+ñ´“‡ĵÖŻ„0RÜ~½púŒt­zòH-ĜĊ0?"?VċgÜW_FPǖ’Ŭ÷_˘:p9T)IJ£ĵކöÜÜÙ4jpkƒ¸ÓçK’ĦäñĊz@óÒ´£{4ĥó$HÔŻVaÍ~oG2Ô¨àħôQ9oĦé^žr²]fŭ1óíUtŬOfQ£,Äò~Ĥ¸żüFÑôˆ¤Ž Vîäqµŭkçïx×U×ça,Íx‰}.-İZÒZ.äÔÄÒ£¤ugŜ˜7]jß²ùŒñŭ+π>Y8Ó-˙·ˆS#>NGä¸â1h2IŻë,Ê|ĜL7ĝ_ŝ”~S’ÒäÌqßâŝ’‚3ĥ`ǰ?ʖy˜>£ñĤ"³>gš–ŬCŜŒ€ ü…|ŭYrĊÇĜPƒœĠ’(- ™ˆÇ|àJûëහàOZŜ\íZ5˘™]†NTdeĉÓÑñ› ‚\şŞàs’ßŭzŭ-Ó>Ċ¤ĝCOі_-X nw'kŻó SÁÑÇyó|–—üì~ëáž*Żĝcĝ;žÓaĞ·2ĝ†ëjĊ ”°ğž›ŞkŬJ/ ĝFmby}sQ[Œä€‹é^wkrúŻKkX‡˜ñçċT3ŝ{×[á"o|A}R˙÷şmĦC}Ž€{WòŽ/k=–­~KüÏÛ4âçQéğŭm]XxF]sV‘Nۂ†•A\%íà^I*F½=Wwñ;ÄSñBèÚ{OħùNÓżËxŭĥ§ ^Ŭßî%-AH‰è_ı÷Ċ}.K„Z|ġ4OWú#Ò çìĠI/z[.Èé5½D[iÖŝ°v ×’)ê;ÇK¤ÛȖäÄ–UÚ÷WÛëX&Ÿq}İ”3\Nûċ'ĝGaùZġíÊÎÚûí—8kk¸żóÑ˙²Îñ§H˙_ĉüŞŒWÔé´m.ÛAâ^_ ÑĈG,}Msr‹ßĝÊ;hòí#`˘/żáV5]FR3\˒ğĥĊŝ#èt~  ^í‘ckÙ-šköqŸ"#ÂŻ³ŭkä"ċ Ԟ³×áùžtœĦQë&^ñ%ĊŸ„Ĥ™ĤŞÇ=ÀÚ ġc^ca§Ï€‚0&×5Ûóŝ­}OlêSÔïĉfîOEoÇ)†ÛKC³+5Î HŞ~TcÀÏ3_]ġşĠjBŠÖVNMmĤËÉ_#zÜܞÉ=^˙İĉ~/×g×u›oè¨ĉʵTrX÷cIo§E§éĉÚ"‰tƒĝÛ{ ŸHÓ£Óôy.$?éÓó4ßŬ_îsŜşè“ëŝ%†ŬWʀ|Ò1é¤×mlM:4ùbíîûĴ¸F)]ü(ĜŽ‹kia?‰5u §ZòˆGúçì|÷Í~÷Ċżnbù.…öôġÈW7ĜmF²O*Ù35ϏŻs_/ُ.ĉkçĈ#FÚ=ëğ†(JRž2˘÷ž‘]—o^˙ i9UŸ; 
Y–"-2ŠùÈŝ9_Ĵ/$ZèĊÛŭl½>•Żojú…ĉĉCğŻŻİĴFqsĞ”AˆbáGÒÛ§ıógDµÔM#NmS_‚ÓvÄ$™ûŞIüЧÒ,­§ñD×$ÒìqğĝħÓóĴ­)¤‚ÎäD¤ÜÜŻ”§Şġoé]Eͳiŝ·Óá\Ü^¸.{‘\¸ÊÒspú/ĠŝƒĤµżc7y£ÖScòûĊ"=;IÓtH1§™6;ħ9"c¨JĜÙœ’œú…ûè^žSiJ3èÚ·˘ÑŸÌäÌ$‡İn‰ŝGΒ.n•sœżġŻRñIx" ˘ŝŸŭjóB7jöë€2ù×ĝĜ˙Ċ'˙ÓÂ{ ŻêĵĤMíF£ûÜü!ÄQË"ğâhŻş5èyŒ„{Ħŭ*+aşú!×,*{áĥŭ—u~”Û Zzó¨GߌO>¤“§9÷G§H|Ÿ8%!SìN+½‚ċ,ôs/ˆŻ8şlĝ”2ò]UAëÔÖߊoĵˆtÍ26Ȩ̄jŭ3ŒXz5§ÙéêÏs<ı18j]Ż˘ĠŸIü„j?lŻQ~K[g˜{6Ô×è˙€ĵWa |<ıŠd‘žVż{ ×Áß4ß'KÖŻ™yH˘ĥSŽç,ßÈWÖqít˜ÌÌħĈ‰ó3_“ĝО×ËéJtßż9F(C™˙äÓ>ż<š–+ï|0Œó”ìżòX'éúk=ĥЉwwÌ~âŭ=kÏü+ñW‹ĈèחO<7*°Ïíé_Î3ËñX*Ġ]ÛïÔŭÒ…)(-ĵĤ’nċ‰8XaŒħÚ u&¤VW‰]OÊ ûWx‹Qı:ƒÄ…Äj}kç¨à0ô]sÔİQB7=·Àöú.ğâ)ĦžàJ€D@pŜġí׺Ž‘á͸– ;x—Ô ĝJ×5-Y[í:ċá¸ü¨ïQx·ÇşŽĤ|ŭsQyp>Hà}q_Ħd\KG„téѽW׿İòıŽ*·4§îö>‡×-ĥŻ4ÖzKgn }=+çŸ|F´ÓŜXlŸí·¤Ò•ú׌j~)ĵ-[ú)äŭjž‘Ħj ĠŜĈŜYŬ'İzÔk*{|lïċÑф)G’’#Ġu½GZżi§’f'…Ïè+ş…µ·×-ġ@ÎĠ;ߍŜÀW£x{á}†‹ wZĞÇ{z"!ÊİŝµßށQB¨à+8Ÿ‰<5hôğŭéaNüÓc³é^ ˙këSiİşÚĠxxßĝ×ĵW)ŻxGLñÄs]oŽEêÈ5ò?WƒÄŞĜ¨û½;ŭÇV&2œm|Éiaİëâ;Xfı‘Œŝµëŝĝaˆîu™pÁòSükÔô½OÑìŜĈŬ"QĠħó7ÔĠĞğÛ[c5ŬÄPF;ğb²ŸVĈ×öXxZú˙À8ŝFòzŸ^—nŸŻÇÉŬ`HıÈÖı‚~U^x­ßᵛ˜‹mW²›9ïµ ù­`žµŭwZ·5*qŝ[ŝw>+ †öxŠÓŝfŸÜ­úW(’q¸`Ġ›™Ĥlĥ<ŭ1ŭj²ŒÄŜµ$NcŽ\–M§ñ5ĉÖ\’=Ĵ3äİ?SĞ0—Vh÷y'°Ž‹Ù*ǽ­W/³é|ŜìΊ\Îouŭ›œW:G…Ĵü;¤²›ĉîYy;ÏùĊy-ŬĴ-µ•ğ™ŜŬuÔŝêšômQï ·’ÎÌ?ö½ßÏuv~í´~ŜġÎZÙZĜi'`/ó0ùĤïjö°ġ}”\şËúğŭ> ıŬ÷Ŭ™f'žTʏ?ğúע”mÂQhöd§~ĦŻ$˜Ó²~=MCá=헳êwH ˘y¸píü+ùÔħ?Tĵğ|ŠÒÜHz(ЏˆIòtçÓúô"jO‘l·<“â ñŸ²h¸ Żñ1ë^Iq–Xtëq…ÎdoJê|E|Óë“Ï;É<„[Ħ''ŠÙÓü7•n_PpÓ*ï¸'˘ž¤~+ïMa0ħĉß§›:c¤QÇêMábêĤ9ĤO.=B÷oóë\Umeğ”á’Hê{ ÒÖïf×ĵVĈ%>Vï.Ŭ@½ŞàħkŬfÓF³ĉˆ¸èÍÜ×ıG÷T½íŜŻÉġzW„´.‡&İp2°)ôÏ'úWE-‹7‰Öĉt kmSӁ]m˜²·‚Ôŝî9<…Ǣ˜×#âíGì^ğpvÉ/îÓñ˙ëWÉÇ[‰jÖrvôF˜iBTùÓş<{Wı}[Ĉ3ȟ1’]ħŭ: ‹Sĥ}3Aż³g 5€ÄvQÉž?*ÙµkĞ­Raû‹U$:µ`jWúòêV9/ĤçVĜ…ü0·ġ÷5Ï Gş<ĉ0ž1‚<`y‰Óë^TĈ“i97 Çü¸ÍJ<|NŽä‰Ñ•w~4+.ħb |~p1ž3_Ġü/ûÚsŸWEä҉üÇoêġİÒèħ ˙àŸùžY¨ĝšŜ³.Ŝ•›mÔb#ħÍ^ÔÔË÷˙§:û5TħPndr8D$˙*ô$ż§Ôña/öW~ߢ;ËW^'Òħ˜‚FyâĴÂNğñ°ù­í›Â˙ġëÊìZŬÑÎ`²f›› :ìĜ0Óï5)2Lûž¸êZúüżŭĤ­:=9ıŸ˘Kġ?8Î-ÖÄġPäĴ›żÜĴ}ËĈêÓEĝ=óċÜÜ<òŻ£ôŭk˜ñġ~Fĉk{\ü°Ĉpĝ֔uĠ—À{->œÜ]0. 
v<óù Ğx9@Y59ŝâ×âŝ)f˜o­ÒŒŬÚR—ŝ&˙+aáÎ<-Y.­G˙Š_Î:ËM½Ġ.v6zħWŞxoĥzUä77·W*A*ŸhbK²˜ma=Oĝ×-¨ĝÊXÇ`˜WġqxĴcċ¤­ëİúDc z½Ï¤Ħżk›Ĝ|£s:ċ“H ë–ÏP+’g‹Ž£b!@‘0;ZïŻġM>ÂÀÍy•òÙ/âóUŞù`úġ~‡U|d)è·=‡_ĝ™ih )<ùzyŻ}xÎİâOVğgıı–F'ž)–ĦİH6DʄòÌ8îÇzf™Ú/Ydp2K_£á0yvSZQĵöÏ2sĞYŬì~_ĞĵnY +`ŒƒĜŒCšNôî7JŝŒ<4uNŸm˙µĞ\ÈlNÛĊŬ™dŝW01ög’úÖĠŽĦySbz“‘ŭ+ œ:W%:œóœ{5ù&wÖ£ìéӗó'ùµúfƒ/‘ĴLÙ! ŸÎ}A#ċ_}x4-1n$ gml“\|Ħ‚{žy÷Ç|/E€ôˤHŒŻˆÜ’wĥ3ùWÓ÷şÏ iš=Ĵám"MÁSŝµħÒżñ)ŭn0QVnRû(èßО§ô†ĝы§{ûħŸŝŻà—Ŝv–Ŝ'YîùoÄĠ™à¸F‘ákĞġo.#0FĴqç#ŸğôÍqŝ$Öf†òŻnn(£QîÇ}|Ö:³ŞĠ/zZż$ĥ_ĞùĊ9×ĉ 6ġêŝ[/ŸsĠü%¤‰–]nñ<ğKq˜·˙‡Zñ&³.§¨d|FzúšëĵY­Ç£x:ÏÂö^_ÚV57Ĵƒ>3·è:Ÿ|zWhqbIġ9‰;PícÔçêz~5òXxûµEèż%úúúœÛĴ×§ġĉhê³AĤĝr 2\HCülz·âAZš]’i^]sVQ›xIMéĝwÍaivÛ~8+xĊ4ë@n5ĝ~½êM;_ñ#ĝŸ[kç‰môĞP#µÀüôŻBx Ԃ‹Ŭ½_éú}oŜT×Ğùíóf‡‰ü[&ħ˘ÙiĥvÍenTcŬœú~ÙĝwB°Ñü5mL̅À<aġŻ7—.ğKxĴÛċ?ŬQÖşx€jÜzœálm>YO‡ôŒÂÓ`èhžŻÓüßġħ•XsMRŽ‹vbŬ_BY`CċX†ßq'v=—ì+"&ĠµÈm-c$³tċʋhí-ÁŻ_öÛÔ×ĴĝOKĥżƒgñ.¨ڌE£FŻ úšŠĠ]/zZ%çßúèkV¤hSÑkÑ|E<^í§‡m•Cܕ<³žÇéŝĉ,Ԇ‘àˆtĥb³Ŭb{Î~lS[){öÍZ÷Äğƒm3LIëè£ëÒıŬGŸĈ^0ğñ>²Ĵš-ğ–Ž6àHŬ‡q]Ù~+ÔİŞŽŻÎ]ë÷É*jÒŬjŭN_Dњ/ĝİuXÀ¸Q‹HèÇ£cÛŻ×µÉĝËY+jt¨ùÒó;Ñ}?ï|U+êF+D Ü´H?\žĠäXñ4Ĥö}ʟ=Óú²?•}–{IŞ•6ŽÈì‹ċžìÌĥ€ĜX}¨Ħ7weŞw­]vƒfşW†Żġi>fŽ6mĜîC§Yİ}."ìLùV‰Ž­ĝÔûT2Ԃhz_…4ÔWşĠŻcµ@nà[§á^ĵa,V"~²w—”VŻîWg›œc€Ğ]oeç'˘üZ='°Iua¤&˙&;'şœ‘‚CĈ ò߈7˘}rŜĈv&XŻı<ʽòêŜ ü2|Gçb³˙<Gò'ñŻî‘µŞbŭw£Nd\ôU^ŭ~‚s)ĈBĤ"ĥ5ü½½^ż­ŽŒ%?aƒ….É/ı™•Ĵ§ö›M%@7 ĉNGQžßçÒĵô)0ƒÜŸÒşvġµïÏċ1ò ˜SÙQ{˙:”cOış#l{ĥF=…}–_M”yŝ)jŭ_ġĝ´M³ÄԋŸŒá+öÜŝ˙ġĞħñ4Ŝ"üC–{ΙĈyÄĝy~Óñ+Ì<áŬóù×{zÀx˙Lßv+Ÿ5½0¸'ôŭqäuéŻ³N’˙ɟùÀ UöÂU˙Ÿ•ħîĤżù#Ëġr£~;Më'üjı+m9–Âŝıŝ”ûç-)'Ò3ŸĈ’̸‰Mû›è?ÉÍêÙrÓwë÷Ĥ.nZÙ0]Ù#àuÇ˙^œ6‡˙BÑ£LĉD[ú×ϳŝÒñÔ.Ëş8ĜÊÙöé_k|$ÒĊǏ~Û"ŝîÚÀ˙´ÜÒç‡0ÍáĞV[ËŬ_×Ğ?"ÜjŽ"†¤}ézìż{Ĉ˘Ħ½şµŒW—k+šÚêKkhÄl§is]Fżâë+-JxáÄÓİÚ}ĞĈġ;¨jr\³qÎ+ùcˆ’ÇgĜšÒ¸Ÿ,}#îŻÁµpÖárŠÛ÷šğġ–Żó ÍNêòRÓLÎOż&b×ó°i£’ÎqĊeñžM9]É @ö5›§hrCÖO[³¸·Ġ­išµôS݆aŻCĤŠßĉf°µdùäŽ/j¤X‰B8Wk··òkĊ4´1 ıŻmµ“L“ĆÂ[ĝ"eûÌÍÇÓëIĴéŜ7k*Z£ĵc™\òĠçàŞŭV§5Xî:”ù£?!˜bLwŞäUQ’Nİî£)ŞŬ!rVİĤÛÈb½ŠAÉVżĤ&ĴÚ>j”ı’hê4[H[À^'ı™öË”ħGŬ‰-“ô~˘ı%˙Z=Î+§Òċcà˙ħ<·–N~­\Ú))¸ ñ0RXÄßù—ŝ‘ê38ħà­ü’ùRgI`Í$–srVĝíŸ˙]}SàkH {jê^Oɵ‰şK68ú…àŭqï_2ĝ^ĠµŸ‹°Ë;H3ÑF Ïá_HxƒQŠÏI²Ñ,ŝKxT÷>§Üòünj¤£‰§u²ĉùÉ˙À?uÒĤ ·+ĝËîÁ~· şÖouNE™ 
ıi¤ô™Ğš~­kk­ÛÁö;aĉJṡ°üë'HôyfˆĥO–Iü´r8Qüϵ?KħYfŽyÇîwgn?Ö7a_—Ö9èżöìĦ‡Ñw‹×àž{hìnX‘ƒ'fĉ­xCJ–ĝĴ)eMÉĦ[Hżxô3{gy?†,_Ä´+xzĉq’X$×ïğŭ\K‚Tzg…ükè™ÌzNJV˙´+wȈ0!Âö{‘_œàċR£COiÊO´ZÑ/_Â)÷8˜¸Ġ§8½£)'ò{ċìS½ĴÜ1{íI‹"·,ħç–?ïŝGÖŻj3ÇĤé‰g ‘€\ïü+ôMg>°.ĵWsĞI Ĥ~[hGŬ‰@¨öNr˙jĝ›SñUô-sĦh˜eˆŽ/.˜Ÿ*/}Ì2ÙS\t°ÄâTiü1ŝŻòÜôjĠö4ıê}Ŝoeù#OT†}'~ĥZÔÂŜk2÷‚>İ=¸;ˆġlvrí˘ß·Ñ|Ş?ĵ{Ÿ­m_Ksn/nŻĤûN·¨Èf›=3ÎÑì* Ö´§|ĦĴí`‡ŝZ·eüëĞ1ĞN„mħž IAÎ{½}_ġ˘ò:¨ċ˙„;ááqŻj‘ŝì½ gżħ=Ğ„*`o$g?4ÌOB{U›ÍNmG^ŸUş"IKbížĜƒŠ4 *]oÄâÄ@‡|ò“ÀÍxĤ¨SZŜz·ú‘ĵW"n_?ëÈë<áïí Z;Û¸ók-HûíWĵu G\‹D´ô[S‰ˆèÏÜ}lêíĥ‡á g° Íž£İl|ÒÀGêGy< Ĥ“u¨ÈàŞ ħ'–c~uDŽÊĴħS}˘ğÁg[İQĠ–ËEê2]:ëĊž+²ĤžĈ;ÜK~âşx‡Nô¸|/˘ñ ²lmżÄßçġĤi—)áOË:œkš†XžèĝÖ¸X"µQşÖu"g†ĉ°s“# rqĝdöŻ_ CÛâ#ü‘Ùw}d˙CXSğö’Ú?‹ŝ´FUÚ&ƒá“Ş^7ˆo‹XH˙P‡ÇÜöŻ9´ĥ’ŝïìÁżv[}̞µİâ=Fê˙W2Üî7·'!ü³NĠĞY-²Çd@߁-Ûúz/ġŻĞûš\Ïâ’ŭO3J0rnOvYÖ/ít}4ĥYGèqëĝœŸĈ²>hïâŻÚÂe %Ÿ‡4q&ċÈó߅ġ‡ŭó\³¨GŻŬê364ë%%z…Ż˘fíô˙€ú·5dı×µ.7³dy1Ÿ1oÈVıoì̏Šûr\‹ÖĤŽŜRûÑòĵGSÛâ˜%³—;ô†ß|šûŒz›ŭ/ÂöGĉ Q\ĉ­*x_à,%ĊÔbŜÇo;˜}I'ñŞ–7˙h GR,Zœ¤DôQ’3ĝ š§+ˆµß‹1éhû'I•Ĉ0ĵ·ôäċĜEJ0Oìyúïoĵúin˘yê[½ž˜~Ro.;¨?Yŝ!Ċ‡†Íĥ~hàf 5ĜéëŭĦ­^êÓĥċ”v ç^&Ÿí^Ż3ħ-\ç?Ô×Ŝà­Œ„uWäÌj{,Z˘ß܏.4~gˆîçĈBCÁ÷&ş}]ĥxŞÎA’Òb8îT¨ŝuà%NNùAüëg[ud•ˆÂÚ°äŭOôŻë.NxŒÉôNŠû“İüâ+T°Y"ë(â%÷¸ùS{íLçoëVÛ Ëž6ǀ}ÏU2âI?ĵÄĠ‘Ĉ’ˆÍ,¤Ÿ à3ZFW¨äsÊ£?/ĝ'Ĵü<²h—Ċ@g;TŸOóŠûcáġ·öGÂĞÍ^QµċBÀû_0ĝgIhôÍKš_-JÄ ˙ZúÇĊ…4?„–úlXRêħ\ÏòŻÓkUY^E*ìAż›˙‚>âÍĝ‡•m9Ûä´ü¸¸yîċ•‰Ëħ$ŭjŜĵÒb—m%ĥÛğ?§â’VCĠ‰8âT,ÀY@=*ċëZH^0ğû9œŻm GQ§ĝ7SżÓžà§”€eCZĴiŜĵ’ókäDÏ&³‹ġvƒpU"ÀU^^˘ë+Ği+ 8F%ï^;Ħ'k?Àê§Rz熭|5 Ĝ–QÉxú×?‡?R×/\˘ĥÈżş8ÌmܓOf’H}Ğĉ*NS—4µgzŞùyVˆ{9,NrÙêi ÜfımSĊvDÇ.&ħàVĥ­Aİéû]³.7ë[KZ4ŭ£Ž†Ñ7coŝ(5ĠSuÔ_èúġèŝÑ´½$†ÎŬ³91ükвĠÍĤiU"xħÀħĵCñ. XšMŽf=ÒŻO[܍íĝFqƒĉ=§Zñ.‘áŬ9Ĥĵž8ÛXÁË·ÒoñwĊKZi-Ĵ]ĴĴOSó8÷5çzŽŻ¨kƒMs4·ħîsR¤mŜĤ˙gˆò¨~ó~ô”0TpösĠ˙[#*ĝÙÔÒ:"Œ2]ËyĉĞ1ÁÉrk_Uñ-Í͢[#•U]ÀŭêĵÔDİä[ †ÑGr{šÍTy2MzÂ5œÖÛ<íh5˙â˘×z‚“ıé˙M?úġŽœL‡Ŝş m~Í]z3™˜sŝ5‹hUu8€ÚŸJŭËŞM>ìrùİP§%Ù~HÚÓĜëÇĠ˘ŝmQhV˘÷X‚Ù‡[ŸÂjê<­ᤞ Ŝ5£àħñ\RLû#@K6z˙|~-Ԇ(|WÓג)~'é9liUĈe°Ğ¨ûŜžÒmŝïÂù˘´ĝyq?]”˜çœċx^Ez<óM¨jÏ#`É+ô˙ZĵwÂRîñä„ Ğ0r?S^Á˙ëŽ|Üsí_œñëÚI=ıWê~Ñáök~y~†ĠŬıžÖÊ!şĈÔ8ǘǖsġ? 
şm!‡“&Ğp-âùm£Ğz×ch÷š”vëĠŽ]żş+Ùü'iàÛŬfaâÍf}EÓíË[a.ÓÊ9ç İ"ż:ĈE֒‚Ŭŝ_v?Rö‘£MÉŬÙ]œ†uèüûAx—RĠO!Ñ#t€‰b…P÷Ú:Ÿ÷}ëÚmuë­GÀP4›[›ÖûEï<·'bŭ1ÏÔûWÎö·6^5ŭħ5+ÈĴŝǤAbŸfĥ#$˘TŜ{ħûÍïš÷ËĞí>È#É(x"BîËÈv8ìô˙âĝŞN…8/}ӅÚ˙ żO¸8J*QÄNóöĤž’ŝfFµy40Aن’ŝñ‚"/ŜÁ8ŭzW´G݆ŝiZ%Z e”ùyğpÉîÂ/°'ĝy‚´In5iĵeĴ(°&Î7b˙{òé]Ħİŭéɧ.zŸZñ#/İÑp[½˙SÜÄGëU’é~eYŒ×ú¨„òÈÙcŭ*î§'•mnÊAÉġ=ÍMf£´ÛC_\=Q?Ä˙*ēìÈ|›†ċǧµ|¤¤ëÔĉèĥ˙3ъWK˘(ÜÏċ@Ğ̍òÄ?™ŻBP4 Zhq2Ǩ] ›PœcNĦsôĉĵĥÎu¸Ö~ĝ$pˆJßÔŻdòXHċ'ĉV'£ ˙>‚Ż€”½œ[ókòû·ġħ’ŭäüŬcW[ë°1ÚÛĈ#ĥŒž‹ê}Ï$×6¸ú†½m§ĜËşÒóç‚G­akÚâ.—7à³ħHy>Ĥıˆ/^ÇE6+6£|6 îĞÓ?5ôĜLĞÜĵ–Ú/ĠüŠj:${{ÏqİŬ‰·1^ èˆIŝµĈÜêÖ·wR3ΣG³&G­`0żΰ5VâAám>âIo§E[ÙÉÉ ŭÀŸċXĈ²êVÚ-ħ͵ıŬ;7zÛ –(+ßWĝ/ĝ=9s+l‘§o;náës¨\œ ğÇÜŝWò5ô_íu>ħâo xFԗžĉo:ESêvŒŭ&ĵž2qž7–_ŬŠuŞ|öż¤RöññıcúĉqˆĊ?†6§ûwYäÍŻ‘ç^‰|-;RñMĈRâHĜ[îÜÇĤ?Jòä›ìŸ§¸fó5-Zàċ-°3^İñeÓü)£ĝ^ÉħolŞ£ĝ›çóŻ6ŜœuÁ Ö:t[ß=0żâÜ~59ıèÔĈOŝ^;˙ÛĞá_İö1W÷‹÷ñ áLP°Ù<ǜ~uâŜ$+áŻ&FçU^½‹ŭk×|{yżR†ÍOÉÉÖ~ñMŜï‡Ò0f˙HÔ6 ìU-ú•Żĵàœñš3–îqoïżä•ÌÒ<Şĵz¸M/şß›C| %ì„}é@ĝŭz—Ĉ­—g÷’Ğ çëW<•àˆŽ0^FcïÛúWâûñwâ1ÛŜ={×ôç bpY•^²Ş˘żíĜ£ĝÏÄLžg’áúSıżû~mœĦûŠ1ÏSZú=ı½ñF™mŒŻ˜ĵ{g&²6ħMĜ8Íw´kŸ ˆ Œĥ}ú ġ2ĵ;Ż‹§Oğ_qóùö1arúµŻĴSûúZü2ÒÎñ>œnŠÍ ĴÚè+>$^uÛ[58HSssRü$ÓVÓÁ—ĴŞ]Êv“ŭĊà~ı/ėÇPñŭÉlĦ”Ş}îxŻ™*ZĊëQŻıkŝGäeÎhñZSOïz™‰Ž?:OŻ&“wÔÜ×óŠ?ĦE'9Í7İ⊑@,)€8=ë<T^̛Ċ8ZÂÒ´­FP@X-ÇŜšC…µfıÓ´'hôıšîóiú(ú˙Zó1íVĤéGvmMrfzö§g§@ZyüpƒİŻ7ĠĵMu|Z8‰†ü+ßë\ü³ÜŜ]–vydcߚèì|<Ğoö½VQioŒ…?yІŽ„JUċŭl‹u%=kew¨]‚7‘JíĴŝÉá‹gyçóïpbCÀúÖeçˆ"··6šLBŜ…˙‰Ğ“’I%˜ĵŒYs]RW­5ËŬYQÛskS×ïuHg1žNMĦxsRñ ÷—id瑺/Ô×ġÏêv+iĦè|@fÒÑ$ıa˙=’? 
ġÍwš”u[™g,#ĥ…3ış#ċ_ϟ 5ĝ·â.´*-~%˙“6żN eĜj”˘÷ÍĊ'÷´O`kbvü·™/sY—·rVÚ-£àrŜĉy#[$ĥ)7žĊħ$ŞxlvĠœ÷ñişĤ™hQ&şı¸Q°žƒı>ÂFŽs“ŒċĞûżDĵĊciaéûZÑÑ/6ôIy·˘á!#~·VŜW™eċÊAm,>§ükééÖ~#ñÔ3şµŽš͌tfêû sĝWÇö:íŬí­^iì†G–d y §İúúWżè÷77„,â²ıfĠµnuÏú˜2V5˙yŝf>Ċ}ëğ‹2yĈµÒiERƒù½Ûû’GÄpFbħq1WĉuĤżéäŻĞîÑêú÷‰£ı֗Oµ“m…ılë ĜT–+çÜĴŽĴcL|żŜ=‡\E”ħŬ\ş˘….ž@ŝñú˙*íâ‡M„Z Ë:Ÿ°'İ<°è=˙üż1ŒŞO’?ËüÏÒ(Ĉ4İé˙É5}VK{Ók+]çt²B{ĠĊëzĞC§=Ììİ>µ2$òʰ)ó.ĉ9b{w$×-(]wĊ&ßñ'ħ?3B;ŝ5Ġ–`)ÙԚ÷!ĝÄĠ“‹Tğ:o ùßÙCP½ù^Q˜cŝêúŸzĈñĥÒ-cƒ'úéŻ§Ò“SÖĵ)Œ2LDvè½qb¸òßڋĤ#ùÒDw]HCJ„nŸ\×§€ËŜ"Ĵħ^‹ÌU'iS‰&şiäùm`ù­M§ÌÑĈ·p3)ùm×§ò£R09ħl—sDÛï&Ċ!겎>ı­Û HġV şm‚îs٘vü+ÒĈòÒ\­ú˙]ÛT$ĉŻb “HßP¸9Ġ/3°7Tıópaµk{w&êàígïƒÖÖµżÔH‰Ĝ>XÇ ĴĞ9Q[‰ ,£=éá¨ÍEÎkWŭ/¸sièĥ.kqÙĝ|ÙÄq ´FY˜÷ fĵïCğŽ/ Ċ5ÁĤg“öÏ˙ZŻĝÊìÚxZâ33ÎÁd#ħ=Gà8ú×1ĦÚŬëšö‘˘éèdş½ž;+DےĊˆ\€=Í}˙äJ^ù”ĞòŠ×óüËx·Š–4N:¸Sv^rkôâÙ[ÂͤüÖĵi{Kï_3DÄçŭ,@X·ÂĴihŜ%ŭ |II½Žš µ–zd ?ç^ÇĞ‹?‡ß`Òl–6LÓÒĈÑQp%—sö˜“ĝ×-k¤Eá߁kfĝs.ùß³·Ìçó Wòögž<Ó4ĈcVĠçÉ(F×˙ÉTWÌúžÀĵ)k&µóoY?ĵùŸÇs5ŝŻu~ÎJÄäçûÌOùüĞÂzD)~ Eú÷˘”œÒWçgß=xp});ĠĞ{ygR4iž&’Ô4eĠï.,c¤)Œ^?OÒ/5rˆV!÷¤n­‹}"ËM€\jҐrĥèyü}*žĦŻMp† uօ8ĉ*^í§~ŸM­mdkyúV…ÛeKËÑÖFP}И½Ônġ ’óHÏè;  –lğ’Ğŭĉİ’4ڃ>ôéP„%Š]ÄäÚò òöòÜRŭÑO##t‡h޲Nʃ ]qRl†<÷ä֎› j:½ĈÛ;wuyħÀÍÄßĝŽÖŬŜ@Û5ġ$0hŜ"·„/ÌÍÁn+ĉĝ‡ˆ^\J”9§-ŽĴ6Ú]·dŽ +Âö{ßmĈ G~ ûzWİj÷:Éyœ…ŝSĝ’ĉŸ]Ïlí$O! ŸJÀ“ÉôŻ_†÷UZšÉ÷éäcRzÙli¤?~CêXŝ˜Ç¸.u‡ŞŝEóä‘ÓáV)µá×ÄŞIëÑ›kúħÒêšĊéM2 7\\ĥë§}>•ÛNĉ8„òÉ<ħˆÜÄöĊÛĈşMŠĞ7™İÜpÄ˙˙XWĦ&k`ú¸Ĝtàd †ş“ĝb_ö˜ñì2{Wupò¨Ġ8u{÷îÏÚċV4Ìú5ı›ŝ‰7)ñşğ`?4Ŭ cħ~ƒĜZáġ+˜ômÛFĥ ÜÍÌÌ wÍX›WşĠġŭSĈúîԞc‹H…‰Â"ŽÊ}+…’you nî\†˜ċ÷½JxxԚ‚‹9iİB.sĝċĝy|żà›~Àu%ËLA‡LOöş4ż‡o|z‘˙³`ò£ĉòÏÜß§ĊYZŭ”ĞċÙÄyóÜÖ9—;Ŭ˜ĵÌzúW°­¨ÄÎ1mĥÍÛ$6v€Ŭy9Ûô­½jél|;mĦĜeoşqùœ×.ĉqÍğ.Ÿw=Ğ^ŭ$³Ñ’è–Ôï×Ì ġŠߋuúzóŝ¤çQN[-~}‰UċV[³ nù‚  ĞEƒ§%>á˙kû߅!AojY†O°Ż;×ü@ó³éúcċ첎€z ÷òÌĥ6²§Mz‰yž{žá²ĵ3­Yú%ĵŸeŭhP×î_ñ…“dû£ˆ“$™=Ï úOöQŞxƒöıñ!ŽFÒ<3jM³ZvʧË?á_1é!4˙kš˜ËLÄ[DĜÁËg? 
?•~~Î> >ŭ•tÉg€ĊĞk_ñ0şÉçkŬŻŭóƒ˙ŻGĊ îUüÎêYç{›éçċ¤ğ~'5ì?/£Ç÷ZM³7Ùì#ŽĊx>Züä}X“^/_Ĝœ —û,ĥYo5Dï/Öß#ùÄ,ïëıXÇáR;%ĝ/ĊŠÄ³’iQ ̈9,À mnĝfÌßxçNƒ_4;}“üĞï0Ô]J‘§­#ó|n%PĦ:Ò oî'Ġ`72ƒM‡’ğ zë^ç4_ĝk”B>×p…sÀùcSù~µċÓ[SĝàQ°V9ŬÙ³ÀÁĈk×<5!żñ†Ğaüİ|Ğm¤Q1úWb1_TËëÖÙÎO€ŻKëü=-Ġ8/ü ĤŸ’lö}^_/š]žï™#Ëzĉ2jĊĠËŬNĝÀÀ[½~™+*­ïoÁ¤e˜5…Ĉšéú…¤y¤ĵz˜p3],:ÄVšhŽÊŠb0òžXŭ=+œŽ7•ÂŞ’O@v6•lĊöĞ"éö]F˙ŝÀwLW²ċŭçġŝf”ï} w÷_(’gcÏzĵlíìFnYfŸŝyŻAġĞ÷z­ĵ›m*oC'ñ·ĝWßŭjó½cÄz–µxÒŬNî áA ~ŜÚĉòċb‚)&‘FkıħlôğEıÖ¤O3ŞÂ§?eG/Â`eí$Üê>ŻVŭ;/AÊĴêh´G1oß^@óE ˜ÔdħE`ÜĠœq]…˙ŠÈO#Omà^0£­`Ë­ÜÁ ĵ„|ĊZô¨ĠÄËYBÉí˙ÎJ+ݧhĥĉö1¨Ü¤;ú°yü}+ĥ—QÒ4K/.-™î§$ŭMxûÜJòo,sëšcË#ŭö-ġĴĞċ’Ż$êONU˘>x×nßLñĥŸİ-êGP1ŸÒĵ˘äçPœúÈOë^ŬñFÎ+K DŠd’ğĥ;¸ŝuâ×â$ĠĤHœ8–Œ÷ĊHq2pÍjC“ûìρàg™ *ëvJëô+"$&ş&˙ÂOEF'òĴˆLcÌE#çb2IöcIÔ—Ğ´ÑÄf”‚ħİïšĝìâ3݃­FšĵœZ_3ôžĞK ™aħ£&ŭ>†Òínġ­z=>ÉüĤE̳˙Ï´~ßíqĝšıñċ!ÓlĵĦŞ@ÓĈ>Û0ûÉìOĞV÷ûÒ~KĞ_œŜ]“4Ïèà}xw‹üKäê—Ï™5Ĉ-#ŝ­z(…~‘`Şcó§JŸ½ NÎÌû¨ëoEŬŸÑ<[œ “ŭb*ë^êžüг–—ġ}‘›Żk––:lz”Û,âp'*yr:ŝXiGQµ“Pfx™°!°ï\NŒÖCÄĥíİ+Ëm’Jƒ÷›n}·c>ÙŻD×ġ˜ôÍ1mmJŭ˘D;xá­~›àêa & 7RwĵĉÛġü´?8àìf Va˜I*4’´|ġQKÓ[yğžsŞÁ5·ˆ.b•ĵÉdżŻší´›ijîÒFbLEıw'^żZâ/îî/&·šċ@anˆ¤ nUA>üWs˘jV ´ÒŻm-¤òŝváŽX‘Ï^ŜFšËéĈ­?h“IÚŬßüĉĝJ½YĉġeFŻħrMk½Úi6µÙîuÚgeş×…ĊÔi>z˜ÛîvÓĝßKñŭ‰il4K/˜G ˙Xçï3cıÇày]߆´ˆ˘M*ŝK{ÇûáŽP~5…u˘ëš[˜a¸[¨G$ÂûŻÏ^E’?‡'NvÙŜ?ÑúŸúÍÄxX*°]ci/ÂÍ|ÏoĠ5ÛmNdŠÚhĊŒC ĦşŝŠÒ´¸’8ÀŻ%‚îú2X”žù5ÖÚḛ̈*Oi2*ŒeyŻ'µ0Q˘ù—Ş>ğ&œ>-ŝŭr?GŝZ}çVîÒžŠ0 ¨ÇLÖ|zµŒŞŞ²'¨qŠÑ„‹™Ub`Ä0kÁІĞMÚqhû>7Y^œÓôgCáûY.'Ġ5E'IħPó(8ó›ĝ"Ġä=Ğ>òüŜjwşÖ"*—ÜsÂ粏`;z V_ˆUäûúWó/ÒCˆĦŒÎi`İżvŠwíÌ÷ü’=ï ²ıÀOSâİoımŝdWXµÑ..[Š"ÇÜú~uä>#ÓÀÓ|;˘ĈK_êSµĠß°Îúšġ½ZU:Rفıç•r=‡5ĉVóGâžħĴIÍĥ›‚ tÈñÏç_”pŝ.8lÜ~)&˙%÷Ÿ­áĦ.~n‘˙†_‹9?ˆZ‚[ħ‰[mĤ“k…à?ŭB[ĵˆL}ÇôŻĜx )–'N„Vµ¤£˙nŭ§÷s3‡ˆsHċıUlC ]½^‹ñ>vñĦ&âKĞİsÉ+;ġ$’MbSäbÓ1'<Ó+ûj…(ÒĤĦ‘üIZĞİ7'Ô+²ĴƒO°Öu—À0[ùp“ŭ÷éŠjû_NşéŠU`2ù²9fĈ~ž•é`kŞ}£Ŭ'o^ŸĉxùĥXŞÁm&ŻŝŬŭëO™è^v°O‰uÓ·í(³ĥÉĈ^Nä2 öíMnciŒmÀ~sóOêMyo†lÌëᝠ'PğV^äáü?{ÁTCӟZxÏ3thÒGù]ŭ^żĞ_#ƒ$Á޵ê×Ï’÷WàŻó#š^„ô3}6ż0>ÈRrh4;švğÀ  ù4Ûäžʧ#räUŬGYĵÔî|ëıŜG=aĝV}(]Ĉ@I5›.kj5&•‹[‰9íRĊ·Ĵq!bO ÒM4Aj·ïċ+ ˆ˙‰Ş´—„HŞù(§€:ŸİĴ=Ż6˙€U­ıÚĝz XëXœBD@üÍKyâ$†ÜÚiQ-Ĵ=7óĈ²µ ı.´ˆ$šG2¨Û‚y"²l£IïÑ'.2yj†Ú~ò³½ştû‹sĥ‘*Êešà³n‘ĜŭI›NďşĠ$6½pßyż Ġ—SÑtX|½>ğıòĠĈp}Ğ’żĠïu ‹O+z 
+UR½eh.X÷{ü­ïİÔÜkÖUħĥÑ T8Jyc\}ŬŭĊÜÌòÈÌIîj—&—ZèĦƒ§KUĞî÷&Sr­Šİu¨YYÄZĉâ8ñÛ<ŝUÏÜß\›}ĈIXñĉşôü+§‡İ5t´ïJ SD,쨣İcŠoíQ€]×O>k‰Ġn,4˜~×âÍqV\elámÒ7ĥyŻñFŭá{_ éñéPcâa™Hġö5ŬC,_‡Uße÷ġù”omÙħñ‚F:–‡c2É_l˙‘^%ċŻÚn ³Ĉ}kĜ~06uA’ĤÔò>µ°˜9#=~ĠÄògWċù#ó:?͓¤²E3o47Ñ[w‹lÔÄn%’Pħö‰Ċe<Îñ,|$` ?Zëĵ'¨ĝ{GñĤ‘İ_hMöyƒÉûµĜl äó_#™'%YB<ÒċvKví˘ùŸ}•ÉKF5'ËevöJêïä}#ñ Y bÓÒD[İ"İğ€Ĉq_!K3ÜŜĵÓÈKıË1ĉğ_ˆ^..ñß[‰c°Š1ş7\w${šàëċ|?ᷕe‘öŞĠgïKɟ/Ìúß8™fùĴ½ŒŻFŸğÍ.ż7ĝ-^8ġ(^OġjàšżĞŜ CWyŸ)T*ÖEö2ÂBUĠgşM/™ñÌ* ,2ĝdÓ~ĞoÌ×Ô-ZßNÓY¤ŜZ#ßîóœ~µħáûğH­&†îÖ …'#q/N†ı½ÓKn3½Ààdtİ ’Ŝ‹ÊŠEY7ᗿN{Š­€•L/²›ğ½î´ësĉ£ŽöôedĴġè–ĥ=6âÇK§Û­Ìïzë™v׈žĜ<= @ŝvaŭ“Żŭ£.qƒéßyğ\ĞÌÒ2²ıçċkz~Ò½ĵUH`0“­o‚/˙‚|ZĞS‰Œdŝ'ùŸĦż4$Òô{+HTA3ޏŸÎP@ÇŸ VÚÏÂsŜÉĥ6 ²=Ŭĝäŝ|~êš}í½Ŝ‹öÈ%i Á>c| 9#}küzµħıjï[I/Vġ?­r 8XSîŻè‘SQ>^¤²FBĤˆ÷üë‡Ótk½/JƒOğSöЉŒ÷9=*³Ô†ħ˜Š³4q9#Ž?É­ê gàÍ^í¤+…òĦçŽ??ÒĵĜS­‡”0ík+}÷²üî{¸,cĞu›ħòwg:·Šu›¸(Î--sÎ ü³ù×Ê?uHñĵ-Ĵ­4{e´P§ ɌÈï˘kê=ƒá ŻÊ—f­*ÎùqòÏšüŭĠ5 5b{§ff–BìÍԒsšŝÊ_)ZorŠċ_âvżÜ—ŝL~KVtİP—Áë'Í/E˘_ŻÈÍ÷˘ŠQÔWôaüèMbKÔFÎßĵŝÀ ŸÒ’%iġ×ĝž@?3HĴnàà·Ëĝw­ Ê]CĊvV.é$3B’ĥğÏDÙô€ìL‘^ëD"· Ú4çÚ½ œœš§ÙE§hÖÖP€ħāF;úšĵ‘é_šgY‹ĈbSĤËÓúÔß/Â*’ê÷ŝĵ†Òô§Àĉ™^QÚ£ç8¤§Ĉx  };DğÔ_tj÷¤c€+^FÓ4q²-ċàÊċSì+ž‹PşŠĜÀ“:ÄG µ rÍŭMq΍IËß~ïeúš)$´Üħ=Ä·3´’ğ;ԚEeĉĈZĞ– :äÔLìO\ ŬAZŬlžIË·ÌsP'½ ŸjÎĵĠtí=Òn£ ŭÀr*֝6Ŭ˘ 6hrNzÓ]Ò4-#ިêIÀQñ¸H˜YÄ#Oùé)Çé\eĈħ¨ê²3y­$CïË+„‰>¤+Ô£•U’ĵŭÔhİ>§İ_ĝ£N³RO=ŭ§ç\´ž!Ö5i-:1 =ߢĞó[­kFħ”³Ŭ>µv:Gn1Ÿv=kžżñVĞxĤ(ŒvŬ£‡úšġ¨ċpŠ÷WÍ˙‘Ĵ)_cןZCùúĉ&­x96öü}Ĝ×)ĴüL½ž³ÑbF²<·;w˙^VÌîÛ¤vsêĈ…ŒÈûYÛÑEvÓËéŜò÷ŸŸùlmì˘µéîš[ĥšVg™ŽYĜ–cĝžjħyXüЏv­/²Û[¸ûuÜŜ¨§sŝB­qj£ŝ%ú\÷²v’n½%NVĜÍ×Ĥ´‹û‹Ŝ-ñ|ž)];ušZ-Ĵ[8lî5ÈĴNŜÂşÓʲĈÈ9”éH֗ƒ€ŠàtŠïĊĉuqIüOİçeÙ J4ié²üLa …ç-K°c§è+M’dÀ’ÚQß8Ĥn‡?8tïÈŻ?ÛOıëŭ^—ò™ĴqĜTuch¸Ô6ĞR~óvÉ£Üş)  GzôİÊÉ&ġ‡ŠiL7úÄ?…dÜeÔÚ1œVÂö˘šWĵ´{î˘f- F~´£#ĦÇқžzŒ})à rà~ž€)˜ä šş§LGBÂúċˆeb?ùށ8=APY”`€zŸJ™$ÑtĉÔż2ŭĊä ÄYĜg1ËŭIïô³Éùêw_.`ĞħÁŝ#O˜yvˇ@ċèÎqSMAj–ċĠs~ìžßqcL{xµ5{„ Ф#=аĝ­-Ä¸[ìŝt÷“,HĝrĠÀ­ÌѤbín:ŠèüD^3†ÏżÙ”È ó“\Ŭ(TÀU„•ÓOŝâk—ÎPÄÂQî~‡ZxÂËíoyyiv0¤1ÚĴğZiϧZêġïÜ]ĝ:ŜĈÌE§Ĝ0 ½²`ıÁ'ñ5ùŻ'ŒġÍh5%Œ·âi0ŬFïŻ mŝ"GİxĈÇGğUwÙn›q•_9íĊ9g^K éIĈöĵ›èĴ–ŝwğ?WËĝÁVSWĥÉ|ï·à}Ÿúáżĥl`—(íHU†< ŝµÑ|EF—úvŠ'žëˆ³Ë“˙믛´ß âCêŸ$öĥA`eiŸ˜ñßıŞz7Ĉk~Z–Şèş{ÚĜMŭ•÷!˘G^Ó=OZü{À#<}*ğ§gë­£múЈŭ+  
4éMûîV^ÎköĦ×mĵ;àŭÀör)ż‘<ëÌÖíÚğ?ëzŸŒ(k:÷ˆ5¨'¸–vßpy€ħ ŝNĠĈàg‚kû7îŝÁÉ(áe.iüRkĴžŻäĥGóçç²ÍóJ¸—³v^‹aÁG”XžĝĊ&9ĊšPĜu'œv5öŸ4:N/p9Ż]ĝK˘rëZ•ŽÜyqgûçİAüëÈd`ò–PF{W×ÓcÑ~ivá1q$^tČ|ÏÏòÀü+Áâ [£„ċ[ËOó5iP@Ğ—8ôéĴċ¸è=)¤’Ù'&’ż=Hë *ğŬÛĈċZAŸAP> ŻÈ’J{`VŠœžÈj,ç½)ġëXí¨Ŭ¸ŭĠ“ûŞÒßêİi^ÎÑqĠÜ Ñaäû‘Ĉ>nž”…ĝÇŬçWž$H˜†Ö-Ĝ˙Ó6ÏÜxŽÍµÖż)ŭĠ%Ĝŝ5Ġ ĥoV˙3X“=jkË[t-=Ì1öœW3{ (IK8ŜêN€ôZò{Żĝ~"LPê:œž²°Oó5ÍŜĝÂúpRĈ ]:>Ÿğ]Íùœ×£‡Ê#{É7ĝÁ4+žĦ¨x“Pž&kğċÓ폧ükˆĵñ„{…ħɞf™·ÁGġÍyäÓOs1’ĉigsŬܚŒ öİaiÓVKî6TŸĦµs݉ĉß#MtĦáGà*…ĊíĊÈU–Fd_ş™á~‚ŞàžŠMJĥÓżA´VöEF”#„œu"£iĠÉ÷ĞĈÉPžu_İĠGȲÊ}q´~f­!Ê^f^ۗ?À úĠ¨í˜Ĉײ÷HP*VŒu ú–5vĠ~Ñp"‹Ì‘÷­W´iv2ö1“Ġ6R‚`Á†Á3ĵ'ġ­xaÔîÈHQöŸî ĝ×EžŸ§ÄĤXžîè˙\TSŜêr§—Ca`1šËÚ7ħÑì#ôòEIĵ/âHKetêĵ’‹ĵ~c"³Í†ıA†aŽ£+|)íú³ÜÇÄe{SbO]NA˙ĥPÑË˙|ĉĞÉ6 b+-TŒÑ×Ħ‹Ğ°ż4ÇİJpżöh ›Ét4JİċqƒlżeS“É`iŒÈrZàšġIîìĉ‡dštCsSK(´™ċ[(“ŽíMJR–¨\”á %ħçÓiPWЇİdÁaqš–ÛvÇeÏk׍Ħçezµ/Ü6|™Àüi˘>½ħĞŜMܳ\!˙rĦhĦ# #{+’$úžJ0_dĞċwïҐDOu1ƒŽ Ÿĝ!‚@8R~†·ŒÜä”aÙŭ˙‹rßòÖ!ġ&¤[2ÀĉâŬàGü*?*QüùSĵİBä‡QŝíSî…SìÉ˙³×ŜڏÄ˙…;ì1?Óm˙?áQG feó^P§İÛ[°iú[˙ŸvjÊU%ß7tŜÌ)#0³£ÓŞ6âÄĥIîs]Ÿöf˜OÊzФBš<·âC°d÷RÓ°V'Ĥr£İÇG éIJRFî3Šî<À­˙ë?ĜZÛ_*3L#"6TúÖ*†gAfÏ ËBéJœĠÓÜҕISš”wGuŞĝÂkïÁ§¤÷jiÌ· ž3\)'vrrzš•­î1óC(úİÎJş}r+  †4•“wûÍ+×İZI̍6‚7)o`qJÙğ.qWàÔµ[XZ;{ûÈcoĵ‰+?QŜœ–Z…ú´¨Qŭz/ô§$µfJnÉĠ,RùoÊ$‹ŬXuİĤ°ĵ·ĵˆŭAÍ$vâDɖ5Çfİ•H[qŞ3nÖ&´{fñ RË—j% Ñİè3Ҁ?t8ĦUKKĥP0xUÔWĞ!ħĥı sĥBÄÖğ?Ùُ—Á^¸n€Íĥ?ñêùìڒ׸ċo;– eï#żo‹:BÄJé—ÎŭeİIñv=żğÑžÛĤ˙ëW17ÄyŜ"‘xSƒĠ,?İ5Ê]ĝ†kğ‚ò[ÚĊ“È‚@?!^eĥâoûzç_°‡cş›âÙb`ÑĴ˘?í’Ġ›/ÄÏDmgn?ĜˆW50rÖo+µ.òŞ’Ŝ,’–Ä€gù×tpĜ)Sc£şñ§ˆïïuiÔtÄ/òĴ)/..d-s}päó—rjşKìşJÑHZ³öˆdAÇğûó;9ÈWDiB:F6-E"ÖàžY·QŠî8aÄò?>Jdà żÊǢŬJ£'ĠĞ]ŠŒ[ĜĈĈNNixÏa]2xVí†Zâ$ç`xP.<ÍB=¨çE*R]PñÑÜŝBYıĤMtÇA°Œ~òü7ûµPÛX$Ŝ\6²Ü°=sÁŞMâ̑?ĈŽ˙ İV+ıx- úé!Òġ €<Ğm×ĠëR?Ĉ€=ġèÏ÷"İsH¸Ñ“èré1ħŬ-ÚnïĈMiÁĦÀĝÚ.&÷ĈuñZéYĜùÎ?‰ù5xZj'iò N˜ĦĴp˙?CŽŝÌħ·aĉB™ô'&‘eğŠrşu²F¤cvŜkÑíü/n¸’êv8^2irí†ÚA Éüë?nşjn°Rµċîžs“ŬrÂ@\ V‚xzîĉ\ÍCŭĥÇí­èĊĴ"Ú,}óO‡AF”K{p×wâ“ÖX8½˙’ĠÄW̉,+Œ‡…ŝST˙án›‡=š½" ĝ˘ÔöÊ }ĴÙ ñ?+ÁnOÛ?"~Ğşg˙Ŝİœ,ŽżF§Ż†ġ8ž@3ŭúéšó[ŒsiüêÖµd\D§Ÿ=N–² ·ıÏż‡µ\}˘Lż\ŝğ§êş çwF`6“]Éñ%ĝ᠈}Ak\Ġno`&h£ Êâĥ êsĞœĜ¨áŭ›Qnç–lc.?‹8³Oħt·VûA‹V5„?iñ ¨\¨bX{WĤ‹ğa !²€…ğħu´QäċÔbï&ìcÁˆâ+,‰8?Ŝ59‡K‘yo>˜İċû$żòéjθ°·–,F 
mê+‚×èzÜÜğ2Çöw‡ß;˘d˙ġê´úFŽx·yuÉ~•@èìWŝ>M'ö3çŝ>8Ĥ£b]K­‘m|6ŽıWH½jü/|FbĠ ”z5ÑäŜ7OòçœÒN£Ëżt'ÔĠsÉu%S§.Ÿ‰…/†u$³G(ŻTWJ¸‰É1;z€ĠÙÂ?¨m>]‘{Ĉޝŝ3É ôj^]Ȗò˜(ÏÙñɧ]]7öTÑù{/÷k`éS %ă†Ğ]Ĝ¤Îö}¤¨éĈMÉ]™Î 0vG SK†D”ŽzÔa 펠֖İ$ÖğsóB¤×­){ÉcîĥeŽ•sOR?%CH9İœqÓŝ>•ÙìáԃÇݧÄ4ÄÊp1Ç&ì+7‚ ĈD§è)á î—?vàxwğ°>ÌĠ6<4ÊÀÍ.:c{Tó#^Iw_yÀħƒó  ĊHŸž—˜ċt˙–‘‰úµK&Ÿá/$™˙yİsÌ^ʧu÷˙À8-Ú@füĊ76ƒ§˜}2kİ—IÒC‡ŽPôËñF²›ċŽĠÛĜŝ5Ĥ†>ŭíct’W|,ÜrV’CóÙLÀv˙׃q -#CêO5Ħ„ÉÈòŽüâ³n=ΈF§ò˜"ÏBb4ۖú’ó¨\Ş„²·0ù֍ğÜF>ˆML4E+ÄÎÍŝĉ*⺚şU%öN­ë‘¸ËÇ3ìӖĉ)\ûµwİ ^ÈÄFcÇİĞ+áĞϞúsOÚı+ U퍵²óDƒŸğÔ֜_oa³N´09vÖZĝ~$ş sp p u­ÔƒO…0 œvĴ§Z=5:èá'×CÏâÒ5‹‰Uµ#˙žq Öŭ€q–Y8ûÒĥä+ —Pµ·bHËc@Ĵ÷žàníàSÙy5“œŸCĦS·,½˘~öXӏş‚uÁ[;RäÔ6úiş˜šòĉCÚ4äŝBĥżáşòOgxı˙žÎWô&°zPĝä9VQ]ÌĊ‘W÷P†%ŝ⸧q£[Ŭ~ùŭB5Áát›{d÷nMtz„êQ@ħ'¤qàW-lÓ ŜRĉúíŝ$ß͜7ö›şŝĉÊċŭ2Šîµ&,6 5î áéâM^Gı›@²•œ4Mi°?³b9éÒ½!h“ïMŒwBjÂ]¤xógl{Ĉj½³KŬFV„ĉÙĉGÚáùĊ÷…(ö²G:L]ĵ?Ĉ½Uµ)H`|ƒT%Ô4İ$9½ÛŸLñDkÔ}¸ĉüQçŽjŬ[Kˆ÷…8hZ‚}ëe˙ĥ‹ŝ5Ŭyú>òŭŽ~´ ´ü×AıŞöÓíĝ´›ñG4İQ2[İöeû4 `$`ÛۭχS‘,_ˆ&§]GALm{pqŭŝ{i(–›ûkï8orx³@*ġĵ:Œ`íĥœäç Wn5m+YE?áNŝÔÓ:}¨ĜNĵż”qÂQNüç.Ğ­œĥ|{VBëy˙`?[ÇWÒÀǟ!Çû&НkM }(ü gí$ŝÉŻ%%˙/?8Ĥ³‘#AŠamH%>§p­1ĴĜ8ùe˜Ÿ÷M8_Z·FœÀĝQÌ×ÙìúOñFRŬjħ†Ċ›râİϨêc‡· èG5Ñ}˘,|‹rçFÂĤ‚ċ™“Mžeĥ°ŝ•2Ş£Ż)IY|‘Ê@šċŜL6r?¸N?Zµŭ‹â)ÛĠ×Ĝ+²ğîôĞ‚çü+sIµÔn.VXô]@F,AÇòŻ?˜Îšĉ²_×İĉVœĥNçmàŬfRıĥçĝKó]ĊŸŭrÒ\ϧéFvŝµéö:½Ž•QĤżÛûòàÖ=ŝİyİ\–šF#²Ž‚vĤu­+(˙ċİÍ*ĥßs=OÄş}İĥÓbÓ­@àĵqOYĝñ]ċÁ,ñË+üĉğ­3Ağż}û|¨GŜ‘¸½%ö‘ĦBc²Eĵĵd<€}Іy„!.ZtÔéù²y-dq6ŝñ4V‚ĉöKKx‡Mì?…I.ıŻÚÄÖÚsÛB ŞŒŸĈ§½Ôo5’ÓÈϞŠ{O.Ǜ& ·zGàRD—6!GÒÚÁ;½˘ŽES_ĵ¸%¤Ye'İĉş´MFÒŬn5)íà\ePŭĉü+~]KNÒ"0éħ‰îq†‡O b{‹›ë˘ó;Êì{œĞĠĞ´TcéŻü[Qóc#²ÚĦîO”żŬîjVıXÓeş—ıMV°÷ġ‘›˘ÌÌÇÓqƒóu˘ŠßÈLxÉp*ċ…Íäâ;x™ÉïŽ(˘ıħ5:nH¨+ğ/Ÿ¤'ïĜ^^ŝĴ‘ıïX7wóŬ͙ }ĠöQS„‚”ċĞc›³²+GË ˜öÛèŝı¸ 5î`‡û¸äÑEpĉĝʔb”4ı)=N²{Í–^\a|>êòÇêkĠ´QXNMÉD¤´ı\#Ŭ\à(v=+ĉïŒÏ_,ìády#³\$ñEïÖı‡/D™€ò¸­˜ĵ‡’-ä|ë_zxÂúD_ô-KUŠŬ•ì‘ö²ŽxŭMVÜsˆİ4£ZòĥžƒÀèÛĞÉ£Nĉ &ÎÁûâ1¸Ö-‰ŬÀH­·1ôíEó>Öxz/’OO6uĴEW/‰šïĦĝvÂĜ‹kğ²?ĠÇ! 
§Ü÷jOÓävÛn¨ıû ÑEi„ŻYǙÍĥüʞ.ĥÜÌÏ}J‘{4sïJžÒ$l 'Û4Q]SĊÖKI?ĵÚI½Y<žÑ˘½”hÇĥyާÂú.ú"QEE,myE77÷‰ÎIîeĜŸ›ŞmŞ=wz'Í:ö@ͧ°g—eŝTQ\™kŠ…Ĉf´ääìÙÜOáé@Üiv…€à–jáu; àşXéĥħö dš(Ĥugi)ĥüÛ*µI'drmá{&f2½–Ÿ„ô”xšCŝÓQE{Ï^ß1u%Üşş˜‡ä·Ĝ=Nš—e*>´QYKWù˜ıŬÉ˙³,£â0ùġÍ_Ó´YnoÚÇ$Œ{ž”Q\˜ĴMJt\“Ô¸ûÍ&z§‡´ŭ*×ízĴˆÌ9ÚOŝµ‘ĞxžIÔÛX/ÙíÇpOĝQExùl&ġjê×Ŭ÷UòhŽrŜÚâöè$Hò;ĠĠEĤéş4+>İ"ÍqŒˆçó˘ŠèĊUœĞŞ7²}‰„W/1—İx‚êġ<˜1oj8§›g§]_\„Š7rzœQEtVK Eû5bWĵġ:gèp‡uşğĈD*xZÁÔuğğöÙğÊ·v5à (Ĵ°•H޳ĠżëAÍÛDG§ézŒàEÛŬA^ĝvÒÁäQ<ŝĴ8J(Ż6ĈĠöŽšvFôiĞ\˙Ù././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/quad2quad.c0000664000175000017500000020617714712446423016275 0ustar00mattst88mattst88#include #include #include /* This code is basically the output of Maxima translated into C. * * See http://maxima.sourceforge.net/ */ static void quad_to_quad (double x0, double y0, double x1, double y1, double x2, double y2, double x3, double y3, double px0, double py0, double px1, double py1, double px2, double py2, double px3, double py3, struct pixman_f_transform *trans) { double t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22, t23, t24, t25, t26, t27, t28, t29, t30, t31, t32, t33, t34, t35, t36, t37, t38, t39, t40, t41, t42, t43, t44, t45, t46, t47, t48, t49, t50, t51, t52, t53, t54, t55, t56, t57, t58, t59, t60, t61, t62, t63, t64, t65, t66, t67, t68, t69, t70, t71, t72, t73, t74, t75, t76, t77, t78, t79, t80, t81, t82, t83, t84, t85, t86, t87, t88, t89, t90, t91, t92, t93, t94, t95, t96, t97, t98, t99, t100, t101, t102, t103, t104, t105, t106, t107, t108, t109, t110, t111, t112, t113, t114, t115, t116, t117, t118, t119, t120, t121, t122, t123, t124, t125, t126, t127, t128, t129, t130, t131, t132, t133, t134, t135, t136, t137, t138, t139, t140, t141, t142, t143, t144, t145, t146, t147, t148, t149, t150, t151, t152, t153, t154, t155, t156, t157, t158, t159, t160, t161, t162, t163, t164, t165, t166, t167, t168, t169, t170, t171, t172, t173, t174, t175, 
t176, t177, t178, t179, t180, t181, t182, t183, t184, t185, t186, t187, t188, t189, t190, t191, t192, t193, t194, t195, t196, t197, t198, t199, t200, t201, t202, t203, t204, t205, t206, t207, t208, t209, t210, t211, t212, t213, t214, t215, t216, t217, t218, t219, t220, t221, t222, t223, t224, t225, t226, t227, t228, t229, t230, t231, t232, t233, t234, t235, t236, t237, t238, t239, t240, t241, t242, t243, t244, t245, t246, t247, t248, t249, t250, t251, t252, t253, t254, t255, t256, t257, t258, t259, t260, t261, t262, t263, t264, t265, t266, t267, t268, t269, t270, t271, t272, t273, t274, t275, t276, t277, t278, t279, t280, t281, t282, t283, t284, t285, t286, t287, t288, t289, t290, t291, t292, t293, t294, t295, t296, t297, t298, t299, t300, t301, t302, t303, t304, t305, t306, t307, t308, t309, t310, t311, t312, t313, t314, t315, t316, t317, t318, t319, t320, t321, t322, t323, t324, t325, t326, t327, t328, t329, t330, t331, t332, t333, t334, t335, t336, t337, t338, t339, t340, t341, t342, t343, t344, t345, t346, t347, t348, t349, t350, t351, t352, t353, t354, t355, t356, t357, t358, t359, t360, t361, t362, t363, t364, t365, t366, t367, t368, t369, t370, t371, t372, t373, t374, t375, t376, t377, t378, t379, t380, t381, t382, t383, t384, t385, t386, t387, t388, t389, t390, t391, t392, t393, t394, t395, t396, t397, t398, t399, t400, t401, t402, t403, t404, t405, t406, t407, t408, t409, t410, t411, t412, t413, t414, t415, t416, t417, t418, t419, t420, t421, t422, t423, t424, t425, t426, t427, t428, t429, t430, t431, t432, t433, t434, t435, t436, t437, t438, t439, t440, t441, t442, t443, t444, t445, t446, t447, t448, t449, t450, t451, t452, t453, t454, t455, t456, t457, t458, t459, t460, t461, t462, t463, t464, t465, t466, t467, t468, t469, t470, t471, t472, t473, t474, t475, t476, t477, t478, t479, t480, t481, t482, t483, t484, t485, t486, t487, t488, t489, t490, t491, t492, t493, t494, t495, t496, t497, t498, t499, t500, t501, t502, t503, t504, t505, t506, t507, t508, 
t509, t510, t511, t512, t513, t514, t515, t516, t517, t518, t519, t520, t521, t522, t523, t524, t525, t526, t527, t528, t529, t530, t531, t532, t533, t534, t535, t536, t537, t538, t539, t540, t541, t542, t543, t544, t545, t546, t547, t548, t549, t550, t551, t552, t553, t554, t555, t556, t557, t558, t559, t560, t561, t562, t563, t564, t565, t566, t567, t568, t569, t570, t571, t572, t573, t574, t575, t576, t577, t578, t579, t580, t581, t582, t583, t584, t585, t586, t587, t588, t589, t590, t591, t592, t593, t594, t595, t596, t597, t598, t599, t600, t601, t602, t603, t604, t605, t606, t607, t608, t609, t610, t611, t612, t613, t614, t615, t616, t617, t618, t619, t620, t621, t622, t623, t624, t625, t626, t627, t628, t629, t630, t631, t632, t633, t634, t635, t636, t637, t638, t639, t640, t641, t642, t643, t644, t645, t646, t647, t648, t649, t650, t651, t652, t653, t654, t655, t656, t657, t658, t659, t660, t661, t662, t663, t664, t665, t666, t667, t668, t669, t670, t671, t672, t673, t674, t675, t676, t677, t678, t679, t680, t681, t682, t683, t684, t685, t686, t687, t688, t689, t690, t691, t692, t693, t694, t695, t696, t697, t698, t699, t700, t701, t702, t703, t704, t705, t706, t707, t708, t709, t710, t711, t712, t713, t714, t715, t716, t717, t718, t719, t720, t721, t722, t723, t724, t725, t726, t727, t728, t729, t730, t731, t732, t733, t734, t735, t736, t737, t738, t739, t740, t741, t742, t743, t744, t745, t746, t747, t748, t749, t750, t751, t752, t753, t754, t755, t756, t757, t758, t759, t760, t761, t762, t763, t764, t765, t766, t767, t768, t769, t770, t771, t772, t773, t774, t775, t776, t777, t778, t779, t780, t781, t782, t783, t784, t785, t786, t787, t788, t789, t790, t791, t792, t793, t794, t795, t796, t797, t798, t799, t800, t801, t802, t803, t804, t805, t806, t807, t808, t809, t810, t811, t812, t813, t814, t815, t816, t817, t818, t819, t820, t821, t822, t823, t824, t825, t826, t827, t828, t829, t830, t831, t832, t833, t834, t835, t836, t837, t838, t839, t840, t841, 
t842, t843, t844, t845, t846, t847, t848, t849, t850, t851, t852, t853, t854, t855, t856, t857, t858, t859, t860, t861, t862, t863, t864, t865, t866, t867, t868, t869, t870, t871, t872, t873, t874, t875, t876, t877, t878, t879, t880, t881, t882, t883, t884, t885, t886, t887, t888, t889, t890, t891, t892, t893, t894, t895, t896, t897, t898, t899, t900, t901, t902, t903, t904, t905, t906, t907, t908, t909, t910, t911, t912, t913, t914, t915, t916, t917, t918, t919, t920, t921, t922, t923, t924, t925, t926, t927, t928, t929, t930, t931, t932, t933, t934, t935, t936, t937, t938, t939, t940, t941, t942, t943, t944, t945, t946, t947, t948, t949, t950, t951, t952, t953, t954, t955, t956, t957, t958, t959, t960, t961, t962, t963, t964, t965, t966, t967, t968, t969, t970, t971, t972, t973, t974, t975, t976, t977, t978, t979, t980, t981, t982, t983, t984, t985, t986, t987, t988, t989, t990, t991, t992, t993, t994, t995, t996, t997, t998, t999, t1000, t1001, t1002, t1003, t1004, t1005, t1006, t1007, t1008, t1009, t1010, t1011, t1012, t1013, t1014, t1015, t1016, t1017, t1018, t1019, t1020, t1021, t1022, t1023, t1024, t1025, t1026, t1027, t1028, t1029, t1030, t1031, t1032, t1033, t1034, t1035, t1036, t1037, t1038, t1039, t1040, t1041, t1042, t1043, t1044, t1045, t1046, t1047, t1048, t1049, t1050, t1051, t1052, t1053, t1054, t1055, t1056, t1057, t1058, t1059, t1060, t1061, t1062, t1063, t1064, t1065, t1066, t1067, t1068, t1069, t1070, t1071, t1072, t1073; t1 = y1 * y1; t2 = x3 * x3; t3 = px2 * px3 * t2; t4 = (t3 - px2 * px3 * x2 * x3) * y2; t5 = x2 * x2; t6 = px2 * px3 * t5 * y3; t7 = - px2 * px3 * x2 * x3 * y3; t8 = py1 * (t7 + t6 + t4); t9 = px3 * py2 * x2 * x3; t10 = - px3 * py2 * t2; t11 = (t10 + t9) * y2; t12 = - px2 * py3 * t5 * y3; t13 = px2 * py3 * x2 * x3 * y3; t14 = y0 * y0; t15 = - px3 * py2; t16 = px2 * py3; t17 = t16 + t15; t18 = t17 * x2; t19 = px3 * py2 * x3; t20 = - px2 * py3 * x3; t21 = t20 + t19 + t18; t22 = px2 * px3 * t5; t23 = - 2 * px2 * px3 * x2 * x3; t24 
= py1 * (t3 + t23 + t22); t25 = - px2 * py3 * t5; t26 = px2 * py3 * x3; t27 = x2 * (t26 + t19); t28 = t10 + t27 + t25; t29 = x1 * x1; t30 = px3 * py2; t31 = - px2 * py3; t32 = t31 + t30; t33 = t32 * y2; t34 = - px3 * py2 * y3; t35 = px2 * py3 * y3; t36 = t35 + t34 + t33; t37 = - px2 * px3 * t2; t38 = (t37 + px2 * px3 * x2 * x3) * y2; t39 = - px2 * px3 * t5 * y3; t40 = px2 * px3 * x2 * x3 * y3; t41 = py1 * (t40 + t39 + t38); t42 = - px2 * py3 * x2 * x3; t43 = px3 * py2 * t2; t44 = (t43 + t42) * y2; t45 = px2 * py3 * t5 * y3; t46 = - px3 * py2 * x2 * x3 * y3; t47 = (px2 * px3 * x3 - px2 * px3 * x2) * y2; t48 = px2 * px3 * x2 * y3; t49 = - px2 * px3 * x3 * y3; t50 = py1 * (t49 + t48 + t47); t51 = px2 * py3 * x2; t52 = - 2 * px3 * py2 * x3; t53 = (t26 + t52 + t51) * y2; t54 = px3 * py2 * x3 * y3; t55 = px3 * py2 * y3; t56 = - 2 * px2 * py3 * y3; t57 = t56 + t55; t58 = x2 * t57; t59 = - px2 * px3 * t5; t60 = 2 * px2 * px3 * x2 * x3; t61 = - px2; t62 = px3 + t61; t63 = t62 * x2; t64 = px2 * x3; t65 = - px3 * x3; t66 = t65 + t64 + t63; t67 = px2 * t5; t68 = - px2 * x3; t69 = x2 * (t65 + t68); t70 = px3 * t2; t71 = t70 + t69 + t67; t72 = - px3; t73 = t72 + px2; t74 = - px2 * y3; t75 = px3 * y3; t76 = t75 + t74 + t73 * y2; t77 = px2 * x2 * x3; t78 = - px3 * t2; t79 = - px2 * t5 * y3; t80 = px3 * x2 * x3 * y3; t81 = t80 + t79 + (t78 + t77) * y2; t82 = (px2 * px3 * x2 - px2 * px3 * x3) * y2; t83 = - px2 * px3 * x2 * y3; t84 = px2 * px3 * x3 * y3; t85 = - px2 * x2; t86 = 2 * px3 * x3; t87 = - px3 * x3 * y3; t88 = 2 * px2 * y3; t89 = - px3 * y3; t90 = t89 + t88; t91 = x2 * t90; t92 = t91 + t87 + (t86 + t68 + t85) * y2; t93 = px2 * py3 * t5; t94 = - px3 * py2 * x3; t95 = x2 * (t20 + t94); t96 = t32 * x2; t97 = t73 * x2; t98 = px3 * x3; t99 = t98 + t68 + t97; t100 = py1 * t99; t101 = - px2 * t5; t102 = x2 * (t98 + t64); t103 = t78 + t102 + t101; t104 = py1 * t103; t105 = - py2; t106 = py3 + t105; t107 = py2 * y3; t108 = - py3 * y3; t109 = t108 + t107 + t106 * y2; t110 = - px3 * 
x2 * x3; t111 = px2 * t5 * y3; t112 = - px2 * x2 * x3 * y3; t113 = t112 + t111 + (t70 + t110) * y2; t114 = - py2 * x3; t115 = py3 * x3; t116 = t115 + t114; t117 = py2 * x3 * y3; t118 = - py3 * x3 * y3; t119 = t118 + t117; t120 = x2 * t119; t121 = px1 * (t120 + x2 * t116 * y2); t122 = - px3 * py2 * x2; t123 = (t19 + t122) * y2; t124 = px2 * py3 * x2 * y3; t125 = - px2 * py3 * x3 * y3; t126 = px3 * x2; t127 = - px2 * x2 * y3; t128 = px2 * x3 * y3; t129 = t128 + t127 + (t65 + t126) * y2; t130 = - py3; t131 = t130 + py2; t132 = t131 * x2; t133 = py2 * x3; t134 = - py3 * x3; t135 = - py2 * x3 * y3; t136 = py3 * x3 * y3; t137 = - py2 * y3; t138 = py3 * y3; t139 = t138 + t137; t140 = x2 * t139; t141 = px1 * (t140 + t136 + t135 + (t134 + t133 + t132) * y2); t142 = y2 * y2; t143 = - px3 * py2 * x3 * y3; t144 = px2 * py3 * x3 * y3; t145 = t144 + t143; t146 = t142 * t145; t147 = y3 * y3; t148 = px3 * py2 * t147; t149 = - px2 * py3 * t147; t150 = t149 + t148; t151 = x2 * y2 * t150; t152 = t151 + t146; t153 = - px2 * py3 * y3; t154 = t153 + t55; t155 = t142 * t154; t156 = - px3 * py2 * t147; t157 = px2 * py3 * t147; t158 = t157 + t156; t159 = y2 * t158; t160 = t159 + t155; t161 = x0 * x0; t162 = py1 * t76; t163 = px1 * t109; t164 = px2 * y3; t165 = t89 + t164; t166 = - px2 * t147; t167 = px3 * t147; t168 = t167 + t166; t169 = y2 * t168 + t142 * t165; t170 = py1 * t169; t171 = py2 * t147; t172 = - py3 * t147; t173 = t172 + t171; t174 = y2 * t173 + t142 * t139; t175 = px1 * t174; t176 = t17 * t142; t177 = px2 * t147; t178 = - px3 * t147; t179 = t178 + t177 + t62 * t142; t180 = - py2 * t147; t181 = py3 * t147; t182 = t181 + t180 + t131 * t142; t183 = y1 * (px1 * t182 + py1 * t179 + t149 + t148 + t176) + t175 + t170 + t159 + t1 * (t163 + t162 + t35 + t34 + t33) + t155; t184 = - px2 * px3 * t2 * t142; t185 = 2 * px2 * px3 * x2 * x3 * y2 * y3; t186 = - px2 * px3 * t5 * t147; t187 = py1 * (t186 + t185 + t184); t188 = px3 * py2 * t2 * t142; t189 = x2 * y2 * (t125 + t143); t190 = px2 * 
py3 * t5 * t147; t191 = t190 + t189 + t188; t192 = px2 * px3 * x3 * t142; t193 = y2 * (t49 + t83); t194 = px2 * px3 * x2 * t147; t195 = py1 * (t194 + t193 + t192); t196 = - px3 * py2 * x3 * t142; t197 = 2 * px3 * py2 * x3 * y3; t198 = 2 * px2 * py3 * y3; t199 = t198 + t34; t200 = x2 * t199; t201 = y2 * (t200 + t125 + t197); t202 = - px2 * py3 * x2 * t147; t203 = - px2 * x3 * y3; t204 = px3 * x3 * y3; t205 = t204 + t203; t206 = t142 * t205; t207 = t178 + t177; t208 = x2 * y2 * t207; t209 = t208 + t206; t210 = px2 * px3 * t2 * t142; t211 = - 2 * px2 * px3 * x2 * x3 * y2 * y3; t212 = px2 * px3 * t5 * t147; t213 = - px3 * t2 * t142; t214 = x2 * y2 * (t204 + t128); t215 = - px2 * t5 * t147; t216 = t215 + t214 + t213; t217 = - px2 * px3 * x3 * t142; t218 = y2 * (t84 + t48); t219 = - px2 * px3 * x2 * t147; t220 = px3 * x3 * t142; t221 = - 2 * px3 * x3 * y3; t222 = - 2 * px2 * y3; t223 = t75 + t222; t224 = x2 * t223; t225 = y2 * (t224 + t221 + t128); t226 = px2 * x2 * t147; t227 = t226 + t225 + t220; t228 = t125 + t54; t229 = t142 * t228; t230 = x2 * y2 * t158; t231 = t87 + t128; t232 = t142 * t231; t233 = x2 * y2 * t168; t234 = t233 + t232; t235 = py1 * t234; t236 = - px3 * py2 * t2 * t142; t237 = x2 * y2 * (t144 + t54); t238 = - px2 * py3 * t5 * t147; t239 = px3 * t2 * t142; t240 = x2 * y2 * (t87 + t203); t241 = px2 * t5 * t147; t242 = t241 + t240 + t239; t243 = py1 * t242; t244 = px2 * py3 * x3 * t142; t245 = - px2 * py3 * x2 * y3; t246 = y2 * (t143 + t245); t247 = px3 * py2 * x2 * t147; t248 = - px2 * x3 * t142; t249 = px2 * x2 * y3; t250 = y2 * (t204 + t249); t251 = - px3 * x2 * t147; t252 = t251 + t250 + t248; t253 = t134 + t133; t254 = t253 * t142; t255 = t108 + t107; t256 = x2 * t255; t257 = t256 + t136 + t135; t258 = y2 * t257; t259 = t181 + t180; t260 = x2 * t259; t261 = px1 * (t260 + t258 + t254); t262 = py1 * (t37 + t60 + t59); t263 = t43 + t95 + t93; t264 = px1 * t263; t265 = t26 + t94; t266 = x2 * t265 * y2; t267 = x2 * t228; t268 = t267 + t266; t269 = py1 * 
(t84 + t83 + t82); t270 = - 2 * px2 * py3; t271 = (t26 + (t270 + t30) * x2) * y2; t272 = px3 * py2 * x2 * y3; t273 = - 2 * px3 * py2 * x3 * y3; t274 = t149 + t148 + t176; t275 = py1 * (t212 + t211 + t210); t276 = t238 + t237 + t236; t277 = px1 * t276; t278 = py1 * (t219 + t218 + t217); t279 = 2 * px3 * py2 * x3; t280 = t20 + t279; t281 = t280 * t142; t282 = - px3 * py2 * x2 * y3; t283 = y2 * (t125 + t282); t284 = 2 * px2 * py3 * t147; t285 = x2 * (t284 + t156); t286 = px1 * t103; t287 = t98 + t68; t288 = x2 * t287 * y2; t289 = x2 * t231; t290 = t289 + t288; t291 = 2 * px2; t292 = - px3 * x2 * y3; t293 = 2 * px3 * x3 * y3; t294 = t293 + t203 + t292 + (t68 + (t72 + t291) * x2) * y2; t295 = px1 * t242; t296 = - 2 * px3 * x3; t297 = t296 + t64; t298 = px3 * x2 * y3; t299 = y2 * (t128 + t298); t300 = - 2 * px2 * t147; t301 = x2 * (t167 + t300) + t299 + t297 * t142; t302 = py1 * t71; t303 = py1 * t290; t304 = 2 * py2 * x3; t305 = - 2 * py3 * x3; t306 = - 2 * py2 * x3 * y3; t307 = 2 * py3 * x3 * y3; t308 = t307 + t306; t309 = - 2 * px2 * py3 * x3; t310 = (t309 + t19 + t51) * y2; t311 = - 2 * px3 * py2 * y3; t312 = t35 + t311; t313 = x2 * t312; t314 = 2 * px2 * x3; t315 = 2 * px3 * y3; t316 = t315 + t74; t317 = x2 * t316; t318 = t317 + t87 + (t65 + t314 + t85) * y2; t319 = t106 * x2; t320 = px1 * (t256 + t118 + t117 + (t115 + t114 + t319) * y2); t321 = py1 * t216; t322 = 2 * px2 * py3 * x3 * y3; t323 = 2 * px3 * py2 * y3; t324 = t153 + t323; t325 = x2 * t324; t326 = y2 * (t325 + t322 + t143); t327 = - 2 * px2 * x3 * y3; t328 = - 2 * px3 * y3; t329 = t328 + t164; t330 = x2 * t329; t331 = y2 * (t330 + t204 + t327); t332 = t226 + t331 + t220; t333 = t116 * t142; t334 = t140 + t118 + t117; t335 = y2 * t334; t336 = x2 * t173; t337 = px1 * (t336 + t335 + t333); t338 = t26 + t94 + t96; t339 = t17 * y2; t340 = t153 + t55 + t339; t341 = px2 * px3 * t142; t342 = - 2 * px2 * px3 * y2 * y3; t343 = px2 * px3 * t147; t344 = py1 * (t343 + t342 + t341); t345 = - px2 * py3 * t142; t346 = 
y2 * (t35 + t55); t347 = t156 + t346 + t345; t348 = px1 * t347 + t344; t349 = t89 + t164 + t62 * y2; t350 = - px2 * px3 * t142; t351 = 2 * px2 * px3 * y2 * y3; t352 = - px2 * px3 * t147; t353 = px2 * t142; t354 = y2 * (t89 + t74); t355 = t167 + t354 + t353; t356 = px1 * t355 + t352 + t351 + t350; t357 = py1 * t66; t358 = py1 * t349; t359 = 2 * py2; t360 = - 2 * py3; t361 = - 2 * py2 * y3; t362 = 2 * py3 * y3; t363 = px3 * py2 * t142; t364 = y2 * (t153 + t34); t365 = - px3 * t142; t366 = y2 * (t75 + t164); t367 = t166 + t366 + t365; t368 = py1 * t367; t369 = px1 * (t172 + t171 + t106 * t142); t370 = t35 + t34; t371 = t142 * t370; t372 = y2 * t150; t373 = t372 + t371; t374 = t230 + t229; t375 = py1 * (t352 + t351 + t350); t376 = t157 + t364 + t363; t377 = px1 * t376 + t375; t378 = t75 + t74; t379 = y2 * t207 + t142 * t378; t380 = px1 * t367 + t343 + t342 + t341; t381 = py1 * t209; t382 = py1 * t355; t383 = py1 * t379; t384 = 2 * py2 * y3; t385 = - 2 * py3 * y3; t386 = t385 + t384; t387 = - 2 * py2 * t147; t388 = 2 * py3 * t147; t389 = px2 * py3 * t2; t390 = t389 + t10; t391 = x2 * t390 * y2; t392 = t5 * t228; t393 = - px2 * t2; t394 = t70 + t393; t395 = x2 * t394 * y2; t396 = t5 * t231; t397 = t396 + t395; t398 = py1 * t397; t399 = py2 * t2; t400 = - py3 * t2; t401 = t400 + t399; t402 = x2 * t401 * y2; t403 = t136 + t135; t404 = t5 * t403; t405 = t404 + t402; t406 = px1 * t405; t407 = t1 * (t406 + t398 + t392 + t391); t408 = t65 + t64; t409 = t5 * t408; t410 = x2 * t394; t411 = t410 + t409; t412 = py1 * t411; t413 = t5 * t116; t414 = x2 * t401; t415 = t414 + t413; t416 = px1 * t415; t417 = py2 * t5; t418 = x2 * (t134 + t114); t419 = py3 * t2; t420 = t419 + t418 + t417; t421 = px1 * t420; t422 = t265 * y2; t423 = x2 * t154; t424 = px2 * x2; t425 = (t68 + t424) * y2; t426 = - py2 * x2; t427 = (t133 + t426) * y2; t428 = py3 * x2 * y3; t429 = t20 + t19; t430 = x2 * t429; t431 = - px2 * py3 * t2; t432 = (t431 + t43 + t430) * y2; t433 = t5 * t370; t434 = x2 * t145; t435 = 
- px2 * x2 * x3; t436 = px2 * t2; t437 = (t436 + t435) * y2; t438 = px3 * t5 * y3; t439 = - px3 * x2 * x3 * y3; t440 = py2 * x2 * x3; t441 = - py2 * t2; t442 = (t441 + t440) * y2; t443 = - py3 * t5 * y3; t444 = py3 * x2 * x3 * y3; t445 = t5 * t287; t446 = t78 + t436; t447 = x2 * t446; t448 = - t2; t449 = t448 + 2 * x2 * x3 - t5; t450 = px1 * t449; t451 = (t98 + t85) * y2; t452 = - x2 * y3; t453 = x3 * y3; t454 = t453 + t452 + (x2 - x3) * y2; t455 = px1 * t454; t456 = t65 + t314; t457 = x2 * t456; t458 = (t78 + t457) * y2; t459 = x2 * (t293 + t203); t460 = - x2 * x3 * y3 + t5 * y3 + (t2 - x2 * x3) * y2; t461 = px1 * t460; t462 = t5 * t253; t463 = t419 + t441; t464 = x2 * t463; t465 = - py2 * t5; t466 = x2 * (t115 + t133); t467 = t2 - 2 * x2 * x3 + t5; t468 = py1 * t467; t469 = py2 * x2; t470 = (t134 + t469) * y2; t471 = - py2 * x2 * y3; t472 = x2 * y3; t473 = - x3 * y3; t474 = t473 + t472 + (x3 - x2) * y2; t475 = py1 * t474; t476 = - 2 * py2 * x3; t477 = t115 + t476; t478 = x2 * t477; t479 = (t419 + t478) * y2; t480 = py2 * t5 * y3; t481 = - 2 * py3 * x3 * y3; t482 = x2 * (t481 + t117); t483 = x2 * x3 * y3 - t5 * y3 + (t448 + x2 * x3) * y2; t484 = py1 * t483; t485 = t431 + t43; t486 = t485 * t142; t487 = t5 * t158; t488 = t446 * t142; t489 = t5 * t168; t490 = t489 + t488; t491 = py1 * t490; t492 = t463 * t142; t493 = t5 * t173; t494 = t493 + t492; t495 = px1 * t494; t496 = x1 * y1 * (t495 + t491 + t487 + t486); t497 = t142 * t119; t498 = x2 * y2 * t259; t499 = t498 + t497; t500 = px1 * t499; t501 = t29 * (t500 + t381 + t151 + t146); t502 = t429 * t142; t503 = x2 * t370; t504 = y2 * (t503 + t125 + t54); t505 = x2 * t158; t506 = - px3 * x3 * t142; t507 = - px2 * x2 * t147; t508 = py3 * x3 * t142; t509 = y2 * (t118 + t471); t510 = py2 * x2 * t147; t511 = - py2 * t142; t512 = y2 * (t138 + t107); t513 = t172 + t512 + t511; t514 = px1 * t513; t515 = y2 * t259 + t142 * t255; t516 = px1 * t515; t517 = py1 * t454; t518 = - py2 * x3 * t142; t519 = t108 + t384; t520 = x2 * 
t519; t521 = y2 * (t520 + t307 + t135); t522 = - py3 * x2 * t147; t523 = py2 * t142; t524 = y2 * (t108 + t137); t525 = - t147 + 2 * y2 * y3 - t142; t526 = py1 * t525; t527 = x2 * t147 + y2 * (t473 + t452) + x3 * t142; t528 = py1 * t527; t529 = px1 * t474; t530 = px2 * x3 * t142; t531 = px3 * x2 * t147; t532 = - x2 * t147 + y2 * (t453 + t472) - x3 * t142; t533 = px1 * t532; t534 = - px2 * t142; t535 = t147 - 2 * y2 * y3 + t142; t536 = px1 * t535; t537 = t447 + t445; t538 = py1 * t537; t539 = t464 + t462; t540 = px1 * t539; t541 = 2 * px3 * py2 * t2; t542 = - 2 * px2 * py3 * t2; t543 = x2 * t446 * y2; t544 = t5 * t205; t545 = t544 + t543; t546 = py1 * t545; t547 = x2 * t463 * y2; t548 = t5 * t119; t549 = t548 + t547; t550 = px1 * t549; t551 = x2 * t265; t552 = (t389 + t10 + t551) * y2; t553 = t5 * t154; t554 = 2 * px3 * t2; t555 = (t554 + t393 + t110) * y2; t556 = t5 * t90; t557 = py3 * x2 * x3; t558 = - 2 * py3 * t2; t559 = (t558 + t399 + t557) * y2; t560 = py2 * x2 * x3 * y3; t561 = t138 + t361; t562 = t5 * t561; t563 = t390 * t142; t564 = t5 * t150; t565 = - px2 * t2 * t142; t566 = - px3 * t5 * t147; t567 = t566 + t214 + t565; t568 = py1 * t567; t569 = py2 * t2 * t142; t570 = x2 * y2 * (t118 + t135); t571 = py3 * t5 * t147; t572 = t571 + t570 + t569; t573 = px1 * t572; t574 = t86 + t68; t575 = x2 * t574; t576 = (t78 + t575) * y2; t577 = 2 * px2 * x3 * y3; t578 = x2 * (t87 + t577); t579 = px1 * t527; t580 = - t5 * t147 + 2 * x2 * x3 * y2 * y3 - t2 * t142; t581 = px1 * t580; t582 = t305 + t133; t583 = x2 * t582; t584 = (t419 + t583) * y2; t585 = x2 * (t136 + t306); t586 = py1 * t532; t587 = - py3 * t2 * t142; t588 = x2 * y2 * (t136 + t117); t589 = - py2 * t5 * t147; t590 = t5 * t147 - 2 * x2 * x3 * y2 * y3 + t2 * t142; t591 = py1 * t590; t592 = t400 + t466 + t465; t593 = px1 * t592; t594 = t309 + t279; t595 = t198 + t311; t596 = x2 * t378; t597 = t596 + t408 * y2; t598 = py1 * t597; t599 = t256 + t116 * y2; t600 = px1 * t599; t601 = t178 + t366 + t534; t602 = py1 * 
t601; t603 = t181 + t524 + t523; t604 = px1 * t603; t605 = t265 * t142; t606 = t423 + t144 + t143; t607 = y2 * t606; t608 = x2 * t150; t609 = 2 * py2 * x3 * y3; t610 = t362 + t137; t611 = x2 * t610; t612 = y2 * (t611 + t118 + t609); t613 = py1 * t449; t614 = t419 + t613 + t418 + t417; t615 = py1 * t460; t616 = py1 * t535; t617 = t616 + t172 + t512 + t511; t618 = t134 + t304; t619 = t618 * t142; t620 = - py3 * x2 * y3; t621 = y2 * (t135 + t620); t622 = x2 * (t388 + t180); t623 = px1 * t467; t624 = t623 + t78 + t102 + t101; t625 = px1 * t483; t626 = px1 * t525; t627 = t167 + t626 + t354 + t353; t628 = - 2 * px2 * x3; t629 = t98 + t628; t630 = t629 * t142; t631 = - 2 * px3 * t147; t632 = x2 * (t631 + t177); t633 = - 2 * px2 * py3 * x3 * y3; t634 = t633 + t197; t635 = - 2 * px3 * py2 * t147; t636 = t142 * t403; t637 = x2 * y2 * t173; t638 = t637 + t636; t639 = px1 * t638; t640 = t589 + t588 + t587; t641 = px1 * t640; t642 = px1 * t590; t643 = py1 * t580; t644 = (x0 * (px0 * (y1 * (x1 * (t528 + t522 + t612 + t518) + t643 + t571 + t570 + t569) + t29 * t515 + x1 * t638 + t1 * (t615 + t444 + t443 + t442)) + py0 * (y1 * (x1 * (t533 + t531 + t331 + t530) + t642 + t566 + t214 + t565) + x1 * t234 + t29 * t379 + t1 * (t625 + t439 + t438 + t437)) + y1 * (x1 * (px1 * (t622 + t621 + t619) + py1 * (t632 + t299 + t630) + t608 + t607 + t605) + t641 + t243 + t564 + t563) + x1 * (t639 + t235 + x2 * y2 * (t284 + t635) + t142 * t634) + t29 * (t175 + t170) + t1 * (px1 * (t482 + t480 + t479) + py1 * (t459 + t79 + t458) + t434 + t433 + t432)) + y0 * (x0 * (py0 * (x1 * (t579 + t632 + t299 + t630) + t489 + t29 * t627 + y1 * (x1 * t597 + t625 + t556 + t112 + t555) + t488 + t624 * t1) + px0 * (x1 * (t586 + t622 + t621 + t619) + t29 * t617 + t493 + y1 * (x1 * t599 + t615 + t562 + t560 + t559) + t492 + t614 * t1) + x1 * (px1 * (t522 + t612 + t518) + py1 * (t531 + t331 + t530) + t608 + t607 + t605) + t29 * (t604 + t602) + t487 + y1 * (x1 * (t600 + t598 + x2 * t595 + t594 * y2) + px1 * (t585 + t480 
+ t584) + py1 * (t578 + t79 + t576) + t267 + t553 + t552) + t486 + (t593 + t302) * t1) + px0 * (x1 * (t591 + t589 + t588 + t587) + t29 * (t586 + t510 + t509 + t508) + y1 * (x1 * (t484 + t585 + t480 + t584) + t548 + t547) + t415 * t1) + py0 * (x1 * (t581 + t241 + t240 + t239) + t29 * (t579 + t507 + t250 + t506) + y1 * (x1 * (t461 + t578 + t79 + t576) + t544 + t543) + t411 * t1) + x1 * (t573 + t568 + t564 + t563) + t29 * (px1 * (t522 + t521 + t518) + py1 * (t531 + t225 + t530) + t505 + t504 + t502) + y1 * (x1 * (px1 * (t562 + t560 + t559) + py1 * (t556 + t112 + t555) + t267 + t553 + t552) + t550 + t546 + t5 * (t322 + t273) + x2 * (t542 + t541) * y2) + (t540 + t538) * t1) + t161 * (py0 * (y1 * (x1 * (t536 + t178 + t366 + t534) + t533 + t531 + t225 + t530) + x1 * t169 + t208 + t1 * (t529 + t204 + t292 + t425) + t206) + px0 * (y1 * (t528 + x1 * (t181 + t526 + t524 + t523) + t522 + t521 + t518) + x1 * t174 + t498 + t1 * (t517 + t118 + t428 + t427) + t497) + x1 * (t516 + t383) + y1 * (x1 * (t514 + t382) + px1 * (t510 + t509 + t508) + py1 * (t507 + t250 + t506) + t505 + t504 + t502) + t151 + t1 * (px1 * (t136 + t471 + t470) + py1 * (t87 + t249 + t451) + t423 + t422) + t146) + t501 + t496 + t14 * (px0 * (x1 * (t484 + t482 + t480 + t479) + t29 * (t475 + t136 + t471 + t470) + t404 + t402 + (x1 * (t468 + t400 + t466 + t465) + t464 + t462) * y1) + py0 * (x1 * (t461 + t459 + t79 + t458) + t29 * (t455 + t87 + t249 + t451) + t396 + t395 + (x1 * (t70 + t450 + t69 + t67) + t447 + t445) * y1) + x1 * (px1 * (t444 + t443 + t442) + py1 * (t439 + t438 + t437) + t434 + t433 + t432) + t29 * (px1 * (t118 + t428 + t427) + py1 * (t204 + t292 + t425) + t423 + t422) + t392 + t391 + (x1 * (t421 + t104) + t416 + t412) * y1) + t407); t645 = t5 * t265; t646 = t115 + t114 + t132; t647 = px1 * t646; t648 = x2 * t485; t649 = t32 * t5; t650 = t70 + t393 + t73 * t5; t651 = t400 + t399 + t106 * t5; t652 = t540 + x1 * (px1 * t651 + py1 * t650 + t389 + t10 + t649) + t538 + t648 + t29 * (t647 + t357 + t20 + 
t19 + t18) + t645; t653 = t648 + t645; t654 = t392 + t391; t655 = px1 * t654; t656 = t309 + t19; t657 = x2 * t656; t658 = (t389 + t657) * y2; t659 = px3 * py2 * t5 * y3; t660 = x2 * (t144 + t273); t661 = - px3 * py2 * t5; t662 = t431 + t27 + t661; t663 = px1 * t662 + t24; t664 = t5 * t429; t665 = x2 * t390; t666 = t665 + t664; t667 = px3 * py2 * x2; t668 = (t20 + t667) * y2; t669 = x2 * t485 * y2; t670 = t5 * t145; t671 = t670 + t669; t672 = px1 * t671; t673 = t26 + t52; t674 = x2 * t673; t675 = (t389 + t674) * y2; t676 = x2 * (t633 + t54); t677 = px3 * t5; t678 = t436 + t69 + t677; t679 = px1 * t678 + t37 + t60 + t59; t680 = - px3 * x2; t681 = t203 + t298 + (t64 + t680) * y2; t682 = px1 * t545; t683 = - px3 * t5 * y3; t684 = t578 + t683 + (t393 + t575) * y2; t685 = 2 * py3 * x3; t686 = t685 + t476; t687 = 2 * py2 * t2; t688 = px1 * (t419 + t441 + t131 * t5); t689 = - px2 * py3 * x2; t690 = 2 * px2 * py3 * x3; t691 = (t690 + t94 + t689) * y2; t692 = t330 + t204 + (t98 + t628 + t424) * y2; t693 = t134 + t133 + t319; t694 = px1 * (t140 + t118 + t117 + t693 * y2); t695 = (t542 + t43 + t9) * y2; t696 = t5 * t312; t697 = 2 * px2 * t2; t698 = t5 * t316 + t112 + (t78 + t697 + t110) * y2; t699 = x2 * t253; t700 = t5 * t255; t701 = x2 * t403; t702 = px1 * (t701 + t700 + (t419 + t441 + t699) * y2); t703 = px2 * py3 * x2 * x3; t704 = (t10 + t703) * y2; t705 = px3 * py2 * x2 * x3 * y3; t706 = (t20 + t279 + t689) * y2; t707 = t439 + t111 + (t70 + t435) * y2; t708 = t224 + t204 + (t296 + t64 + t424) * y2; t709 = - 2 * py2; t710 = 2 * py3; t711 = py1 * t678; t712 = t459 + t683 + (t393 + t457) * y2; t713 = x2 * t116; t714 = t5 * t139; t715 = px1 * (t120 + t714 + (t400 + t399 + t713) * y2); t716 = 2 * px2 * py3; t717 = (t94 + (t716 + t15) * x2) * y2; t718 = - 2 * px2; t719 = t221 + t128 + t249 + (t98 + (px3 + t718) * x2) * y2; t720 = px1 * (t256 + t136 + t135 + t646 * y2); t721 = - px2 * py3 * t2 * t142; t722 = - px3 * py2 * t5 * t147; t723 = t722 + t237 + t721; t724 = - px2 * py3 
* x3 * t142; t725 = y2 * (t54 + t124); t726 = px1 * y2 * t257; t727 = - px3 * py2 * x2 * t147; t728 = y2 * (t87 + t127); t729 = t531 + t728 + t530; t730 = px2 * py3 * t2 * t142; t731 = px3 * py2 * t5 * t147; t732 = px1 * t397; t733 = t251 + t299 + t248; t734 = px2 * t2 * t142; t735 = px3 * t5 * t147; t736 = t735 + t240 + t734; t737 = t389 + t10 + t649; t738 = t731 + t189 + t730; t739 = px1 * t738; t740 = x2 * t165; t741 = t740 + t204 + t203; t742 = py1 * y2 * t741; t743 = py1 * t736; t744 = px2 * py3 * t142; t745 = px1 * t567; t746 = t148 + t364 + t744; t747 = px3 * py2 * t5; t748 = t389 + t95 + t747; t749 = (t26 + t122) * y2; t750 = x2 * t280; t751 = (t431 + t750) * y2; t752 = - px3 * py2 * t5 * y3; t753 = x2 * (t322 + t143); t754 = - px3 * t5; t755 = t393 + t102 + t754; t756 = t128 + t292 + (t68 + t126) * y2; t757 = x2 * t297; t758 = x2 * (t204 + t327); t759 = t758 + t438 + (t436 + t757) * y2; t760 = (t94 + t667) * y2; t761 = t203 + t249 + (t98 + t680) * y2; t762 = px1 * (t140 + t253 * y2); t763 = - px3 * py2 * x2 * x3; t764 = (t43 + t763) * y2; t765 = - px2 * py3 * x2 * x3 * y3; t766 = px3 * x2 * x3; t767 = px2 * x2 * x3 * y3; t768 = t767 + t79 + (t78 + t766) * y2; t769 = px1 * (t120 + t700 + (t419 + t441 + t713) * y2); t770 = t501 + t496 + t407; t771 = px3 * py2 * x3 * t142; t772 = y2 * (t313 + t633 + t54); t773 = px2 * py3 * x2 * t147; t774 = - px3 * py2 * t142; t775 = t149 + t346 + t774; t776 = y2 * (t317 + t87 + t577); t777 = t507 + t776 + t506; t778 = px3 * t142; t779 = t177 + t354 + t778; t780 = y2 * (t144 + t272); t781 = y2 * (t203 + t292); t782 = t531 + t781 + t530; t783 = px1 * (t336 + t258 + t333); t784 = t690 + t94; t785 = x2 * t784; t786 = (t431 + t785) * y2; t787 = x2 * (t125 + t197); t788 = x2 * t629; t789 = x2 * (t221 + t128); t790 = t789 + t438 + (t436 + t788) * y2; t791 = - 2 * py2 * t2; t792 = 2 * py3 * t2; t793 = 2 * px2 * py3 * t2; t794 = (t793 + t10 + t42) * y2; t795 = t5 * t324; t796 = - 2 * px2 * t2; t797 = t5 * t329 + t80 + (t70 + t796 + 
t77) * y2; t798 = px1 * (t701 + t714 + (t400 + t399 + t699) * y2); t799 = px1 * (t5 * t259 + t401 * t142); t800 = t429 * y2; t801 = t503 + t800; t802 = t487 + t486; t803 = t673 * t142; t804 = - 2 * px2 * py3 * t147; t805 = x2 * (t804 + t148); t806 = 2 * px2 * t147; t807 = x2 * (t178 + t806) + t728 + t574 * t142; t808 = py1 * t755; t809 = py1 * t779; t810 = y2 * (t58 + t144 + t273); t811 = y2 * (t91 + t293 + t203); t812 = t507 + t811 + t506; t813 = px1 * (t260 + t335 + t254); t814 = 2 * py2 * t147; t815 = - 2 * py3 * t147; t816 = (t389 + t42) * y2; t817 = - py2 * py3 * t2; t818 = (t817 + py2 * py3 * x2 * x3) * y2; t819 = - py2 * py3 * t5 * y3; t820 = py2 * py3 * x2 * x3 * y3; t821 = px1 * (t820 + t819 + t818); t822 = - py2 * py3 * t5; t823 = 2 * py2 * py3 * x2 * x3; t824 = px1 * (t817 + t823 + t822); t825 = (t431 + t9) * y2; t826 = py2 * py3 * t2; t827 = (t826 - py2 * py3 * x2 * x3) * y2; t828 = py2 * py3 * t5 * y3; t829 = - py2 * py3 * x2 * x3 * y3; t830 = px1 * (t829 + t828 + t827); t831 = (py2 * py3 * x2 - py2 * py3 * x3) * y2; t832 = - py2 * py3 * x2 * y3; t833 = py2 * py3 * x3 * y3; t834 = px1 * (t833 + t832 + t831); t835 = (t690 + t94 + t122) * y2; t836 = px1 * t693; t837 = - py2 * t5 * y3; t838 = t560 + t837 + (t400 + t557) * y2; t839 = x2 * t205; t840 = py1 * (t839 + x2 * t408 * y2); t841 = (t20 + t51) * y2; t842 = - py3 * x2; t843 = py2 * x2 * y3; t844 = t135 + t843 + (t115 + t842) * y2; t845 = py1 * (t740 + t87 + t128 + (t98 + t68 + t63) * y2); t846 = py2 * py3 * t5; t847 = - 2 * py2 * py3 * x2 * x3; t848 = - py2 * x2 * x3; t849 = - py3 * x2 * x3 * y3; t850 = t849 + t480 + (t419 + t848) * y2; t851 = (py2 * py3 * x3 - py2 * py3 * x2) * y2; t852 = py2 * py3 * x2 * y3; t853 = - py2 * py3 * x3 * y3; t854 = x2 * t561; t855 = t854 + t136 + (t305 + t133 + t469) * y2; t856 = py2 * py3 * t2 * t142; t857 = - 2 * py2 * py3 * x2 * x3 * y2 * y3; t858 = py2 * py3 * t5 * t147; t859 = px1 * (t858 + t857 + t856); t860 = - py2 * py3 * x3 * t142; t861 = y2 * (t833 + t852); 
t862 = - py2 * py3 * x2 * t147; t863 = px1 * (t862 + t861 + t860); t864 = - py2 * py3 * t2 * t142; t865 = 2 * py2 * py3 * x2 * x3 * y2 * y3; t866 = - py2 * py3 * t5 * t147; t867 = py3 * t2 * t142; t868 = py2 * t5 * t147; t869 = t868 + t570 + t867; t870 = py2 * py3 * x3 * t142; t871 = y2 * (t853 + t832); t872 = py2 * py3 * x2 * t147; t873 = - py3 * x3 * t142; t874 = - py2 * x2 * t147; t875 = t874 + t521 + t873; t876 = py2 * x3 * t142; t877 = py3 * x2 * t147; t878 = t877 + t509 + t876; t879 = t287 * t142; t880 = t596 + t87 + t128; t881 = y2 * t880; t882 = x2 * t207; t883 = py1 * (t882 + t881 + t879); t884 = py1 * t662; t885 = px1 * (t826 + t847 + t846); t886 = 2 * px3 * py2; t887 = (t94 + (t31 + t886) * x2) * y2; t888 = px1 * (t853 + t852 + t851); t889 = py1 * t738; t890 = px1 * (t866 + t865 + t864); t891 = px1 * (t872 + t871 + t870); t892 = t656 * t142; t893 = x2 * (t157 + t635); t894 = t221 + t577; t895 = x2 * t253 * y2; t896 = t701 + t895; t897 = px1 * t896; t898 = (t20 + t279 + t122) * y2; t899 = py1 * (t596 + t204 + t203 + (t65 + t64 + t97) * y2); t900 = t385 + t107; t901 = x2 * t900; t902 = t901 + t136 + (t115 + t476 + t469) * y2; t903 = px1 * t869; t904 = t874 + t612 + t873; t905 = t408 * t142; t906 = y2 * t741; t907 = x2 * t168; t908 = py1 * (t907 + t906 + t905); t909 = - py2 * py3 * t142; t910 = 2 * py2 * py3 * y2 * y3; t911 = - py2 * py3 * t147; t912 = px1 * (t911 + t910 + t909); t913 = t912 + py1 * t376; t914 = t481 + t117 + t428 + (t133 + (py3 + t709) * x2) * y2; t915 = 2 * px3; t916 = t138 + t137 + t131 * y2; t917 = px1 * t916; t918 = py1 * (t167 + t166 + t73 * t142); t919 = py3 * t142; t920 = t171 + t524 + t919; t921 = px1 * t920; t922 = py2 * py3 * t142; t923 = - 2 * py2 * py3 * y2 * y3; t924 = py2 * py3 * t147; t925 = py1 * t513 + t924 + t923 + t922; t926 = py1 * t420; t927 = py1 * t640; t928 = t685 + t114; t929 = x2 * (t172 + t814) + t621 + t928 * t142; t930 = px1 * (t924 + t923 + t922); t931 = t930 + py1 * t347; t932 = py1 * t920 + t911 + t910 + 
t909; t933 = t315 + t222; t934 = py1 * t654; t935 = (t10 + t750) * y2; t936 = t824 + py1 * t263; t937 = py1 * t671; t938 = (t19 + t689) * y2; t939 = (t10 + t785) * y2; t940 = t296 + t314; t941 = py1 * (t78 + t436 + t62 * t5); t942 = (t26 + t52 + t667) * y2; t943 = py1 * (t740 + t204 + t203 + t99 * y2); t944 = t611 + t118 + (t134 + t304 + t426) * y2; t945 = (t431 + t541 + t42) * y2; t946 = t5 * t199; t947 = t5 * t900 + t560 + (t419 + t791 + t557) * y2; t948 = x2 * t287; t949 = t5 * t378; t950 = py1 * (t289 + t949 + (t78 + t436 + t948) * y2); t951 = - py3 * t5; t952 = t441 + t466 + t951; t953 = py1 * t952 + t826 + t847 + t846; t954 = py3 * x2; t955 = t117 + t620 + (t114 + t954) * y2; t956 = py1 * t549; t957 = py3 * t5 * y3; t958 = t585 + t957 + (t399 + t583) * y2; t959 = (t389 + t763) * y2; t960 = (t309 + t19 + t667) * y2; t961 = - 2 * px3; t962 = px1 * t952; t963 = x2 * t408; t964 = t5 * t165; t965 = py1 * (t839 + t964 + (t70 + t393 + t963) * y2); t966 = t482 + t957 + (t399 + t478) * y2; t967 = - 2 * px3 * py2; t968 = (t26 + (t16 + t967) * x2) * y2; t969 = t307 + t135 + t471 + (t134 + (t130 + t359) * x2) * y2; t970 = py1 * (t596 + t87 + t128 + t66 * y2); t971 = t444 + t837 + (t400 + t440) * y2; t972 = t520 + t118 + (t685 + t114 + t426) * y2; t973 = py1 * t405; t974 = t877 + t621 + t876; t975 = - py2 * t2 * t142; t976 = - py3 * t5 * t147; t977 = t976 + t588 + t975; t978 = py1 * y2 * t880; t979 = y2 * (t136 + t843); t980 = t522 + t979 + t518; t981 = py1 * t276; t982 = py1 * t572; t983 = px1 * y2 * t334; t984 = px1 * t977; t985 = (t94 + t51) * y2; t986 = (t43 + t657) * y2; t987 = (t26 + t689) * y2; t988 = t117 + t471 + (t134 + t954) * y2; t989 = py1 * (t740 + t287 * y2); t990 = (t431 + t703) * y2; t991 = - py3 * x2 * x3; t992 = - py2 * x2 * x3 * y3; t993 = t992 + t480 + (t419 + t991) * y2; t994 = py1 * (t839 + t949 + (t78 + t436 + t963) * y2); t995 = py3 * t5; t996 = t399 + t418 + t995; t997 = t135 + t428 + (t133 + t842) * y2; t998 = x2 * t928; t999 = x2 * (t118 + 
t609); t1000 = t999 + t443 + (t441 + t998) * y2; t1001 = y2 * (t901 + t136 + t306); t1002 = t510 + t1001 + t508; t1003 = - py3 * t142; t1004 = t180 + t512 + t1003; t1005 = y2 * (t117 + t428); t1006 = t522 + t1005 + t518; t1007 = py1 * (t907 + t881 + t905); t1008 = y2 * (t854 + t481 + t117); t1009 = t510 + t1008 + t508; t1010 = 2 * px3 * t147; t1011 = py1 * (t5 * t207 + t394 * t142); t1012 = t784 * t142; t1013 = 2 * px3 * py2 * t147; t1014 = x2 * (t149 + t1013); t1015 = py1 * (t882 + t906 + t879); t1016 = x2 * (t181 + t387) + t979 + t582 * t142; t1017 = (t43 + t674) * y2; t1018 = x2 * t618; t1019 = x2 * (t307 + t135); t1020 = t1019 + t443 + (t441 + t1018) * y2; t1021 = - 2 * px3 * t2; t1022 = - 2 * px3 * py2 * t2; t1023 = (t389 + t1022 + t9) * y2; t1024 = t5 * t57; t1025 = t5 * t610 + t849 + (t400 + t687 + t848) * y2; t1026 = py1 * (t289 + t964 + (t70 + t393 + t948) * y2); t1027 = px1 * t996; t1028 = px1 * t1004; t1029 = x2 * t429 * y2; t1030 = (t436 + t110) * y2; t1031 = (t441 + t557) * y2; t1032 = (t393 + t77) * y2; t1033 = (t399 + t848) * y2; t1034 = (t26 + t94 + t18) * y2; t1035 = (t64 + t85) * y2; t1036 = (t114 + t469) * y2; t1037 = (t98 + t628 + t126) * y2; t1038 = (t134 + t304 + t842) * y2; t1039 = (t20 + t19 + t96) * y2; t1040 = (t296 + t64 + t126) * y2; t1041 = (t685 + t114 + t842) * y2; t1042 = (t98 + (t961 + px2) * x2) * y2; t1043 = t456 * t142; t1044 = x2 * (t1010 + t166); t1045 = (t134 + (t710 + t105) * x2) * y2; t1046 = t477 * t142; t1047 = x2 * (t815 + t171); t1048 = t32 * t142; t1049 = t171 + t526 + t524 + t919; t1050 = t536 + t166 + t366 + t365; t1051 = (t389 + t10 + t430) * y2; t1052 = (t393 + t766) * y2; t1053 = (t399 + t991) * y2; t1054 = t17 * t5; t1055 = (t431 + t43 + t551) * y2; t1056 = (t1021 + t436 + t77) * y2; t1057 = t5 * t223; t1058 = (t792 + t441 + t848) * y2; t1059 = t5 * t519; t1060 = t338 * y2; t1061 = (t86 + t68 + t680) * y2; t1062 = (t305 + t133 + t954) * y2; t1063 = (t115 + t426) * y2; t1064 = (t400 + t1018) * y2; t1065 = (t65 + 
t424) * y2; t1066 = (t70 + t788) * y2; t1067 = (t70 + t757) * y2; t1068 = (t400 + t998) * y2; t1069 = t21 * y2; t1070 = (t68 + (t915 + t61) * x2) * y2; t1071 = (t133 + (t360 + py2) * x2) * y2; t1072 = (t115 + t476 + t954) * y2; t1073 = (t65 + t314 + t680) * y2; trans->m[0][0] = (x0 * (px0 * (x1 * (px1 * (y2 * (t388 + t387) + t142 * t386) + t383 + t372 + t371) + y1 * (x1 * (t369 + t382 + t156 + t346 + t345) + t337 + py1 * t301 + t285 + t283 + t281) + t381 + t151 + t1 * (t141 + py1 * t92 + t58 + t54 + t53) + t146) + py0 * (y1 * (x1 * t380 + px1 * t332 + t219 + t218 + t217) + px1 * t234 + px1 * x1 * t379 + t1 * (px1 * t129 + t49 + t48 + t47)) + y1 * (x1 * t377 + px1 * (t202 + t326 + t196) + t195) + px1 * t374 + px1 * x1 * t373 + t1 * (px1 * (t125 + t124 + t123) + t269)) + y0 * (x0 * (px0 * (t261 + x1 * (t369 + t368 + t157 + t364 + t363) + py1 * t227 + t202 + y1 * (x1 * (px1 * (t362 + t361 + (t360 + t359) * y2) + t358 + t153 + t55 + t339) + t320 + py1 * t294 + t144 + t273 + t272 + t271) + t201 + t196 + (t357 + t20 + t19 + t18) * t1) + py0 * (x1 * t356 + px1 * t252 + t194 + y1 * (px1 * t318 + px1 * x1 * t349 + t84 + t83 + t82) + t193 + t192 + px1 * t99 * t1) + x1 * t348 + px1 * (t247 + t246 + t244) + t278 + y1 * (px1 * (t313 + t54 + t310) + t50 + px1 * x1 * t340) + px1 * t338 * t1) + px0 * (x1 * (t337 + py1 * t332 + t202 + t326 + t196) + t321 + px1 * t29 * t182 + t190 + y1 * (x1 * (t320 + py1 * t318 + t313 + t54 + t310) + px1 * (x2 * t308 + x2 * (t305 + t304) * y2) + t303 + t267 + t266) + t189 + t188 + (t302 + t10 + t27 + t25) * t1) + py0 * (x1 * (px1 * t301 + t194 + t193 + t192) + t295 + px1 * t29 * t179 + t186 + y1 * (x1 * (px1 * t294 + t49 + t48 + t47) + px1 * t290) + t185 + t184 + (t286 + t3 + t23 + t22) * t1) + x1 * (px1 * (t285 + t283 + t281) + t278) + t277 + t275 + px1 * t29 * t274 + y1 * (x1 * (px1 * (t144 + t273 + t272 + t271) + t269) + px1 * t268) + (t264 + t262) * t1) + px0 * (y1 * (x1 * (t261 + py1 * t252 + t247 + t246 + t244) + t243 + t238 + t237 + t236) + 
x1 * (t235 + t230 + t229) + px1 * t29 * t174 + t1 * (t121 + py1 * t81 + t46 + t45 + t44)) + py0 * (y1 * (x1 * (px1 * t227 + t219 + t218 + t217) + px1 * t216 + t212 + t211 + t210) + px1 * t29 * t169 + px1 * x1 * t209 + t1 * (px1 * t113 + t40 + t39 + t38)) + y1 * (x1 * (px1 * (t202 + t201 + t196) + t195) + px1 * t191 + t187) + px0 * t161 * t183 + px1 * t29 * t160 + px1 * x1 * t152 + t14 * (px0 * (x1 * (t141 + py1 * t129 + t125 + t124 + t123) + t121 + py1 * t113 + px1 * t29 * t109 + t13 + t12 + t11 + (t104 + t43 + x1 * (t100 + t26 + t94 + t96) + t95 + t93) * y1) + py0 * (x1 * (px1 * t92 + t84 + t83 + t82) + px1 * t81 + px1 * t29 * t76 + t7 + t6 + t4 + (px1 * t71 + t37 + px1 * x1 * t66 + t60 + t59) * y1) + x1 * (px1 * (t58 + t54 + t53) + t50) + px1 * (t46 + t45 + t44) + t41 + px1 * t29 * t36 + (px1 * t28 + t24 + px1 * x1 * t21) * y1) + t1 * (px1 * (t13 + t12 + t11) + t8)); trans->m[0][1] = (t161 * (px0 * (x1 * (t382 + t156 + t346 + t345) + py1 * t733 + t247 + y1 * (t694 + x1 * (t358 + t153 + t55 + t339) + py1 * t681 + t144 + t282 + t668) + t726 + t283 + t244 + px1 * t646 * t1) + py0 * (x1 * (px1 * t601 + t343 + t342 + t341) + px1 * t729 + t219 + y1 * (px1 * t692 + px1 * x1 * t76 + t49 + t48 + t47) + t218 + t217 + px1 * t66 * t1) + x1 * (px1 * t746 + t375) + px1 * (t727 + t725 + t724) + t195 + y1 * (px1 * (t325 + t143 + t691) + t269 + px1 * x1 * t36) + px1 * t21 * t1) + x0 * (py0 * (t29 * t356 + t745 + t212 + y1 * (x1 * (px1 * t719 + t84 + t83 + t82) + px1 * t698 + t40 + t39 + t38) + px1 * x1 * y2 * t741 + t211 + t210 + px1 * t650 * t1) + px0 * (t29 * (t602 + t148 + t364 + t744) + t743 + t722 + y1 * (x1 * (t720 + py1 * t708 + t200 + t143 + t706) + t702 + py1 * t684 + t676 + t659 + t675) + x1 * (t607 + px1 * y2 * (x2 * (t362 + t361) + t481 + t609) + t742) + t237 + t721 + px1 * t651 * t1) + t29 * t348 + t739 + t187 + y1 * (x1 * (px1 * (t125 + t197 + t245 + t717) + t50) + px1 * (t696 + t13 + t695) + t8) + px1 * x1 * y2 * t606 + px1 * t737 * t1) + py0 * (x1 * (px1 * t736 + 
t186 + t185 + t184) + t29 * (px1 * t733 + t194 + t193 + t192) + y1 * (x1 * (px1 * t712 + t7 + t6 + t4) + t732) + px1 * t537 * t1) + px0 * (x1 * (t568 + t731 + t189 + t730) + t29 * (py1 * t729 + t727 + t726 + t725 + t724) + y1 * (x1 * (t715 + py1 * t707 + t705 + t12 + t704) + t546 + t670 + t669) + px1 * t539 * t1) + x1 * (px1 * t723 + t275) + t29 * (px1 * (t247 + t283 + t244) + t278) + y0 * (x0 * (px0 * (x1 * (t720 + py1 * t719 + t125 + t197 + t245 + t717) + t715 + py1 * t712 + t29 * (t162 + t35 + t34 + t33) + t660 + t659 + t658 + (t688 + t711 + t431 + x1 * (px1 * (t305 + t304 + (t710 + t709) * x2) + t100 + t26 + t94 + t96) + t27 + t661) * y1) + py0 * (x1 * (px1 * t708 + t49 + t48 + t47) + px1 * t707 + px1 * t29 * t349 + t40 + t39 + t38 + (t286 + t3 + px1 * x1 * t99 + t23 + t22) * y1) + x1 * (px1 * (t200 + t143 + t706) + t269) + px1 * (t705 + t12 + t704) + t8 + px1 * t29 * t340 + (t264 + t262 + px1 * x1 * t338) * y1) + px0 * (x1 * (t702 + py1 * t698 + t696 + t13 + t695) + t29 * (t694 + py1 * t692 + t325 + t143 + t691) + t398 + t392 + t391 + (x1 * (t688 + t104 + t43 + t95 + t93) + px1 * (x2 * (t558 + t687) + t5 * t686) + t412 + t665 + t664) * y1) + py0 * (x1 * (px1 * t684 + t7 + t6 + t4) + t682 + t29 * (px1 * t681 + t84 + t83 + t82) + (px1 * t411 + x1 * t679) * y1) + x1 * (px1 * (t676 + t659 + t675) + t41) + t672 + t29 * (px1 * (t144 + t282 + t668) + t50) + (px1 * t666 + x1 * t663) * y1) + y1 * (x1 * (px1 * (t660 + t659 + t658) + t41) + t655) + px1 * t653 * t1 + px0 * t652 * t14) ; trans->m[0][2] = (x0 * (px0 * (y1 * (x1 * (t813 + py1 * t807 + t805 + t725 + t803) + t799 + t568 + t731 + t189 + t730) + x1 * (px1 * (x2 * y2 * (t815 + t814) + t142 * t308) + t235 + t230 + t229) + t29 * (t170 + t159 + t155) + t1 * (t769 + py1 * t759 + t753 + t752 + t751)) + py0 * (y1 * (x1 * (px1 * t812 + t194 + t193 + t192) + t295 + t186 + t185 + t184) + px1 * x1 * t234 + px1 * t29 * t379 + t1 * (px1 * t768 + t7 + t6 + t4)) + y1 * (x1 * (px1 * (t773 + t810 + t771) + t278) + t277 + t275) + 
px1 * x1 * t374 + px1 * t29 * t373 + t1 * (px1 * (t765 + t45 + t764) + t41)) + y0 * (x0 * (px0 * (x1 * (t813 + py1 * t812 + t773 + t810 + t771) + t495 + t29 * (t809 + t149 + t346 + t774) + y1 * (x1 * (px1 * (x2 * t386 + t686 * y2) + t598 + t503 + t800) + t798 + py1 * t790 + t787 + t752 + t786) + (t808 + t389 + t95 + t747) * t1) + py0 * (x1 * (px1 * t807 + t219 + t218 + t217) + px1 * t490 + t29 * t380 + y1 * (px1 * x1 * t597 + px1 * t797 + t7 + t6 + t4) + t679 * t1) + x1 * (px1 * (t805 + t725 + t803) + t195) + px1 * t802 + t29 * t377 + y1 * (px1 * x1 * t801 + px1 * (t795 + t46 + t794) + t41) + t663 * t1) + px0 * (x1 * (t799 + t243 + t238 + t237 + t236) + t29 * (t783 + py1 * t777 + t773 + t772 + t771) + y1 * (x1 * (t798 + py1 * t797 + t795 + t46 + t794) + px1 * (t5 * (t481 + t609) + x2 * (t792 + t791) * y2) + t546 + t670 + t669) + (t538 + t648 + t645) * t1) + py0 * (x1 * (t745 + t212 + t211 + t210) + t29 * (px1 * t782 + t219 + t218 + t217) + y1 * (x1 * (px1 * t790 + t40 + t39 + t38) + t682) + px1 * t411 * t1) + x1 * (t739 + t187) + t29 * (px1 * (t727 + t780 + t724) + t195) + y1 * (x1 * (px1 * (t787 + t752 + t786) + t8) + t672) + px1 * t666 * t1) + t161 * (px0 * (y1 * (t783 + x1 * (t368 + t157 + t364 + t363) + py1 * t782 + t727 + t780 + t724) + x1 * (t383 + t372 + t371) + t500 + t1 * (t762 + py1 * t756 + t125 + t272 + t749)) + py0 * (y1 * (x1 * (px1 * t779 + t352 + t351 + t350) + px1 * t777 + t194 + t193 + t192) + px1 * x1 * t169 + px1 * t209 + t1 * (px1 * t761 + t84 + t83 + t82)) + y1 * (x1 * (px1 * t775 + t344) + px1 * (t773 + t772 + t771) + t278) + px1 * x1 * t160 + px1 * t152 + t1 * (px1 * (t144 + t245 + t760) + t50)) + px0 * t770 + t14 * (px0 * (x1 * (t769 + py1 * t768 + t765 + t45 + t764) + t29 * (t762 + py1 * t761 + t144 + t245 + t760) + t406 + (t412 + x1 * (t711 + t431 + t27 + t661) + t665 + t664) * y1) + py0 * (x1 * (px1 * t759 + t40 + t39 + t38) + t732 + t29 * (px1 * t756 + t49 + t48 + t47) + (px1 * t537 + x1 * (px1 * t755 + t3 + t23 + t22)) * y1) + x1 * 
(px1 * (t753 + t752 + t751) + t8) + t655 + t29 * (px1 * (t125 + t272 + t749) + t269) + (x1 * (px1 * t748 + t262) + px1 * t653) * y1)); trans->m[1][0] = (x0 * (py0 * (x1 * (t516 + py1 * (y2 * (t631 + t806) + t142 * t933) + t372 + t371) + y1 * (px1 * t929 + x1 * (t514 + t918 + t157 + t364 + t363) + t908 + t893 + t725 + t892) + t500 + t151 + t1 * (px1 * t855 + t845 + t325 + t125 + t835) + t146) + px0 * (y1 * (x1 * t932 + py1 * t904 + t872 + t871 + t870) + py1 * x1 * t515 + py1 * t638 + t1 * (py1 * t844 + t833 + t832 + t831)) + y1 * (x1 * t931 + t863 + py1 * (t247 + t810 + t244)) + py1 * t374 + py1 * x1 * t373 + t1 * (t888 + py1 * (t54 + t282 + t841))) + y0 * (px0 * (x1 * (py1 * t929 + t862 + t861 + t860) + t927 + py1 * t29 * t182 + t858 + y1 * (py1 * t896 + x1 * (py1 * t914 + t833 + t832 + t831)) + t857 + t856 + (t926 + t817 + t823 + t822) * t1) + x0 * (px0 * (x1 * t925 + py1 * t878 + t862 + y1 * (py1 * t902 + py1 * x1 * t916 + t853 + t852 + t851) + t861 + t860 + py1 * t693 * t1) + py0 * (x1 * (t921 + t918 + t156 + t346 + t345) + t883 + px1 * t875 + t247 + y1 * (x1 * (t917 + py1 * (t328 + t88 + (t915 + t718) * y2) + t153 + t55 + t339) + t899 + px1 * t914 + t322 + t143 + t245 + t887) + t772 + t244 + (t647 + t20 + t19 + t18) * t1) + x1 * t913 + t891 + py1 * (t202 + t780 + t196) + y1 * (py1 * (t200 + t125 + t898) + t834 + py1 * x1 * t340) + py1 * t338 * t1) + py0 * (x1 * (t908 + px1 * t904 + t247 + t810 + t244) + t903 + py1 * t29 * t179 + t722 + y1 * (x1 * (px1 * t902 + t899 + t200 + t125 + t898) + t897 + py1 * (x2 * t894 + x2 * (t86 + t628) * y2) + t267 + t266) + t237 + t721 + (t593 + t389 + t95 + t747) * t1) + x1 * (py1 * (t893 + t725 + t892) + t891) + t890 + t889 + py1 * t29 * t274 + y1 * (x1 * (t888 + py1 * (t322 + t143 + t245 + t887)) + py1 * t268) + (t885 + t884) * t1) + py0 * (y1 * (x1 * (t883 + px1 * t878 + t202 + t780 + t196) + t641 + t731 + t189 + t730) + x1 * (t639 + t230 + t229) + py1 * t29 * t169 + t1 * (t840 + px1 * t850 + t13 + t752 + t825)) + px0 * (y1 * 
(x1 * (py1 * t875 + t872 + t871 + t870) + py1 * t869 + t866 + t865 + t864) + py1 * x1 * t499 + py1 * t29 * t174 + t1 * (py1 * t838 + t829 + t828 + t827)) + y1 * (x1 * (t863 + py1 * (t247 + t772 + t244)) + t859 + py1 * t723) + py0 * t161 * t183 + py1 * t29 * t160 + py1 * x1 * t152 + t14 * (px0 * (x1 * (py1 * t855 + t853 + t852 + t851) + py1 * t850 + py1 * t29 * t109 + t820 + t819 + t818 + (py1 * t592 + t826 + py1 * x1 * t646 + t847 + t846) * y1) + py0 * (x1 * (t845 + px1 * t844 + t54 + t282 + t841) + t840 + px1 * t838 + py1 * t29 * t76 + t46 + t659 + t816 + (t421 + t431 + x1 * (t836 + t26 + t94 + t96) + t27 + t661) * y1) + x1 * (py1 * (t325 + t125 + t835) + t834) + t830 + py1 * (t13 + t752 + t825) + py1 * t29 * t36 + (t824 + py1 * t748 + py1 * x1 * t21) * y1) + t1 * (t821 + py1 * (t46 + t659 + t816))) ; trans->m[1][1] = (t161 * (px0 * (x1 * (py1 * t603 + t911 + t910 + t909) + py1 * t980 + t872 + y1 * (py1 * t944 + py1 * x1 * t109 + t833 + t832 + t831) + t871 + t870 + py1 * t646 * t1) + py0 * (x1 * (t514 + t157 + t364 + t363) + px1 * t974 + t202 + y1 * (x1 * (t917 + t153 + t55 + t339) + t943 + px1 * t955 + t143 + t124 + t938) + t978 + t725 + t196 + py1 * t66 * t1) + x1 * (t930 + py1 * t775) + t863 + py1 * (t773 + t283 + t771) + y1 * (py1 * (t58 + t144 + t942) + t888 + py1 * x1 * t36) + py1 * t21 * t1) + x0 * (py0 * (t29 * (t604 + t149 + t346 + t774) + t984 + t190 + y1 * (x1 * (px1 * t972 + t970 + t313 + t144 + t960) + px1 * t958 + t950 + t787 + t12 + t939) + x1 * (t607 + t983 + py1 * y2 * (x2 * (t328 + t88) + t293 + t327)) + t189 + t188 + py1 * t650 * t1) + px0 * (t29 * t925 + t982 + t866 + y1 * (x1 * (py1 * t969 + t853 + t852 + t851) + py1 * t947 + t829 + t828 + t827) + py1 * x1 * y2 * t334 + t865 + t864 + py1 * t651 * t1) + t29 * t913 + t859 + t981 + y1 * (x1 * (t834 + py1 * (t633 + t54 + t272 + t968)) + py1 * (t946 + t46 + t945) + t821) + py1 * x1 * y2 * t606 + py1 * t737 * t1) + py0 * (x1 * (t573 + t238 + t237 + t236) + t29 * (px1 * t980 + t773 + t978 + t283 + 
t771) + y1 * (x1 * (t965 + px1 * t971 + t765 + t659 + t959) + t550 + t670 + t669) + py1 * t537 * t1) + px0 * (x1 * (py1 * t977 + t858 + t857 + t856) + t29 * (py1 * t974 + t862 + t861 + t860) + y1 * (x1 * (py1 * t966 + t820 + t819 + t818) + t973) + py1 * t539 * t1) + x1 * (t890 + py1 * t191) + t29 * (t891 + py1 * (t202 + t725 + t196)) + y0 * (x0 * (px0 * (x1 * (py1 * t972 + t833 + t832 + t831) + py1 * t971 + py1 * t29 * t916 + t829 + t828 + t827 + (t926 + t817 + py1 * x1 * t693 + t823 + t822) * y1) + py0 * (x1 * (t970 + px1 * t969 + t633 + t54 + t272 + t968) + px1 * t966 + t965 + t29 * (t163 + t35 + t34 + t33) + t753 + t12 + t935 + (t962 + t941 + t43 + x1 * (t836 + py1 * (t86 + t628 + (t961 + t291) * x2) + t26 + t94 + t96) + t95 + t93) * y1) + x1 * (py1 * (t313 + t144 + t960) + t888) + t821 + py1 * (t765 + t659 + t959) + py1 * t29 * t340 + (t885 + t884 + py1 * x1 * t338) * y1) + px0 * (x1 * (py1 * t958 + t820 + t819 + t818) + t956 + t29 * (py1 * t955 + t853 + t852 + t851) + (py1 * t415 + x1 * t953) * y1) + py0 * (x1 * (t950 + px1 * t947 + t946 + t46 + t945) + t29 * (px1 * t944 + t943 + t58 + t144 + t942) + t406 + t392 + t391 + (x1 * (t421 + t941 + t431 + t27 + t661) + t416 + py1 * (x2 * (t554 + t796) + t5 * t940) + t665 + t664) * y1) + x1 * (py1 * (t787 + t12 + t939) + t830) + t29 * (t834 + py1 * (t143 + t124 + t938)) + t937 + (x1 * t936 + py1 * t666) * y1) + y1 * (x1 * (py1 * (t753 + t12 + t935) + t830) + t934) + py1 * t653 * t1 + py0 * t652 * t14) ; trans->m[1][2] = (y0 * (x0 * (px0 * (x1 * (py1 * t1016 + t872 + t871 + t870) + py1 * t494 + t29 * t932 + y1 * (py1 * t1025 + py1 * x1 * t599 + t820 + t819 + t818) + t953 * t1) + py0 * (x1 * (t1015 + px1 * t1009 + t727 + t326 + t724) + t29 * (t1028 + t148 + t364 + t744) + t491 + y1 * (x1 * (t600 + py1 * (x2 * t933 + t940 * y2) + t503 + t800) + px1 * t1020 + t1026 + t676 + t45 + t1017) + (t1027 + t10 + t27 + t25) * t1) + x1 * (py1 * (t1014 + t283 + t1012) + t863) + t29 * t931 + py1 * t802 + y1 * (py1 * x1 * t801 + py1 * 
(t1024 + t13 + t1023) + t830) + t936 * t1) + py0 * (t29 * (t1007 + px1 * t1002 + t727 + t201 + t724) + x1 * (t1011 + t641 + t731 + t189 + t730) + y1 * (x1 * (t1026 + px1 * t1025 + t1024 + t13 + t1023) + t550 + py1 * (t5 * (t293 + t327) + x2 * (t1021 + t697) * y2) + t670 + t669) + (t540 + t648 + t645) * t1) + px0 * (x1 * (t982 + t866 + t865 + t864) + t29 * (py1 * t1006 + t872 + t871 + t870) + y1 * (x1 * (py1 * t1020 + t829 + t828 + t827) + t956) + py1 * t415 * t1) + x1 * (t859 + t981) + t29 * (t863 + py1 * (t773 + t246 + t771)) + y1 * (x1 * (py1 * (t676 + t45 + t1017) + t821) + t937) + py1 * t666 * t1) + x0 * (py0 * (y1 * (x1 * (px1 * t1016 + t1015 + t1014 + t283 + t1012) + t1011 + t573 + t238 + t237 + t236) + x1 * (t639 + py1 * (x2 * y2 * (t1010 + t300) + t142 * t894) + t230 + t229) + t29 * (t175 + t159 + t155) + t1 * (px1 * t1000 + t994 + t660 + t45 + t986)) + px0 * (y1 * (x1 * (py1 * t1009 + t862 + t861 + t860) + t927 + t858 + t857 + t856) + py1 * t29 * t515 + py1 * x1 * t638 + t1 * (py1 * t993 + t820 + t819 + t818)) + y1 * (x1 * (t891 + py1 * (t727 + t326 + t724)) + t890 + t889) + py1 * x1 * t374 + py1 * t29 * t373 + t1 * (t830 + py1 * (t705 + t752 + t990))) + t161 * (py0 * (x1 * (t516 + t372 + t371) + y1 * (x1 * (t921 + t156 + t346 + t345) + t1007 + px1 * t1006 + t773 + t246 + t771) + t381 + t1 * (t989 + px1 * t997 + t54 + t245 + t985)) + px0 * (y1 * (x1 * (py1 * t1004 + t924 + t923 + t922) + py1 * t1002 + t862 + t861 + t860) + py1 * t499 + py1 * x1 * t174 + t1 * (py1 * t988 + t853 + t852 + t851)) + y1 * (x1 * (t912 + py1 * t746) + t891 + py1 * (t727 + t201 + t724)) + py1 * x1 * t160 + py1 * t152 + t1 * (t834 + py1 * (t143 + t272 + t987))) + py0 * t770 + t14 * (px0 * (x1 * (py1 * t1000 + t829 + t828 + t827) + t973 + t29 * (py1 * t997 + t833 + t832 + t831) + (py1 * t539 + x1 * (py1 * t996 + t817 + t823 + t822)) * y1) + py0 * (x1 * (t994 + px1 * t993 + t705 + t752 + t990) + t29 * (t989 + px1 * t988 + t143 + t272 + t987) + t398 + (t416 + x1 * (t962 + t43 + t95 + 
t93) + t665 + t664) * y1) + x1 * (py1 * (t660 + t45 + t986) + t821) + t29 * (t888 + py1 * (t54 + t245 + t985)) + t934 + (x1 * (t885 + py1 * t28) + py1 * t653) * y1)); trans->m[2][0] = (x0 * (px0 * (y1 * (x1 * t617 + t586 + t877 + t1008 + t876) + x1 * t515 + t637 + t1 * (t475 + t136 + t620 + t1036) + t636) + py0 * (y1 * (t579 + x1 * t627 + t251 + t811 + t248) + x1 * t379 + t233 + t1 * (t455 + t87 + t298 + t1035) + t232) + x1 * (t516 + t383 + y2 * (t804 + t1013) + t142 * t595) + y1 * (px1 * (t1047 + t979 + t1046) + x1 * (t921 + t368 + t157 + t156 + t1048) + py1 * (t1044 + t728 + t1043) + t505 + t607 + t502) + t500 + t381 + t1 * (px1 * (t611 + t135 + t1038) + py1 * (t330 + t128 + t1037) + t423 + t125 + t54 + t1034)) + y0 * (x0 * (py0 * (x1 * t1050 + t533 + t226 + y1 * (t529 + t224 + x1 * t349 + t128 + t1040) + t781 + t220 + t99 * t1) + px0 * (t528 + x1 * t1049 + t874 + y1 * (t517 + x1 * t916 + t520 + t135 + t1041) + t1005 + t873 + t693 * t1) + x1 * (t514 + t382 + t157 + t156 + t1048) + px1 * (t877 + t1001 + t876) + py1 * (t251 + t776 + t248) + t608 + y1 * (x1 * (t917 + t358 + t56 + t323 + (t716 + t967) * y2) + px1 * (t118 + t609 + t471 + t1045) + py1 * (t204 + t327 + t249 + t1042) + t503 + t144 + t143 + t1039) + t504 + t605 + (t647 + t357) * t1) + px0 * (x1 * (t528 + t1047 + t979 + t1046) + t643 + t29 * t182 + t571 + y1 * (x1 * (t475 + t118 + t609 + t471 + t1045) + t701 + t895) + t570 + t569 + (t468 + t441 + t466 + t951) * t1) + py0 * (x1 * (t533 + t1044 + t728 + t1043) + t642 + t29 * t179 + t566 + y1 * (x1 * (t455 + t204 + t327 + t249 + t1042) + t289 + t288) + t214 + t565 + (t436 + t450 + t69 + t677) * t1) + x1 * (px1 * (t877 + t1008 + t876) + py1 * (t251 + t811 + t248) + t505 + t607 + t502) + t984 + t743 + t29 * t274 + y1 * (x1 * (px1 * (t520 + t135 + t1041) + py1 * (t224 + t128 + t1040) + t503 + t144 + t143 + t1039) + t897 + t303 + x2 * t634 + x2 * (t690 + t52) * y2) + (t1027 + t808) * t1) + py0 * (y1 * (x1 * (t579 + t251 + t776 + t248) + t581 + t735 + t240 + t734) 
+ t29 * t169 + x1 * t209 + t1 * (t461 + t80 + t683 + t1032)) + px0 * (y1 * (x1 * (t586 + t877 + t1001 + t876) + t591 + t976 + t588 + t975) + x1 * t499 + t29 * t174 + t1 * (t484 + t849 + t957 + t1033)) + y1 * (x1 * (px1 * (t874 + t1005 + t873) + py1 * (t226 + t781 + t220) + t608 + t504 + t605) + t573 + t568) + t161 * t183 + x1 * (t639 + t235) + t29 * t160 + t14 * (px0 * (x1 * (t517 + t611 + t135 + t1038) + t615 + t29 * t109 + t560 + t443 + t1031 + (t399 + t613 + x1 * t646 + t418 + t995) * y1) + py0 * (x1 * (t529 + t330 + t128 + t1037) + t625 + t29 * t76 + t112 + t438 + t1030 + (t623 + t393 + t102 + x1 * t66 + t754) * y1) + x1 * (px1 * (t136 + t620 + t1036) + py1 * (t87 + t298 + t1035) + t423 + t125 + t54 + t1034) + px1 * (t849 + t957 + t1033) + py1 * (t80 + t683 + t1032) + t434 + t29 * t36 + t1029 + (t962 + t711 + x1 * (t836 + t100)) * y1) + t1 * (px1 * (t560 + t443 + t1031) + py1 * (t112 + t438 + t1030) + t434 + t1029)) ; trans->m[2][1] = (t161 * (px0 * (x1 * (t616 + t180 + t512 + t1003) + t586 + t510 + y1 * (t475 + t854 + x1 * t109 + t117 + t1062) + t621 + t508 + t646 * t1) + py0 * (t579 + x1 * (t177 + t626 + t354 + t778) + t507 + y1 * (t455 + x1 * t76 + t91 + t203 + t1061) + t299 + t506 + t66 * t1) + x1 * (t921 + t368) + px1 * (t874 + t979 + t873) + py1 * (t226 + t728 + t220) + y1 * (x1 * (t917 + t358) + px1 * (t118 + t843 + t1063) + py1 * (t204 + t127 + t1065) + t423 + t144 + t143 + t1060) + t504 + t21 * t1) + x0 * (py0 * (t29 * t1050 + t581 + t241 + y1 * (x1 * (t529 + t87 + t577 + t292 + t1070) + t461 + t1057 + t80 + t1056) + x1 * y2 * t741 + t240 + t239 + t650 * t1) + px0 * (t591 + t29 * t1049 + t589 + y1 * (x1 * (t517 + t136 + t306 + t428 + t1071) + t484 + t1059 + t849 + t1058) + x1 * y2 * t334 + t588 + t587 + t651 * t1) + t29 * (t1028 + t809) + t903 + t321 + y1 * (x1 * (px1 * (t901 + t117 + t1072) + py1 * (t317 + t203 + t1073) + t503 + t125 + t54 + t1069) + px1 * (t1019 + t837 + t1064) + py1 * (t789 + t111 + t1066) + t267 + t433 + t1055) + x1 * (y2 * (x2 * 
(t56 + t323) + t322 + t273) + t983 + t742) + t737 * t1) + py0 * (x1 * (t642 + t215 + t214 + t213) + t29 * (t533 + t226 + t728 + t220) + y1 * (x1 * (t625 + t758 + t111 + t1067) + t396 + t395) + t537 * t1) + px0 * (x1 * (t643 + t868 + t570 + t867) + t29 * (t528 + t874 + t979 + t873) + y1 * (x1 * (t615 + t999 + t837 + t1068) + t404 + t402) + t539 * t1) + x1 * (t641 + t243) + t29 * (px1 * (t510 + t621 + t508) + py1 * (t507 + t299 + t506) + t504) + y0 * (x0 * (py0 * (x1 * (t455 + t317 + t203 + t1073) + t461 + t29 * t349 + t767 + t683 + t1052 + (t436 + t450 + x1 * t99 + t69 + t677) * y1) + px0 * (x1 * (t475 + t901 + t117 + t1072) + t484 + t29 * t916 + t992 + t957 + t1053 + (t468 + t441 + t466 + x1 * t693 + t951) * y1) + x1 * (px1 * (t136 + t306 + t428 + t1071) + py1 * (t87 + t577 + t292 + t1070) + t503 + t125 + t54 + t1069) + px1 * (t999 + t837 + t1068) + py1 * (t758 + t111 + t1067) + t29 * (t163 + t162) + t434 + t553 + t1051 + (t421 + t104 + t431 + t43 + x1 * (t836 + t100 + t690 + t52 + (t270 + t886) * x2) + t1054) * y1) + py0 * (x1 * (t625 + t789 + t111 + t1066) + t29 * (t529 + t204 + t127 + t1065) + t544 + t543 + (x1 * t624 + t410 + t409) * y1) + px0 * (x1 * (t615 + t1019 + t837 + t1064) + t29 * (t517 + t118 + t843 + t1063) + t548 + t547 + (x1 * t614 + t414 + t413) * y1) + t29 * (px1 * (t854 + t117 + t1062) + py1 * (t91 + t203 + t1061) + t423 + t144 + t143 + t1060) + x1 * (px1 * (t1059 + t849 + t1058) + py1 * (t1057 + t80 + t1056) + t267 + t433 + t1055) + t406 + t398 + (t416 + x1 * (t962 + t711 + t431 + t43 + t1054) + t412 + x2 * (t793 + t1022) + t5 * t594) * y1) + y1 * (x1 * (px1 * (t992 + t957 + t1053) + py1 * (t767 + t683 + t1052) + t434 + t553 + t1051) + t550 + t546) + t653 * t1 + t652 * t14) ; trans->m[2][2] = t644; } static void print_trans (const char *header, struct pixman_f_transform *trans) { int i, j; double max; max = 0; printf ("%s\n", header); for (i = 0; i < 3; ++i) { for (j = 0; j < 3; ++j) { double a = fabs (trans->m[i][j]); if (a > max) max = a; } } 
if (max == 0.0) max = 1.0; for (i = 0; i < 3; ++i) { printf ("{ "); for (j = 0; j < 3; ++j) { printf ("D2F (%.5f)%s", 16384 * (trans->m[i][j] / max), j == 2 ? "" : ", "); } printf ("},\n"); } } int main () { struct pixman_f_transform t; #if 0 quad_to_quad (75, 200, 325, 200, 450, 335, -50, 335, 0, 0, 400, 0, 400, 400, 0, 400, &t); #endif quad_to_quad ( 1, 0, 1, 2, 2, 2, 2, 0, 1, 0, 1, 112, 2, 2, 2, 0, &t); print_trans ("0->0", &t); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/radial-test.c0000664000175000017500000001307214712446423016605 0ustar00mattst88mattst88#include "utils.h" #include "gtk-utils.h" #define NUM_GRADIENTS 9 #define NUM_STOPS 3 #define NUM_REPEAT 4 #define SIZE 128 #define WIDTH (SIZE * NUM_GRADIENTS) #define HEIGHT (SIZE * NUM_REPEAT) /* * We want to test all the possible relative positions of the start * and end circle: * * - The start circle can be smaller/equal/bigger than the end * circle. A radial gradient can be classified in one of these * three cases depending on the sign of dr. * * - The smaller circle can be completely inside/internally * tangent/outside (at least in part) of the bigger circle. This * classification is the same as the one which can be computed by * examining the sign of a = (dx^2 + dy^2 - dr^2). * * - If the two circles have the same size, neither can be inside or * internally tangent * * This test draws radial gradients whose circles always have the same * centers (0, 0) and (1, 0), but with different radiuses. 
From left * to right: * * - Degenerate start circle completely inside the end circle * 0.00 -> 1.75; dr = 1.75 > 0; a = 1 - 1.75^2 < 0 * * - Small start circle completely inside the end circle * 0.25 -> 1.75; dr = 1.5 > 0; a = 1 - 1.50^2 < 0 * * - Small start circle internally tangent to the end circle * 0.50 -> 1.50; dr = 1.0 > 0; a = 1 - 1.00^2 = 0 * * - Small start circle outside of the end circle * 0.50 -> 1.00; dr = 0.5 > 0; a = 1 - 0.50^2 > 0 * * - Start circle with the same size as the end circle * 1.00 -> 1.00; dr = 0.0 = 0; a = 1 - 0.00^2 > 0 * * - Small end circle outside of the start circle * 1.00 -> 0.50; dr = -0.5 > 0; a = 1 - 0.50^2 > 0 * * - Small end circle internally tangent to the start circle * 1.50 -> 0.50; dr = -1.0 > 0; a = 1 - 1.00^2 = 0 * * - Small end circle completely inside the start circle * 1.75 -> 0.25; dr = -1.5 > 0; a = 1 - 1.50^2 < 0 * * - Degenerate end circle completely inside the start circle * 0.00 -> 1.75; dr = 1.75 > 0; a = 1 - 1.75^2 < 0 * */ const static double radiuses[NUM_GRADIENTS] = { 0.00, 0.25, 0.50, 0.50, 1.00, 1.00, 1.50, 1.75, 1.75 }; #define double_to_color(x) \ (((uint32_t) ((x)*65536)) - (((uint32_t) ((x)*65536)) >> 16)) #define PIXMAN_STOP(offset,r,g,b,a) \ { pixman_double_to_fixed (offset), \ { \ double_to_color (r), \ double_to_color (g), \ double_to_color (b), \ double_to_color (a) \ } \ } static const pixman_gradient_stop_t stops[NUM_STOPS] = { PIXMAN_STOP (0.0, 1, 0, 0, 0.75), PIXMAN_STOP (0.70710678, 0, 1, 0, 0), PIXMAN_STOP (1.0, 0, 0, 1, 1) }; static pixman_image_t * create_radial (int index) { pixman_point_fixed_t p0, p1; pixman_fixed_t r0, r1; double x0, x1, radius0, radius1, left, right, center; x0 = 0; x1 = 1; radius0 = radiuses[index]; radius1 = radiuses[NUM_GRADIENTS - index - 1]; /* center the gradient */ left = MIN (x0 - radius0, x1 - radius1); right = MAX (x0 + radius0, x1 + radius1); center = (left + right) * 0.5; x0 -= center; x1 -= center; /* scale to make it fit within a 1x1 rect centered in 
(0,0) */ x0 *= 0.25; x1 *= 0.25; radius0 *= 0.25; radius1 *= 0.25; p0.x = pixman_double_to_fixed (x0); p0.y = pixman_double_to_fixed (0); p1.x = pixman_double_to_fixed (x1); p1.y = pixman_double_to_fixed (0); r0 = pixman_double_to_fixed (radius0); r1 = pixman_double_to_fixed (radius1); return pixman_image_create_radial_gradient (&p0, &p1, r0, r1, stops, NUM_STOPS); } static const pixman_repeat_t repeat[NUM_REPEAT] = { PIXMAN_REPEAT_NONE, PIXMAN_REPEAT_NORMAL, PIXMAN_REPEAT_REFLECT, PIXMAN_REPEAT_PAD }; int main (int argc, char **argv) { pixman_transform_t transform; pixman_image_t *src_img, *dest_img; int i, j; enable_divbyzero_exceptions (); dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, NULL, 0); draw_checkerboard (dest_img, 25, 0xffaaaaaa, 0xffbbbbbb); pixman_transform_init_identity (&transform); /* * The create_radial() function returns gradients centered in the * origin and whose interesting part fits a 1x1 square. We want to * paint these gradients on a SIZExSIZE square and to make things * easier we want the origin in the top-left corner of the square * we want to see. */ pixman_transform_translate (NULL, &transform, pixman_double_to_fixed (0.5), pixman_double_to_fixed (0.5)); pixman_transform_scale (NULL, &transform, pixman_double_to_fixed (SIZE), pixman_double_to_fixed (SIZE)); /* * Gradients are evaluated at the center of each pixel, so we need * to translate by half a pixel to trigger some interesting * cornercases. In particular, the original implementation of PDF * radial gradients tried to divide by 0 when using this transform * on the "tangent circles" cases. 
*/ pixman_transform_translate (NULL, &transform, pixman_double_to_fixed (0.5), pixman_double_to_fixed (0.5)); for (i = 0; i < NUM_GRADIENTS; i++) { src_img = create_radial (i); pixman_image_set_transform (src_img, &transform); for (j = 0; j < NUM_REPEAT; j++) { pixman_image_set_repeat (src_img, repeat[j]); pixman_image_composite32 (PIXMAN_OP_OVER, src_img, NULL, dest_img, 0, 0, 0, 0, i * SIZE, j * SIZE, SIZE, SIZE); } pixman_image_unref (src_img); } show_image (dest_img); pixman_image_unref (dest_img); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/scale.c0000664000175000017500000003132014712446423015457 0ustar00mattst88mattst88/* * Copyright 2012, Red Hat, Inc. * Copyright 2012, Soren Sandmann * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Soren Sandmann */ #ifdef HAVE_CONFIG_H #include "pixman-config.h" #endif #include #include #include #include #include "gtk-utils.h" typedef struct { GtkBuilder * builder; pixman_image_t * original; GtkAdjustment * scale_x_adjustment; GtkAdjustment * scale_y_adjustment; GtkAdjustment * rotate_adjustment; GtkAdjustment * subsample_adjustment; int scaled_width; int scaled_height; } app_t; static GtkWidget * get_widget (app_t *app, const char *name) { GtkWidget *widget = GTK_WIDGET (gtk_builder_get_object (app->builder, name)); if (!widget) g_error ("Widget %s not found\n", name); return widget; } /* Figure out the boundary of a diameter=1 circle transformed into an ellipse * by trans. Proof that this is the correct calculation: * * Transform x,y to u,v by this matrix calculation: * * |u| |a c| |x| * |v| = |b d|*|y| * * Horizontal component: * * u = ax+cy (1) * * For each x,y on a radius-1 circle (p is angle to the point): * * x^2+y^2 = 1 * x = cos(p) * y = sin(p) * dx/dp = -sin(p) = -y * dy/dp = cos(p) = x * * Figure out derivative of (1) relative to p: * * du/dp = a(dx/dp) + c(dy/dp) * = -ay + cx * * The min and max u are when du/dp is zero: * * -ay + cx = 0 * cx = ay * c = ay/x (2) * y = cx/a (3) * * Substitute (2) into (1) and simplify: * * u = ax + ay^2/x * = a(x^2+y^2)/x * = a/x (because x^2+y^2 = 1) * x = a/u (4) * * Substitute (4) into (3) and simplify: * * y = c(a/u)/a * y = c/u (5) * * Square (4) and (5) and add: * * x^2+y^2 = (a^2+c^2)/u^2 * * But x^2+y^2 is 1: * * 1 = (a^2+c^2)/u^2 * u^2 = a^2+c^2 * u = hypot(a,c) * * Similarily the max/min of v is at: * * v = hypot(b,d) * */ static void compute_extents (pixman_f_transform_t *trans, double *sx, double *sy) { *sx = hypot (trans->m[0][0], trans->m[0][1]) / trans->m[2][2]; *sy = hypot (trans->m[1][0], trans->m[1][1]) / trans->m[2][2]; } typedef struct { char name [20]; int value; } named_int_t; static const named_int_t filters[] = { { "Box", PIXMAN_KERNEL_BOX }, { "Impulse", PIXMAN_KERNEL_IMPULSE 
}, { "Linear", PIXMAN_KERNEL_LINEAR }, { "Cubic", PIXMAN_KERNEL_CUBIC }, { "Lanczos2", PIXMAN_KERNEL_LANCZOS2 }, { "Lanczos3", PIXMAN_KERNEL_LANCZOS3 }, { "Lanczos3 Stretched", PIXMAN_KERNEL_LANCZOS3_STRETCHED }, { "Gaussian", PIXMAN_KERNEL_GAUSSIAN }, }; static const named_int_t repeats[] = { { "None", PIXMAN_REPEAT_NONE }, { "Normal", PIXMAN_REPEAT_NORMAL }, { "Reflect", PIXMAN_REPEAT_REFLECT }, { "Pad", PIXMAN_REPEAT_PAD }, }; static int get_value (app_t *app, const named_int_t table[], const char *box_name) { GtkComboBox *box = GTK_COMBO_BOX (get_widget (app, box_name)); return table[gtk_combo_box_get_active (box)].value; } static void copy_to_counterpart (app_t *app, GObject *object) { static const char *xy_map[] = { "reconstruct_x_combo_box", "reconstruct_y_combo_box", "sample_x_combo_box", "sample_y_combo_box", "scale_x_adjustment", "scale_y_adjustment", }; GObject *counterpart = NULL; int i; for (i = 0; i < G_N_ELEMENTS (xy_map); i += 2) { GObject *x = gtk_builder_get_object (app->builder, xy_map[i]); GObject *y = gtk_builder_get_object (app->builder, xy_map[i + 1]); if (object == x) counterpart = y; if (object == y) counterpart = x; } if (!counterpart) return; if (GTK_IS_COMBO_BOX (counterpart)) { gtk_combo_box_set_active ( GTK_COMBO_BOX (counterpart), gtk_combo_box_get_active ( GTK_COMBO_BOX (object))); } else if (GTK_IS_ADJUSTMENT (counterpart)) { gtk_adjustment_set_value ( GTK_ADJUSTMENT (counterpart), gtk_adjustment_get_value ( GTK_ADJUSTMENT (object))); } } static double to_scale (double v) { return pow (1.15, v); } static void rescale (GtkWidget *may_be_null, app_t *app) { pixman_f_transform_t ftransform; pixman_transform_t transform; double new_width, new_height; double fscale_x, fscale_y; double rotation; pixman_fixed_t *params; int n_params; double sx, sy; pixman_f_transform_init_identity (&ftransform); if (may_be_null && gtk_toggle_button_get_active ( GTK_TOGGLE_BUTTON (get_widget (app, "lock_checkbutton")))) { copy_to_counterpart (app, G_OBJECT 
(may_be_null)); } fscale_x = gtk_adjustment_get_value (app->scale_x_adjustment); fscale_y = gtk_adjustment_get_value (app->scale_y_adjustment); rotation = gtk_adjustment_get_value (app->rotate_adjustment); fscale_x = to_scale (fscale_x); fscale_y = to_scale (fscale_y); new_width = pixman_image_get_width (app->original) * fscale_x; new_height = pixman_image_get_height (app->original) * fscale_y; pixman_f_transform_scale (&ftransform, NULL, fscale_x, fscale_y); pixman_f_transform_translate (&ftransform, NULL, - new_width / 2.0, - new_height / 2.0); rotation = (rotation / 360.0) * 2 * M_PI; pixman_f_transform_rotate (&ftransform, NULL, cos (rotation), sin (rotation)); pixman_f_transform_translate (&ftransform, NULL, new_width / 2.0, new_height / 2.0); pixman_f_transform_invert (&ftransform, &ftransform); compute_extents (&ftransform, &sx, &sy); pixman_transform_from_pixman_f_transform (&transform, &ftransform); pixman_image_set_transform (app->original, &transform); params = pixman_filter_create_separable_convolution ( &n_params, sx * 65536.0 + 0.5, sy * 65536.0 + 0.5, get_value (app, filters, "reconstruct_x_combo_box"), get_value (app, filters, "reconstruct_y_combo_box"), get_value (app, filters, "sample_x_combo_box"), get_value (app, filters, "sample_y_combo_box"), gtk_adjustment_get_value (app->subsample_adjustment), gtk_adjustment_get_value (app->subsample_adjustment)); pixman_image_set_filter (app->original, PIXMAN_FILTER_SEPARABLE_CONVOLUTION, params, n_params); pixman_image_set_repeat ( app->original, get_value (app, repeats, "repeat_combo_box")); free (params); app->scaled_width = ceil (new_width); app->scaled_height = ceil (new_height); gtk_widget_set_size_request ( get_widget (app, "drawing_area"), new_width + 0.5, new_height + 0.5); gtk_widget_queue_draw ( get_widget (app, "drawing_area")); } static gboolean on_draw (GtkWidget *widget, cairo_t *cr, gpointer user_data) { app_t *app = user_data; GdkRectangle area; cairo_surface_t *surface; pixman_image_t 
*tmp; uint32_t *pixels; gdk_cairo_get_clip_rectangle(cr, &area); pixels = calloc (1, area.width * area.height * 4); tmp = pixman_image_create_bits ( PIXMAN_a8r8g8b8, area.width, area.height, pixels, area.width * 4); if (area.x < app->scaled_width && area.y < app->scaled_height) { pixman_image_composite ( PIXMAN_OP_SRC, app->original, NULL, tmp, area.x, area.y, 0, 0, 0, 0, app->scaled_width - area.x, app->scaled_height - area.y); } surface = cairo_image_surface_create_for_data ( (uint8_t *)pixels, CAIRO_FORMAT_ARGB32, area.width, area.height, area.width * 4); cairo_set_source_surface (cr, surface, area.x, area.y); cairo_paint (cr); cairo_surface_destroy (surface); free (pixels); pixman_image_unref (tmp); return TRUE; } static void set_up_combo_box (app_t *app, const char *box_name, int n_entries, const named_int_t table[]) { GtkWidget *widget = get_widget (app, box_name); GtkListStore *model; GtkCellRenderer *cell; int i; model = gtk_list_store_new (1, G_TYPE_STRING); cell = gtk_cell_renderer_text_new (); gtk_cell_layout_pack_start (GTK_CELL_LAYOUT (widget), cell, TRUE); gtk_cell_layout_set_attributes (GTK_CELL_LAYOUT (widget), cell, "text", 0, NULL); gtk_combo_box_set_model (GTK_COMBO_BOX (widget), GTK_TREE_MODEL (model)); for (i = 0; i < n_entries; ++i) { const named_int_t *info = &(table[i]); GtkTreeIter iter; gtk_list_store_append (model, &iter); gtk_list_store_set (model, &iter, 0, info->name, -1); } gtk_combo_box_set_active (GTK_COMBO_BOX (widget), 0); g_signal_connect (widget, "changed", G_CALLBACK (rescale), app); } static void set_up_filter_box (app_t *app, const char *box_name) { set_up_combo_box (app, box_name, G_N_ELEMENTS (filters), filters); } static char * format_value (GtkWidget *widget, double value) { return g_strdup_printf ("%.4f", to_scale (value)); } static app_t * app_new (pixman_image_t *original) { GtkWidget *widget; app_t *app = g_malloc (sizeof *app); GError *err = NULL; app->builder = gtk_builder_new (); app->original = original; if 
(!gtk_builder_add_from_file (app->builder, "scale.ui", &err)) g_error ("Could not read file scale.ui: %s", err->message); app->scale_x_adjustment = GTK_ADJUSTMENT (gtk_builder_get_object (app->builder, "scale_x_adjustment")); app->scale_y_adjustment = GTK_ADJUSTMENT (gtk_builder_get_object (app->builder, "scale_y_adjustment")); app->rotate_adjustment = GTK_ADJUSTMENT (gtk_builder_get_object (app->builder, "rotate_adjustment")); app->subsample_adjustment = GTK_ADJUSTMENT (gtk_builder_get_object (app->builder, "subsample_adjustment")); g_signal_connect (app->scale_x_adjustment, "value_changed", G_CALLBACK (rescale), app); g_signal_connect (app->scale_y_adjustment, "value_changed", G_CALLBACK (rescale), app); g_signal_connect (app->rotate_adjustment, "value_changed", G_CALLBACK (rescale), app); g_signal_connect (app->subsample_adjustment, "value_changed", G_CALLBACK (rescale), app); widget = get_widget (app, "scale_x_scale"); gtk_scale_add_mark (GTK_SCALE (widget), 0.0, GTK_POS_LEFT, NULL); g_signal_connect (widget, "format_value", G_CALLBACK (format_value), app); widget = get_widget (app, "scale_y_scale"); gtk_scale_add_mark (GTK_SCALE (widget), 0.0, GTK_POS_LEFT, NULL); g_signal_connect (widget, "format_value", G_CALLBACK (format_value), app); widget = get_widget (app, "rotate_scale"); gtk_scale_add_mark (GTK_SCALE (widget), 0.0, GTK_POS_LEFT, NULL); widget = get_widget (app, "drawing_area"); g_signal_connect (widget, "draw", G_CALLBACK (on_draw), app); set_up_filter_box (app, "reconstruct_x_combo_box"); set_up_filter_box (app, "reconstruct_y_combo_box"); set_up_filter_box (app, "sample_x_combo_box"); set_up_filter_box (app, "sample_y_combo_box"); set_up_combo_box ( app, "repeat_combo_box", G_N_ELEMENTS (repeats), repeats); g_signal_connect ( gtk_builder_get_object (app->builder, "lock_checkbutton"), "toggled", G_CALLBACK (rescale), app); rescale (NULL, app); return app; } int main (int argc, char **argv) { GtkWidget *window; pixman_image_t *image; app_t *app; 
gtk_init (&argc, &argv); if (argc < 2) { printf ("%s \n", argv[0]); return -1; } if (!(image = pixman_image_from_file (argv[1], PIXMAN_a8r8g8b8))) { printf ("Could not load image \"%s\"\n", argv[1]); return -1; } app = app_new (image); window = get_widget (app, "main"); g_signal_connect (window, "delete_event", G_CALLBACK (gtk_main_quit), NULL); gtk_window_set_default_size (GTK_WINDOW (window), 1024, 768); gtk_widget_show_all (window); gtk_main (); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/scale.ui0000664000175000017500000003607014712446423015661 0ustar00mattst88mattst88 -180 190 1 10 10 -32 42 1 10 10 -32 42 1 10 10 0 12 1 1 0 4 True 12 True True in True True 0 True 12 True True True 6 True <b>Scale X</b> True False 0 True True scale_x_adjustment 32 right 1 False 0 True 6 True <b>Scale Y</b> True False 0 True True scale_y_adjustment 32 right 1 False 1 True 6 True <b>Rotate</b> True False 0 True True rotate_adjustment 180 right 1 False 2 6 0 True Lock X and Y Dimensions 0.0 True False False 6 1 True 8 6 True 1 <b>Reconstruct X:</b> True True 1 <b>Reconstruct Y:</b> True 1 True 1 <b>Sample X:</b> True 2 True 1 <b>Sample Y:</b> True 3 True 1 <b>Repeat:</b> True 4 True 1 <b>Subsample:</b> True 5 True 1 True 1 1 True 1 2 True 1 3 True 1 4 True subsample_adjustment 4 1 5 False 6 1 False 0 False 1 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/screen-test.c0000664000175000017500000000256714712446423016637 0ustar00mattst88mattst88#include #include #include "pixman.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 40 #define HEIGHT 40 uint32_t *src1 = malloc (WIDTH * HEIGHT * 4); uint32_t *src2 = malloc (WIDTH * HEIGHT * 4); uint32_t *src3 = malloc (WIDTH * HEIGHT * 4); uint32_t *dest = malloc (3 * WIDTH * 2 * HEIGHT * 4); pixman_image_t *simg1, *simg2, *simg3, *dimg; int i; for (i = 0; i < WIDTH * 
HEIGHT; ++i) { src1[i] = 0x7ff00000; src2[i] = 0x7f00ff00; src3[i] = 0x7f0000ff; } for (i = 0; i < 3 * WIDTH * 2 * HEIGHT; ++i) { dest[i] = 0x0; } simg1 = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, src1, WIDTH * 4); simg2 = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, src2, WIDTH * 4); simg3 = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, src3, WIDTH * 4); dimg = pixman_image_create_bits (PIXMAN_a8r8g8b8, 3 * WIDTH, 2 * HEIGHT, dest, 3 * WIDTH * 4); pixman_image_composite (PIXMAN_OP_SCREEN, simg1, NULL, dimg, 0, 0, 0, 0, WIDTH, HEIGHT / 4, WIDTH, HEIGHT); pixman_image_composite (PIXMAN_OP_SCREEN, simg2, NULL, dimg, 0, 0, 0, 0, (WIDTH/2), HEIGHT / 4 + HEIGHT / 2, WIDTH, HEIGHT); pixman_image_composite (PIXMAN_OP_SCREEN, simg3, NULL, dimg, 0, 0, 0, 0, (4 * WIDTH) / 3, HEIGHT, WIDTH, HEIGHT); show_image (dimg); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/srgb-test.c0000664000175000017500000000357014712446423016310 0ustar00mattst88mattst88#include #include "pixman.h" #include "gtk-utils.h" static uint32_t linear_argb_to_premult_argb (float a, float r, float g, float b) { r *= a; g *= a; b *= a; return (uint32_t) (a * 255.0f + 0.5f) << 24 | (uint32_t) (r * 255.0f + 0.5f) << 16 | (uint32_t) (g * 255.0f + 0.5f) << 8 | (uint32_t) (b * 255.0f + 0.5f) << 0; } static float lin2srgb (float linear) { if (linear < 0.0031308f) return linear * 12.92f; else return 1.055f * powf (linear, 1.0f/2.4f) - 0.055f; } static uint32_t linear_argb_to_premult_srgb_argb (float a, float r, float g, float b) { r = lin2srgb (r * a); g = lin2srgb (g * a); b = lin2srgb (b * a); return (uint32_t) (a * 255.0f + 0.5f) << 24 | (uint32_t) (r * 255.0f + 0.5f) << 16 | (uint32_t) (g * 255.0f + 0.5f) << 8 | (uint32_t) (b * 255.0f + 0.5f) << 0; } int main (int argc, char **argv) { #define WIDTH 400 #define HEIGHT 200 int y, x, p; float alpha; uint32_t *dest = malloc (WIDTH * HEIGHT * 4); 
uint32_t *src1 = malloc (WIDTH * HEIGHT * 4); pixman_image_t *dest_img, *src1_img; dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8_sRGB, WIDTH, HEIGHT, dest, WIDTH * 4); src1_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, src1, WIDTH * 4); for (y = 0; y < HEIGHT; y ++) { p = WIDTH * y; for (x = 0; x < WIDTH; x ++) { alpha = (float) x / WIDTH; src1[p + x] = linear_argb_to_premult_argb (alpha, 1, 0, 1); dest[p + x] = linear_argb_to_premult_srgb_argb (1-alpha, 0, 1, 0); } } pixman_image_composite (PIXMAN_OP_ADD, src1_img, NULL, dest_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); pixman_image_unref (src1_img); free (src1); show_image (dest_img); pixman_image_unref (dest_img); free (dest); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/srgb-trap-test.c0000664000175000017500000000554614712446423017261 0ustar00mattst88mattst88#include #include "pixman.h" #include "gtk-utils.h" #define F(x) \ pixman_double_to_fixed (x) #define WIDTH 600 #define HEIGHT 300 static uint16_t convert_to_srgb (uint16_t in) { double d = in * (1/65535.0); double a = 0.055; if (d < 0.0031308) d = 12.92 * d; else d = (1 + a) * pow (d, 1 / 2.4) - a; return (d * 65535.0) + 0.5; } static void convert_color (pixman_color_t *dest_srgb, pixman_color_t *linear) { dest_srgb->alpha = convert_to_srgb (linear->alpha); dest_srgb->red = convert_to_srgb (linear->red); dest_srgb->green = convert_to_srgb (linear->green); dest_srgb->blue = convert_to_srgb (linear->blue); } int main (int argc, char **argv) { static const pixman_trapezoid_t traps[] = { { F(10.10), F(280.0), { { F(20.0), F(10.10) }, { F(5.3), F(280.0) } }, { { F(20.3), F(10.10) }, { F(5.6), F(280.0) } } }, { F(10.10), F(280.0), { { F(40.0), F(10.10) }, { F(15.3), F(280.0) } }, { { F(41.0), F(10.10) }, { F(16.3), F(280.0) } } }, { F(10.10), F(280.0), { { F(120.0), F(10.10) }, { F(5.3), F(280.0) } }, { { F(128.3), F(10.10) }, { F(6.6), F(280.0) } } }, { F(10.10), 
F(280.0), { { F(60.0), F(10.10) }, { F(25.3), F(280.0) } }, { { F(61.0), F(10.10) }, { F(26.3), F(280.0) } } }, { F(10.10), F(280.0), { { F(90.0), F(10.10) }, { F(55.3), F(280.0) } }, { { F(93.0), F(10.10) }, { F(58.3), F(280.0) } } }, { F(130.10), F(150.0), { { F(100.0), F(130.10) }, { F(250.3), F(150.0) } }, { { F(110.0), F(130.10) }, { F(260.3), F(150.0) } } }, { F(170.10), F(240.0), { { F(100.0), F(170.10) }, { F(120.3), F(240.0) } }, { { F(250.0), F(170.10) }, { F(250.3), F(240.0) } } }, }; pixman_image_t *src, *dest_srgb, *dest_linear; pixman_color_t bg = { 0x0000, 0x0000, 0x0000, 0xffff }; pixman_color_t fg = { 0xffff, 0xffff, 0xffff, 0xffff }; pixman_color_t fg_srgb; uint32_t *d; d = malloc (WIDTH * HEIGHT * 4); dest_srgb = pixman_image_create_bits ( PIXMAN_a8r8g8b8_sRGB, WIDTH, HEIGHT, d, WIDTH * 4); dest_linear = pixman_image_create_bits ( PIXMAN_a8r8g8b8, WIDTH, HEIGHT, d, WIDTH * 4); src = pixman_image_create_solid_fill (&bg); pixman_image_composite32 (PIXMAN_OP_SRC, src, NULL, dest_srgb, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); src = pixman_image_create_solid_fill (&fg); pixman_composite_trapezoids (PIXMAN_OP_OVER, src, dest_srgb, PIXMAN_a8, 0, 0, 10, 10, G_N_ELEMENTS (traps), traps); convert_color (&fg_srgb, &fg); src = pixman_image_create_solid_fill (&fg_srgb); pixman_composite_trapezoids (PIXMAN_OP_OVER, src, dest_linear, PIXMAN_a8, 0, 0, 310, 10, G_N_ELEMENTS (traps), traps); show_image (dest_linear); pixman_image_unref(dest_linear); free(d); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/trap-test.c0000664000175000017500000000255214712446423016320 0ustar00mattst88mattst88#include #include #include #include "pixman.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 200 #define HEIGHT 200 pixman_image_t *src_img; pixman_image_t *mask_img; pixman_image_t *dest_img; pixman_trap_t trap; pixman_color_t white = { 0x0000, 0xffff, 0x0000, 0xffff }; uint32_t 
*bits = malloc (WIDTH * HEIGHT * 4); uint32_t *mbits = malloc (WIDTH * HEIGHT); memset (mbits, 0, WIDTH * HEIGHT); memset (bits, 0xff, WIDTH * HEIGHT * 4); trap.top.l = pixman_int_to_fixed (50) + 0x8000; trap.top.r = pixman_int_to_fixed (150) + 0x8000; trap.top.y = pixman_int_to_fixed (30); trap.bot.l = pixman_int_to_fixed (50) + 0x8000; trap.bot.r = pixman_int_to_fixed (150) + 0x8000; trap.bot.y = pixman_int_to_fixed (150); mask_img = pixman_image_create_bits (PIXMAN_a8, WIDTH, HEIGHT, mbits, WIDTH); src_img = pixman_image_create_solid_fill (&white); dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, bits, WIDTH * 4); pixman_add_traps (mask_img, 0, 0, 1, &trap); pixman_image_composite (PIXMAN_OP_OVER, src_img, mask_img, dest_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); show_image (dest_img); pixman_image_unref (src_img); pixman_image_unref (dest_img); free (bits); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/tri-test.c0000664000175000017500000000233214712446423016144 0ustar00mattst88mattst88#include #include #include #include "utils.h" #include "gtk-utils.h" int main (int argc, char **argv) { #define WIDTH 200 #define HEIGHT 200 #define POINT(x,y) \ { pixman_double_to_fixed ((x)), pixman_double_to_fixed ((y)) } pixman_image_t *src_img, *dest_img; pixman_triangle_t tris[4] = { { POINT (100, 100), POINT (10, 50), POINT (110, 10) }, { POINT (100, 100), POINT (150, 10), POINT (200, 50) }, { POINT (100, 100), POINT (10, 170), POINT (90, 175) }, { POINT (100, 100), POINT (170, 150), POINT (120, 190) }, }; pixman_color_t color = { 0x4444, 0x4444, 0xffff, 0xffff }; uint32_t *bits = malloc (WIDTH * HEIGHT * 4); int i; for (i = 0; i < WIDTH * HEIGHT; ++i) bits[i] = (i / HEIGHT) * 0x01010000; src_img = pixman_image_create_solid_fill (&color); dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, bits, WIDTH * 4); pixman_composite_triangles (PIXMAN_OP_ATOP_REVERSE, 
src_img, dest_img, PIXMAN_a8, 200, 200, -5, 5, ARRAY_LENGTH (tris), tris); show_image (dest_img); pixman_image_unref (src_img); pixman_image_unref (dest_img); free (bits); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/demos/zone_plate.png0000664000175000017500000067657414712446423017124 0ustar00mattst88mattst88‰PNG  IHDRÑ‹&bKGD˙‡Ìż}5IDATx^Tŭu@T]Ĝö ï=MwwJ7* Šb  ŠŠĥb`b£X`£(ĥ( ( „ ÒŬŬ50={ïïœë~žû{Ŝùç …XkŻuĈqüN$ݜ|T%w·âžĦ˙ûC½Àkş…Ĥé.â_A_OZYŽgPaÔSÎġÚöEr‹ĤrdOĉ+ÎëŻ1ÔĞġKÁ£Ñ-Ĵ_şš9ĵƒhD£3ŭ–­£ôœxŞúœÈ“ÁŻXˆnêfž’IVú" ~?Ü·˘l5ÙŝìĝmòŠxëa%ÚÊo‚Ħx ÄĝbQ{T1>U‹?Y*NµŸüރÈ-ğ×$ä”_]˘€È-<;B°Ş‡XI"’A7òzy³écìĉùêt„˘`êµóÒËÂĉQ.N˜M%_^Üıphû:_÷yÖ&úÚê*J òRPRQ×Ö7ħžçîğnûĦ w^|)i` œ;Ú\ĝòÒN/S BWŸż9öc“àġĉŬ²€·³ y\Í"FrO/”C–\-ç›î-“CtB?NâWçSĊ—>ĈkO#zGk‰Ž‹ĈˆEüàÛJšÒáVüÇ ²ö•ñÙû†äĠeDü"à×ħñ­ĤèHœbvoBVoHëeÏUU_ϕt²ħktçĈ‰ ô /GS÷k §ˆyVèÖôÏS̟\,“3µHhĜ^ğħ^yê5Œ8DÉ·´š|^á/ÒJGoħlĉr/P˙ü£ŸáZıĦ*£÷ÉċyH9ùÁ°ânÌف%јCşÍ4uDS šċƒˆpFağÚR~£BßTA”m“Ŭ[KT푧-Ëà0_zReÖċ°²7(ĴÏĠb“ÁjˆúĈ7ƒÄpĈNc2Ŭ&"kŸ­¸³VŸJÒXzúSŸ(O9°H›Ž0´„\ùP5Ì%„ӝ%é·£6.ĥ֖c”".§ĤoîèĥtU`pè}á"#„ïÛ¸jİ›£ıšœ8EH 9mëĊ£n§—tN îpĠ‡+! 
´]{с”ò œ×ġéôR U흊Y|0+†N6Ŝ™1L Ù¨Ž¨gLbµçĴI ²YœŻëe¨ž/™œŒe4ù=UDí^YħMeDĊ&şÊ™av˘1Ù˙ŝi.ê[†ż3§lï9+İŭ/t$‡MôGüş&v ËğF7"[']è7×rĉ˙ĤƒÓĜKI—ŜkÜÙjĥ½FEb‰ƒè}a0= žÔĤj%oî}$žżT­½NÉ7P£nĤÌÛ¤œF‰(ƒ3ĥ[qĝı!?RĜ'œë$ —éÈDžŒéĴÀ÷IV˙;€*4•H„Ô/f–#_[ħ+È|ċ)/ŝ–³–ŝ‚BşIœEö Òçwv9K<Ċc)Ë;ÜiWħLuLü”éOaœ¤ñŸÇóBìż˙!äÍN ´Ud$ĊÜäeĞ-CEH’šk'|ožÂéĤOħíäIdµ…‘/ëYÄäßہ˘÷zÔÀ4>SAW$ĥ‹Ž)-D}ۗŝŸÄX Ŝoƒjk"j#”İË> .h‘}óĈ] '9 )ê…ŻÍÈÁ­ĵğjÒfğ6Ħ J„÷ĠRñ/Ĉ’qŸĤRwñL ġLì*ͽcp%*áÜĠ9_:]°9KÜ$…_×rŜÒyO)ğpX$l+ëˆÙj*QÒ¤ˆû[-ı_Ħ3öÉì :ÍîSz@Fî+„ `ğzß oúU6›;‘/ò=ô†2Ég°5òĠµŠËx÷‘KĝVzÖ´›ryŻ…^C›‰~}—ÚŸq/ñׂp$ {%ċÔÑëAżK<–°ŻÚŠl›lZH9Á-ŸK=É]„†ì"Ï-Ä_ê‹E×Jàvî kße —Üß.ÌrCÍĤg’í³[#ü/~ İ ?İ4)˙Lŝï=ĝ•~šĈn,” Ïٗ=F gí·˘“tÖŜû7KŒ]ó7¤£rĦ÷ŠĝtCfìf'M1„,k¸0ĝÔ£/•½˘7=\šó.)îìĦ]›ü}=Ŭœl­-áemëàäĉéëżi×Ħ³qIïrJ›Ĥy˘K£·òË£SÁ eɈ˜ĤÓĉĜ̆i\0Xt/ÔAú_+#f˙Ŭ[ĞC˘[íÏ&Ĉ²÷Í!K,ĵрM‚íŞñ›Ïüà/EóHüĜ ĊûÂıe†Ĝ%ÏL'˜ĦnYÂöŭ’ò'†Xwµé;:ı Zâ‡GÇ£Äô_â…sÉğF†CÑEµì“ÔıċÜ”…M“ې­Sµö‰ğtŜ'İWX.x-î5ŝGÍĤĞ^ߤ­AÏ˘·\Ùm:‹ż„Üç-SĴ­–_ƒ!géy/’żu(n"6¨ô2÷*ìĈ8†ï#£ŠûönĜvÁ§HV‡Ü|•ĉP’0cc;óyÈr6ĝNÛK"§1]Ġ뛴Fòe–Î|’X>ûYz1ĵ·ek§ƒ|6wrK“ħ­[M:%ĝޞ…Ŭ³/ç£,¨ĉœ˘Ù`•”KŬрfî-™³S#Q’*WĤÇÎ(J†w ŜÍC^pGè‘ÜÓı÷m£˜.ü_„:j{½‹hılC’[ûno¸êÌ ÚGsġw|•yÏ˘ÄhŜ9/UÍ`Ml^żŭûxż‡6ĠNÁ^˙íeá8{¨.÷ilx ğĥĵŽw޸Œ²Ĥİ5Ĵş‡ç/Ż%ž°ĴM 4•eÄİpMĊäµ-ŬcŸĉÖ ħqœĠû÷ġ…`' ¸Z´=ö?ŝ;Š úób×HŞ^çòF ĉŸËžòˆ’ïz§8ڞÊpڀOĵ[+G²ıÜBt]·EĠ#ŝá]1FˆÍŭqnş;IïÊ÷…23\RñÌĜôɨ‘ݳ2*·¸Í¨{݁²ÒCĴÀ†vŠS½€rŒSn/vËR×ú*8EZ=Vo+“†Gvp³ċ:[-á)\,ŭyvıħ™2ù#ZMġêÌ4ò!b/íû€3ë!òhĈÖf&IÒ\…ïëÈBž*mÇÜìûG•ŬB7B^è/âǐ>Œϛ}‚Ä ŭeŝvè9Œ $@p 9E\D˘Û¤­ĵR Êßı;‘ƒüÏŞż8û‘ ÍÔó‚|SİÛXމÄ~ı i×ÈÈŠm‘iAÙŜş,u|„yEU|oğ Íqŝ è9L (Äۏݓħf^/ĤÊmùɟzí+N÷Hì'Úoı1hóbŞì?§“ŬïpfqĴ·**f’T=K0ĞR¨À`“Q7á³]żžÛâ1G‘Ž˘t9]Û%AûÏŬ}‘]TÙÒ3<9Ëá 0üü9³“=-•EÙ/îžÛ´ÄVWNôuŠs<ĥœ{öĞkÇ&ê2b‚láPY–RĊ$fĞ“B,ÄPUïĜb&>~· …áxê[P3ĈpğĠNô'zĊ}_Oñn‘£.~=J[DV?֎”÷>8#öi‚ö½âŞW˜#Ç”Ż³zĥS,2‰[ʞ‘‘]$—rŝ “ìĥ”ià<Ġµy|²ŸóË@ġ3˙ ²“û]Ù˘aÀUêo+é6…\$N!— H‰}wëĝ+/ŒEžÌÎ3ù@Šá/Òy<š4òÄ"¤šż˘I*!¸›ƒp· ŠÍ]€…¨Œ$˘ıġâG.–Ĵ8Rv·Ê<’^ÔŞĵBx}ÁœŻ×ŭ[*@p9G„“’ùAĝ3Ŝ‚) ÖG°†×H­ıR6?Ù6Sj*˙Џ#n_Ɋ@}: †YÖÊêĴñžĈ²Y/cNŜż+tËĤ AA6m Ŭµ˙ݘĝÄY?Ê{ĈYpa§ûjrŸ^Úík£&†’¤ ïġ­u–à÷Ŝ u”Giş+br‡pncjˆ)Ĵż1 c˙½¸@œl‘7+Ĵ`O‘\– I@´ ˘}Ŝw 18׉o–”ÜZFÔl—” o>ş"éÂĉm4ŬÜÎíT—XÙÄ·†[Qċ!öˑrˆÙş ˘è4‚Ui/~‡x%oZ:³ 
‰äg+™×Žx1R°#&P‚yoŝñ“IáÄ9äŽ @êw·Ŝ|ĉ ô˘p…rk=_£ÒMŠcYşŽŠ×碉#*!Ĝ‚ıX(‚;;aÛUF’~Ö0˘yŽÜ ”‚F™b‹Lm™d(FÏëĠŸÏ|M>BD_²½ċ~ ÙiĠtšéĠu[Ğ˙ġ|Ïہžї 5ÙÉĵÂ/Qĵûz–’£ùùĈ²‰tMT"S_:NPş€:0qˆ>ç^ä†ĝV`o-IUĝ''Äċ#Öĵ[Rö@–ğ‚˘zĴ /Ù*ÍËĉN=_L•Y—Íĉ|ߪ‚Î9ZÊgċ…˘²>÷›1Öï3.’¨ĈšğUl|àó w%”nä%·O€W<=ìm(R,Wz½aŒG`³ŭ59ÏŻ öž;G]V ‚|„DĦ‹KÉ)ިihjiij¨İ(ÊI‰Ó)$Ò1Yġ9s½ƒ]{žSÓ?‹ĵħ†ï­´T  †Ŝ‡ŸVŒ‚Ü+ŝFtTÉŭÄçœ]uw*érĉ7 kï#‹†ċħĝGç *[żsĜÙëd¨‹ŸOq³ŭÒ[KĥcŞ”ıXÛYÉŬÍĜGÄé^@²|‹Uĝ"nEĝğ9ôCĦ¤‚8iŭL"UI3x,kœÏ&/íéóĤ\Â_ÉĜ5 -Ħ?ÂÏĦ;xï%=F˙Ş[w×é™uÖhÙ ŭ’ófż$GGÈŻ™óġ{óèaD¨dY­Ì"DĤħ€rëàȋfÔü$=Qَ99ˆ;œJ‰¤ü*ú9Ž³à$½ôŸxî§Ú•K=†Ż•Ğİ–÷^CîñWÉüé55í)SrÈ•^2#>VĴfĠÖaĞ”ŜŒ^ÀŸˆğvġ.˘Ċ/äL‹9‡HŬ# Ħ duçT$ĊOĠ’ÈêÜL²Í& Ŭ‘E…ÄÏĊˆĴÂá£=‚Ì…¨á^Ĉ²êÁ:˘é„.by“h:oŽŞlûÎâŭ94•[‘ÔMŒÚcLs8š3NŒ˙<ğH‘°Ŝñ¤f–˜İyéĦAELV{ZÒÏ%ĝcġ_]żÀHŽ"T)5“ùË6î?÷8ísŝßêĤŽžáÑħ xôt4U˙Í˙œö8îôŝËĉ›¨IQ”`´`ŭÑ_ëÇĝ·żäéħ•&R(U#òyÍ 1[ód‡µ˘¸èìOĝ 9GÄÈĈ{>ŬI+ä9‡ŝXß·İ ĉ盈Î+–ˆî‰&˘î *yIoä†!ş0Ss’Ÿìƒ#²ĝ'Q¸q/$²mI›;YµRñ{JäTçj$h`(Ù0ÒíK:Ä)6•{AÄÑġvıŠ?Á/ ›Ù9JĥmVjĊcî29ÓK¤s'\•ÊzLM{˙ÈĴâßC ŭäĞkäÖâǨı]Ş~x„ĝżRúI³ ç½*Ÿ”8Ĵ·ż;bĊWLBVÑ/°-Ŭ„‡ĊĞ˙N =ġ†ßĦ×x‹´{~÷@&˜9aŻßU*ż”ġ†ŠŬEàqh8ö€ĵ‰›)í:PoĤúƒ½=‰½“·¨œFöħK-iŞéDŠ’Ĉ"]Wú:żr1ş#£z×D²~˙[M²x!èĜ'Ħpĵ˙fbŭx†ùÈ1‹äĴ_“Í™I_!A÷„zJŭE[²”Ï£nb8m“&"ïsğŽÏŻğïŻIbXî|ŜwÀĜĠ† DÚ2èʗ6Á(y~*Qo M;ß]’>—µ²„!äLt7U˙ĝüáġ³äĄ{wïŜKHL~öúçĊUMŬ#ÓÑßbĥ—}Nş°Ë×NS.uÇÀSÏKxĞí˕ Ki„a¸:öÜMÏwZ2Hšŝ÷EŸçĥ<˘ı)m˜è~ä#Eĥ½X,OşÄŠôNöqñµ9‚áx3Äösĉħ52ï żï¸‚ÄÁ Òêĝ{kRP3ïžŞL Ğ}ş¸’]Z7x£Ħ”B¤k¨Ĥ,KÙûàéJ ùwĜIt+û‡ŞYŭ€Ğt&wùŽĈáğX(ġ kİ|i—ŭD&ä€{è?z´ñĦï†ġ<…'iŞĊ Ŭ,ÙèU?ѤA…p’é}ŠĉÖÀú›{`’ ?)1<'³İdˆíÌ'ÒHç°@éÒv-çétên<*ûH„!”ÇÂŬ¤8â4r{&ĥd¸ĈLċ+/ Ù)ÔWŭˆ]¤z´mB‚GğV Ħc½ÈêvĉQşùg"Ӕ>ŜK{dî+îÈUtÁkötòó(óOó[t4˙|4_K Ħ(ÙĊd5Íœög—ëRI*‹O}À95uHböQı“DïóujˆjPÚ(ŜvsMÌ ĥnġ1ÄèL ÑxL —<Í~½5¸:Â}5ħÉ닒•>ÜÏIAxW]òĝÄp8Í4“ĝlN?Êl_öŽ…˘+şFƒ‘McmԋĜGUŭBÎ$Œ÷UĊĴfx‰Ĝ3ìršˆ#í>Ĥ„‘öA ßMMŸvÖj/•ÄΑÒ&Ìí ŸKž2sâĊP~6HF`ĉ°jrѧ½2Q\äŒDÇäk=Û|&ŬòtƒmkυĤ;³Žğf{‰t€0‰ĊBhé³^²ùcÎJżGÈç1—3ž ÷#'ñ'ŒĊƒMv²o°óä•Cmn´+ĜGMĠwÄ#9ŭlÑ?2:yFVáÂäÄyÈğĝŻí§t~ßiuÊòïÂ}Ô1qœ˙-P’áófš_°C 59Q!œŝ´UQßôv˜HÛ˘‹J8ü1wÀ9OeTÌlsBé$ÁiË:ë7GĦİ;oğ–U7&€^헇'‚™ĞIR”&jhğpĊĈ=Qçßò*ŭ—œĵŸùù?ór|LġäŝġsQ{6Xhk¨*ECФšù˘àżÔBAQ0V—um›³: ‘˜w6ЍCL–&l6C•=ÏÁ 0ñ¤“Şğ%m€~ğ 
>ĉÖOÓŠ&¨ÚŽŝô†dà7ŝx˘#ŞÓ'üœ˘~şŸî„ĜżĉwAx~bò‚‚ì™ÉÑ#â:O„žÈêöy £LâµĤB"ž­/÷ˆx§ŞùğBskZI>½‘µk\Ìx‚ŸDö Ÿ3–3óäŒŝVr˗ġšM§…`ħHĵ0@ş¤]ӝ Èv6MCoħímÙ7Hß[¤°EĉìsŒúŻÈ›‰3½1ÉjĉXz`ûdÚ wf̝y—HŸ{5½1°Ĵ,Ĉ>RĦеXħĴÛdNGĤm…†uw½Ħ~ġàé ŜNäö\Òı£w1í&‘*mY>ğY;Ô³Ŭ3Ġêîëۀĝ4pb¤´ŸáܐĠu‚$]úŜÎ=Úĥ&á‡yÈütA÷Q%úúb˘ñ€"my6üĦ#Şq° ïğċH–ߐ=Ë˙iˆ(­K!zRüUċU ^]B€6‰nş%݆EL”ÜÛb-u½?-à³í?ŻĥƒS1ó:tíÙײÖÁi.F˜€331ÒßŬŜÜXW[SS[×ĜÜŜŬ?21ˆŝ”;=ĜZöġÙµCAf*˘ŻW³[}8ñGû,.){zĜK—ŽH[oıW2A°j’ĥ˜ÒIÚ u̒-Ŝœ…d6ґ9°¤Žòlܰ0™öl4azŽ;4~ĥ*àG¨9}şì$ä2ĥ^²`À̤·DŜs&K,€ûjïĊ½§ UĴÛ:ċ>òö"‡YŠĤċÌÍHğĜL&™xĤ ÷ż+môORRMÁs­İĈ‡öP,??Ñ••Ĝ;+’_9ŝ}Ħ(î>˘ µ’¨Ĝ*)µ­o;ݍ:>mW"9Ŝì&ÚÍHú$÷})~ ˆVë~b2'ÊAĠ\}ğ ħĞàDVZ°?b —¤ö5•%ĦÚNëO$~­`a„pv¨ùï—ç·ÏFlYíéd=GWCY^FJB\ ^âR2òÊşsĴh²$eîµ6˘ûĤ#Ii{‘pä#Ş}² /Ŭ&%ıµ‚¨Ü*p¤[TYĝ/÷#Y½*W˘Î?‰–”=C¨ÖıxŠŞRŝĊHú.ŝYOá‘,cVÌC63ËM³‡½ĵrŽmÖ*…SŜâïĦ2˙† –5)_Òkb6P ığŒ$ħ]uûr¨GĊ֟ pŸ3€f·Ë„an6ĵ£RpôçüCŝžôËE…ÒmÙèICOa½¸Nf+ħKĵ¤Ii”’Ĝj µ*î3ï ŒGŽCŞy*A‡ñû䍜Ï9:ĉÊĵEĥΖ+}À.A0„„L´y‘ÂĤ!—Ŭ<8²ƒäR‚=R“½Ìî&Ù|$ŝ,A|'JVÌSy}ÇÄCk‰ßtĊÈ&˘4DŞĤĵÉ7Šü–<ĉ‡²ClD`i‡dŒŭoĥè‘Ä£Aĝú*-2EgEÌ÷>6RüpÏu*"żhÏÍìz¸ĝ-…ŻEĴu5…¸ʀ4)%­9ĥΞ+Öoß~ÈÑcǎ9g{ÚžÎĥs´”¤hP „œÁÔumĵW…-|¸ê³oîY¤/ŽPĠìyX<‚ úÇĴĦµV]/†jä·(Gq’Ŝ–7ŭÄhFˆĥè¨Ċ›bÈ2˜ĵ[ä)n)“<¨o˄”M‘ŠôÀßDm¨¸Âħ>^Ş9iE ñ}²äñцÜÁ,Ğö+q!íÜ 5”é0’WÛD4 AÀ%샒qÉìV䨝ÌÜxò>s6’ï‡ĦtŠ4Ǒxß͸ĞÔ6¨yÀƒ+\ĦÔT"‹Ĝ*SWLzN>D³Û¤#„ÎŽ‚ıŝ'¤?ġö˜ÊÌĊApX²ċ ĴżÁR,RĴâŸÄ^b£lm…T0~„’9fm:ĝC|„ŭ¤Ç‚M´—ĵ Ôánô*q Ŭ{ѵwĦĜc"Žş¤·g ċ˘³ĥê[âĦŒÑwì d³/5ċn *<À6ö98NgĠĝİ y+U÷îìÈYeħmµDĦU=şG˜³ zf˙ˆĥÓúˆĊ•.˘á´ ˘ħ§€ËÎŬ ]ĥ÷cD÷¸}o×-İ[ŒİdmżëżĈpNóÛ(OM**cıöüûZ(δ˙L:ħÎIWš‚%Ġ-ÜÎ'¤ċ–5ġŒ2ᨇ Ô˙„Bĵ„ ŭ7\ ÌўĤ²Ü´„óaîê’d„"­ë´îDÒÏö(Ö?żÖRjzF½mĉàcżûi“İĈ[R[ĵšÛ£òÒMŒ½‡ö\6·`brşèşbèŸn#ŝAŸsUް'ZêWHÔnS>;2{W—şµYԘÒɉS’ğÂn D=*·ä4_BEĉ.öŬHĉ!ñVUû³"eIOïjñXlaoĞ=œğÛÑKÄUt·0…ş÷’ĥI˜´żÀñƒĤÖc™”#x°TE­ìFbŻÄż ħHlİì€/-’‡.Ĝ•ħÛTäSgîiÉ=)ŜM˜4ĈÂ%Ş˙2 t¤ÜVÈ#vˆvë:Ïĵ†[ċ)…ğJ*wÜEİdhžòŸħ…2ŸÙë(÷ˆ³èvîWe³Ş‰52ĵW2úĊ=„êï÷CwMµxĦğ'şŭ! 
˜9Î0É$X“·÷Ì^–—ż8=rLZáÜó†mC%Q´Š˘uqŸħˆ¤u f!²4ŸôYöGIšGÒ0^wŜš$ğêÙ ÑûÄOÑz֍3‹ÎşÉ Š‹Ï˙…ğ8y‡4"fĵúüÇVÁí-LŒ\f*GFŞÖ{b_ä7üw s§ú›Ëò>¤Üıt"bÇĤµ+}  àş`+´|VŬ´#âÄ;)òʚû§ T€K£!˙Eì_kUB–3]™XĜË%X­ÏŻ6C¤ív$Cì1úóübEDĈílï~¤(ú=é%Ÿ­’%YŸŻ‡“4Ù(qžÖ"-Êà^Ô˘Ĵ*"*7´n0ÇÎ)H™(/yĥg;Ùú ‘iÂ8>I€÷ÄnÔĞejê×ßż =Äŭe¤ôû5UfÊ_ıÛѳÄ=Ê:ög™…c”ç •(ıŒçJ­âĤ`İÈŻgœuğ ĊvaŒÂ6e(ì2ŝVK„cŜ“ è÷ñ“\KO|ƒóÂ5]AĴ2b] W˙$Ŭ™6\Ší—ĴŭC;Šùjô|£ÇeĞê}ùw Ĵ°Aüû˜£VC³EO†moµŽiK‡µjá´5ˆ!m˜ù£Ż‘;€lšŞużƒCàš‰?1ĝŒ?VTN|1£cv“ĉç”­ÜûZŒ=]ĵ'&¤UĊD‰?EëÒ÷µ:'n„˙ɗ&ż/Ŝgƒjì/Žżô•¤:ŬhÁGŜĴSF47ż"F²ö˜R¨fğÒş0^cêv+qDvîž'•SŻëûÍŽÊDBÏ}û•ôò~èċp†ò_Ċ öv4R•Ĥ“EĠ>ş„Ĵ˘š–žÑ3s s³9FzZjвB>HĤKĞ9z‰{•ß0́nRyú•íîzEÙqóï]·c+Ċ"ƒÈŸO îf£›}!žĞ+>Ĉ?È<À3!Âïˆ;ÔNmBr4ô˙Ìl Ċ TżéBUëŽSê^[Ê ŭĉ-ÇħïpX_Cî}ëŞdñ”o=ĜQڟZÉŭĜRé;¤ŸĠô ,£UÄ S.2× ß¤9G)ŭCı13Ç —Ĵû• %:ƒ¤‹‚eŞ­%’›3h*ÛSıŞU×n¸@Ĉ{6S̏!îË̕Ÿ×Wo˘ù{z %ž¸C[:Ĝ2_<‘x(aW5³ êY •ìΕÈĤ\˄÷”oò× .ùÄGtE9ž=q˙ŽWo¤İ_ĉpD,N³ž;£:§Û‰ÊpUÒÜ{ƒxe”>˘³˙•³KQ z3D ĵÖFÜ.üžÁ?š'È:EĤµrññżvÌS$‘lƒ.݁ûŸÓ_–~}ß ;-) BS³]ĵv×ñĞ‰Ż³ Jk[şú‡Ç'§™3³³3ÌéÉñáŝ–Ú҂ì׉WïZğĜVOAŒ„P¤´ìVìğž^Öρ8 ĉŭĊ [IqŜŽÇqnkZ¤“,"=ï§A|ĉ÷7)T;ĝġ1ô&HÑٕâŭÚŻƒèGUâƒ÷ĉ’T+‰öÓ:¨ósÖôCKÄñwĝ‚:mc5ŝŬ™Ÿ—Ż@żù.èšFŜME{Â2W6!+;Ħ›²²g(Ù5Se'ñHŸß2¸”v‡ˆ§Ĵ™ŝ­iRß7O>—é+žÁö˜ġ–)ĥÓm­Röd§˘gM’%­ŞËIƒ:K ôĞN2óœ3sƒò§”7İı w›‹Ĝ@Opèİà-†mî.Œ”¨ŭñâ"HoÜuzsiaÄ~êÇq[î9ö+Êì6zżĠÀûäÍ \8Xk˘žÏ F£ħ4YĞêݍH8ğÄ\úñZEó‘Ĵ¨ù–xĞ#LJ ¨ô ê üĤu$› ĵ|jġZĥG\ñäï•#bódvúĦ5bq{„˙Í_\bí.ó­Ż8+uî²´or/1ôzƒ:˘à{ŻAÀнğZ³Ŝŭ²…‡ ĉ^ZOGdĴ7^ûÖÁ&xŭROĜĞ1@Ħċ°rOLÊ׊Ž1ĥ4œİĦĈżs?żL}üàŜÛ·ïÜ{8ġeú§Üß˙ğ†Ĥ8 ²Ç:*ĤÄìYé ‚†š}À‰Ô?ŭ<‚ŬñíÚFk„żêRî ĈkyıÛZ ĠX}·–'h¸çĞ€¨ox=Dô&ûJ“màIġbˆûerżĴ•÷˙ĈımX?œž}bƒ8⠜TßÓ&xm…*Ç3lHëšĝ ê é ‚ €'£ż8MĊdâƒĤÊk⑴y ;Ù8Um%›†E£ÁĴ|u“ÚÁ…Ü aàô:~½í ĵbûȕtڎ¤î'Âhı½:îĵPÔ1XıŬŻZ‰HĦğ9;†öŻ€ôh”6+Z²# Én?ɳuD‰A%ò£ñÈ]ĥ“ñH&ù8,QÔcà0ñ‰ğħĉ!ôĥ‡t@/1èÁ[I·ŝ&ùLÈMĥ͖ÌQ|‹_§;7L…"ƒŭÈĈĦÁ`ħ‰uFÌĝñĠš²³ŸyNFùĞoż˜Füì)y)ĵw‚ċgMÜ6AĉĤÌÌt#İEV—Í­eĜĝËRÇĜ|2k›\²;™˘hgIT}ĠÍòYbäÇĊešd²Ĉ’cïDeşŽo7ĥ·Š MĠn͑_k‡àçMt–}vĜö5‹L´”dÄhä˙JP ½ŝû7™&&£¤eâ°hÍöc7ž}-ïœàÁŭ1TûġÁ‘5vŞ Ġço½ñ­CTl|wl‰™Ĵıìâbĥüĉ*uTÒ9şhïx !ŠÖĥĴIĵ!֑"µâċ8Vv@ 1żÜITGŞ‘Ü^ÎΤÌELnO°` 8½9€”üİáÙx ħŭ}ĴÊ2ç˜ŭ;)Ö_‰ĈbgXM>HàF$ 0 jpĤ_Çß*Î)™ŬwĤĵMSż›ä[Á4†¸ŒFà÷H{°;è!ÈÏc[éŸ& zŠ$‚ñäÌc'ö]äĊ¸d÷P 
‹8ÛòNŠ7f#i’Ñ,äµúòş[&BèdϋĤWü„ġŸxŠ<œµ·˜xĈ@KĦŞAÎÒQâ0é1#sv™äç™ċâéÜ`òâ ²‡Ÿ!ëÖçÎx@ÜĦğµ­$ċ™È@K]ċñVK>+°£˜è!9ŝ 2Ì)Ħ]ì›jXwuhÁµĝ·E¨ùى›F¨kgâ52'ĥ˙ğK‰ìü`ŻK.›ÍdĴ?&&Ž’àĈVi¨‚6^ úzÂÂ~ö!òtvÀIÚß z4ÏŜI!ÓŭùVM½… É Ĉ=ÍĜ—¨Ċ˙G…‹Œ&R‘ÇLkf Ï[˘ÖR â‚8ĜZÁŒOSnJŭ6Z5ŬVšƒÎòßg×RîçĦ7‘§nĝ—ı =ÌÏÑV˙„]˘-h˜Ŝ…ì dvĉĠ6Oѐ‰ċ´Cç˙…~•ĝ{ti!Bdƒ;ÓÓwŒÑ…ïy׌‡G“܏+Rëq™ï äñh€hıîDs‹oÂĤż‡›’Ċĉ,˜&2#ìÄQŻs˜çµ8áEEe­ƒ.ƒvNÔşıÏÇB™ŽÄUÍÜ׈Mŝĝğgl–Ġ>P˙ħgĤĈG‡úû‡GǧfĜ˙)1ŝìXOŭïÉħÖğ›İŠ“ş²…Ï›Ÿê'„8è/YˢT-ŻÚy C8炊ÛEdÓ'牑MżOcMñnb4§ë-ÄÀ#è½crż­“bĴĝȝ|ä€_ĉ½_ˆ‹~b2À…KQûxŸ(ĝ1]×ʉ•ÑHÁómİGf „2Ö½Ŭ5Ŭ°€v û¤?Œnbŝ5TσîÛyâ>eíìwyçÁ MĞî-›ŝR%·İOŒ`8¨@ÄSÚ˘ĥ„¤0mĴ™‘Ô £E£ŒĊÔKl3O$˙ Hz³ĜĊŭ5„ŸŜÔ-ÒÏ:‰Hl‰ÁĝS$iÚÒ~6 Éş=ùbÛ@öÌ…Ë×̨£F~°TĠĦżRÛ´ıÓVùÇl 9b’•c5–òoħ‹ èġ!çŭ6—ĵƒ1”ıƒŭ´˘ŸœmY…,ŻçĊ)(ĈsÛ6‘­Ŝ+Q›·ÂĤ†ZÌë‘9êúž?£Kr{žNq&İĴüÙUßû[ÀÌX'š˙Ëg6'‹ğ\(ċ›jĦbĥaïş„‚ŽôCŠ(UÇçdz Áë+LŒXj,CBĊµĉ­=r/Ğĵ{Š’À݆? ÷ıxd÷F?/×ıÖfĈúşÚšÒÖĠ76³žëêċ·q÷‘‹ úÓ7˘@ŝTwyÖ½#kçi‰£$‰…}ü-z‘2J7ĵQ0$ŭ~°ÛUƒ†ˆiğn‹MŻààĞżêkò…½k˜iÈÂÑiž¤ĵގħ…Ŭ|W%Ŝ>˖ùx/ñpogaĴ£*/)JItY ³kö^HŝZĠÏÂpÎ@Ezì6Wm1„ĤáşûÁoÑûÜ4Ĥ£Ê‹˘ż ³×ĵA˘¸4†Ï-;7N›S‹Me)˘–ç‰Ŝ[$•½xïˆoGy™ž$틃ĵ´ùˆŭ+~ÏAiı“37ÔÄ1/Ê)^ç´­GcIŞ ÷„ż¨ÇXuîäCĴŞyôX쳖ú'áYòŠáfG)‘盢 È1T '—JfħüÄ2XP¤™ŻQפé4ġž‹FŭżşñàĉeóŒ•Ċáˤ´ìWEÜÌŞ†o7P’ċk$ġ|N~żüEïîA-WĜ–¤…J/ş^#`E™“¤}S‰Ökdı ÏlVĤŸ¸ĝêÏĵ‰Gލaì€ Û‹¤;ÊIħBÜżâ5İúıŬ{èşĝkÑù?‰tcĈ1f×zħ‘uœĉ—žîXÁ}Mo˙rÜ~VÓ˙Ċ EŽßIÏïn³QʛYMKî"Ŭí=,”ò’+óğÏÔb(W,BÀ÷ĉĥÓİÈ]–£%ó!’6¨ħ†ĜŞĜ›†ĵ뒉äÛzàAşÈ""Po&†^›‹>Ġ 6*÷d wg­íaŭŸMYYO½BÏc뤊;ġ'²›ħ[ÈIâ$r…8ij H¤úϨ™ÖŒĝ@nzîŜŬçÇÔ'Mġ üĥ¤E‘ VÌâ'v_^9žcEÙO3}‡˙YŒ,ŝE|sAÜs‰2²ŝíéİ[†¤ĊŸŭtHnŻX3ÏŬÈÊÔ Ŝ›ÍÀÏ,^a˜.ôXÁ|g‰4Ş·-­Ÿ˜Ê?í*ƒ(xœŝ.fĵŠpQ"Q4EĤ– ñ݆×ĥğëK‘P†ŞċÒ3²ËÚ˙ËAĜ×Rġ;'óġ“„[×.?}êTô™ó—ŬJxò:3çwUK(˙ËÛ˲ ]jİÊ@IRúîÛŻ}l˜Â…£ċİ‘‹4($%—ˆW"!Ê÷Ó ˆŒëéü)˘?m›*½äN38 s­VÈc}†và|¨f”îU&ğ=Ÿa½r#é\è|ZL2ĵ55}[Ÿì_Fäş#.߈_+ùƒż3í‹ ZċàÏ5äïc?-ÄbE’·ñ uÍOp½zġuğÓïAÍĊg¤ĈT­`ĈŸš€³ Ó=B\ùoina›ÙŽzĊRë°óèĞ)kĞİg°ì­gï˘=ʉÍÑGhn-=fF/X„€ ÊEeÔk3ĈŜĝ.Ùĥo °·™½<›´°e>EŻVÉW5Şy°DR³hxƒí”§ü ´çü­F!ÇÇ4ßñrCŸìí˘2•–Ö7Á)²w÷P²c²Ġ‹´wŞc%²g2‚b›Gĵ1[ÀÈA1Çü–`ŠÁÂ$OqžX" Ŝó.h“àŽ'Ĝ"gۈş£şˆÉÙF˘/a!]lQB71l dY[Ŝ‚?ܒF6ܜÒÄvg^ €2Œücż÷ñİÚ´è5r$TRáĥ‹Ż~wL áLŬÏ´;§÷z:šh)J1¨˙£!‘Éx‘˙''DIT†”˘–‰£gàžÓwÒ~ÖġÏ ÁTÇïW·-ԗDIrk˘Ój§p~Ï÷X#ްàpF·ß”²ÙL³ 
³ÂàÛ-§y6@t',£/Lè#Ϛ şGëˆĥ³ˆmÂ8÷b’ö…ŝûˆċÎiE‰0(‡Q‚[ĝuÄŽ€1À ‘gK‰˜ìYĴì˜ÚKòjÜ u{“O ii‰Š­ÛÙ?5 ËÇ}iñc  ½OŜÊNÛÀJÙh´HĵÉòPkĴ’_%¸‚>eÚZL>CîÏÚĜƒà[›ì.ÜÛxĉµĴ  B0š2‘­’Çxv΂#ⵅ”‹<' f"’:eaÇ|‚Äñ½UĞ|y3Ä>òsÎ*ÉŻS‹dó&=>ħ×R8(|-éÒŬá$‘Š_uƒ–RÎ rġĦ…ù\¤h|ĤŞò˙nN?Îìڀ.ĝż6˘í˜‰U”>1ÂĵĤÎĜьeş vÏı#pxfñ‡LË[CÂÂ-²T”1aI„6˘ù‡ÏÊŬ£‹(ù?ë#Ĉ²vÏ!3l~†fàĠċ$šáÚ¸˘ÑÂ\Yo%‹’•ì֟K+ÇDóϧwúÚëȂă,.Że6oɚ­aÇ.ܸŸü"íÇì/_ż~Éŝĝ!íEòŭŽ…m]³dž™–<œŝ(MVÇŜwçĊ§?›'@Y2Xžvn½•µZE´ÑFŠâÖÒHËŻB;pĝóA[yÎîĴ1˘ï™ż˘ğ'—Ċ˙ݏhG”ÇR<¨²[ …C·,Ó¸a~–'Ü#ÜçvˆK&ÖĵƒĦ~9rBZ1vf`Íè5ŝ{şĦ‹yœnŝ˘˘úL¤Ğ}­uŭ\ÁYÊRQ/èž*áÔÑí"ùZxV˙1u-û“”çdžì˘İŻ’Ğ8ÏÉûˆ3èžŻBU£Š7?y´³˜JE™Nĵ‹”ÂZñ#g;Ŝ1ÉÖHf ‚–Ž(‡â ‰Ħ–T0Ž ܍'R °²…ġżĊóToŭ ‚ È á&Fö”ĞrYŸµvm—ÖżA'ùœYj~ á䪗MoD£ĝßµ4 qEumœÚ‰ĝġŒnE–·1QAúD]ö §%PÔöŝì€ÂŜÛ˘>żˆ?+I†w™S·P÷ ŝU#Ô1iŠ›µœ.ôÇL[.N_ôxh6*ĉ~³Ÿú6‡,6?ş‰÷gì‡\ué…ĵaœ×öŝĜhÊÙlˆÍnžÁ…u÷z›+‰Ò@5s È+)ŸŠzÇY˙O824ß××?04ò˙¤ĴñŜ†âO)W"ƒ<ÌĠDi ’ı÷ŜĝuB|Ĥ9;vƒ´—{ßĈ‡ó.,_‚íŝŒ~œY=_Œ<'ìÛŜ|Ó]Œ:ïZ31ĝx]|y“÷=HŠ<‹;•äˆ]âg¸£F·§˜w I+˙ż|PÛ÷XĠjÔá³H(ĜÂı"ĞŝTĦÔCÌĥċÈÖÑ?dçT£+ZĴZßùQèĈé2cµ\NzO úÏĉÈ; ŝÓ²ìŞĠĥî+SvÊflúoħ¤˙ĥŞ{ònÁ°µ‚$ eÂĜ]pœQQB…’ï"ô›Ë->aCeÌ=ŭ‡…µ§nÔ#Ñ-v½jğ³Žy¨(}]+2o?9²ÜLÔ T·ó‹ˆO/n‡šÁgĥ”~OKĵzr˙Öµ‹œĴ̌ ôtuttġ ŒÍĴœùŬş˙äĠÄ´ï-ƒLQâ8Ñ^œág§.jš-?ò¤Xd<Ğ} }g IĊXV·Pöj—¤·1µcŭ9=—Aµ9QÌċŝ>dŒ*mʞċċïPAíâzñŠŭ*ä…/fgŸ9Ħ†×F¸oœëÎeñŬm‚—$˙jìe[4˕ïòĞ—"Ì(ŞŬ/pVÎÉĊnˆ[˙ċ&­èZL½‚ż”ħm€ˆëq…´‘ŭMÁħ§ŜHżĥÇF_ݵNC‹Ġ`‘ôJŜCio’ĝĠİéÁ½ ;ÀÚŒAŻGt–áaRM9¤„ íµÄ cÖ‰ö×ȤZ(ĉZ.œÎ VE´B²&'ŝސuߨà`Ŭi{­ĊQ%“ŸşĜP~<ôiPŠšshܗĉi„t=;żÍ„}˘Ĉžš‰Óò-‘î>Ëüñ·Ĥ{ptrz–ĊĉÀ‹Íšžìnİùû#óÙŬ ‘[–;1@ÔBT1óÜvŝYQÈAĤ›żÄ…:ĞQPè8Ċça‚îO'=”Pqë½iŬ§âT&TŭŸtáY!ZˆjpÖ´°ü¨1˘V,{²˘°½ë5FìM³žBÛ*ß7t)kŝE>Pê֑ 4Vâ‰ĝT ĠdŻój½}c;Ñ%M3¤%m[‘à‰&z<ñ\ĈĤv܏|…xH÷Ğ×Ğq—ŝÈ ƒB‡öš³Zâ넳z]£† ˜;?ğµ=xqèĞqcW^ 9·UvBŽú5ô‹³kˆVˆ%?B~à šW +WáħŞÚqáŬĦ÷ԄġßMDR2&ÁnVĤ°„•FŬ&ĵ‡†7Ñ0,‘²óQĈżÁ\%—½ =.|/o^Á܂ìe•YI&ïÔÔßO•Ġ_ôòŞ=‘µm xRıkÙHĥLÇŞüIĉÏù`‘=ÚËë„X>b2“ì PäJˆŻŝę}żJ‚(ioĵ^Љ]ÄÈğÍZˆœ÷­Z>żáHjħŭ)x†^…¸‘²Xw9[dà,}qzŭ\ tJhĜ.Ûqîñ§Òĥ‘YQ~ÇÑOKuIŝ×Ì´ç)&ÜżŸqÊó´ÌŻù%Ġ- ⊲ĊّĥÒOÏíXfĞ!²PıëOż(™Î²/ݳB ç¸úsœaO·  Ïݽċ-‡hm~7Bt%.'ÛF|(i]bĠûYΧĠây‚!p풘ÌG–ˆÓ[~ïQ0FwòŸ›“üаtKòĈŜ̵­E<ĞyWô?ŻÔ•ŸoĠĠŜ ’VeĴ½Èf…ıü{áqt;WĊĵĦßEĉ#g% 
Coâáè=á6jk‰B9'3(‘ÄnĜšžúx?¤ğDxœVR%vDèjΐê<ôɀ|ߏ§VG^ôÊïÁÜ-Ĝħ”Â:É0ÜO­ç3ùĵÀ[½³Ö?ô6Ĉ}ż¤Wñ’IˆèžD.@éĝÒÖħr”Ĵ›ûÜÄS²ßhƒ£D"‘$mQÂ9;wrêÛ6Av.“z2Vì†mĉŬQ“:9>qZN.z”§I Áĝ âĈÙÏĦU?Â˙ĵ’.³E$¤t"+lÉċ²+£s˘Jĝ삃Ĥ$I÷kĠ|^uĵ"ğtŜ1Sqoƒ ‘sÜġ¨t —$ô6’?ŸÑ˘KŻ~·OòÁ0ÜRüñÉĠ¨5‹ÍtUċ$TÈŭŝkŝOSDĤ2$ċTuÍŻ şúäcqË0Ĝĝ“íż_] ]d>C)#ïƒÉ%BlĴôÑ.G9„a²á^Ċ 1–wzĦ,˘ä_ÍW_s—$™,`óK˘ĉ ÊÁ_XÜÜ- d'‘Àu‹ }ċgŝH<4<Ÿ³Ç H>?š`šfs4ZNîôÄĝI)µ;ĵĉµ¨[1–Ĵ şRgrÄt›/şcNÒ#œ é$"QÂħaԏƒ§ˆğġ5[+ċ°Ö?ó,È(œCbˆ¤dŜ*é_}Ĉ6 ċŽ€Pİî-8OŝÜ£ĉ‡‡IÖRbÙîĜùŜÈ÷jjü8’|İĊ2ġŭˆ@Íħ$$³LäÁò-ùÔĜ*•Ö_b;aŭ³60ĝCb½Ĝ‚£ íĵ›ôۋĈ#ġ0–L÷(3*šŬ g@–²Ño°Żk[D9É+ħe\â—8PÎĥĴ@V·²ÎKj§`e^ȒżĝkĜ÷xÖ<ÄZb[ʧúĝÀvm˜Ÿ½ŒŞ°§ïşlŠè]Ħ?Á•*ħ,uˆhğµPŒbsòGPwÓ[Qö˙ÇĈû2͗A¤ìv&W2 vKĈY?poQT6œS6ÀBŠßĈEş™‚3A19uCŻĠw„GE_ˆ½ëöí[ñ×c/DG…ïĜ¸ÚËĊĈP]N ’@p˜şFĈ½-!B8eoÎopPĦ€ïÌïlF ›`V&ﴓBdĉÊìÙ˙â}•yï›uΟ“6ħ…·ÚˆĦÔeTׄ~`\è!Ĥ—ğò= ÔeÙüat~à÷RflF¨;2/ ‡³úŝw âU†hKžgµFV´Ì:”/1lKx')‹ÚĈփ9ĝ·‘r<˙›g‹´ Ê&|éÉĜaX’8t/ö€´„Q°$w„ë%~ Ó#vÀNħ_­*аÔüù`@?tg"Icš„Ÿ>3–Vû–ŸrXqŻpŝ|·„?ÔġĊ÷KT•Š…ëä˙J„‘°ŝĉßĦ xıŒï!§ò×ÓÓ¸´ç‚íà :îĉ™‰İD ~…â3éA‹Ċ²4 xOÊä/e”Mş7@-6àfñ$Ánxu]‚ÊÙn¸œĠ„é–œ‡GüµŝSvjÙxnˆĵüèŭÏ­ŭ˙_B^˙/!¤żġßÏ÷.Gn´ÑúOOŞċèäaN ÄӍY—7ĜÊĦu÷ƒŻĜÄdñͨlxÜÒ}°(­zÔAÀ6MD#ô+˜ök#F'jñž֨Ήf˘&\‰êûE^‹ĵüŜ°C\ñÔûecƒ•)ys÷ŽŞÌĊÙ΍¨[)ôNԞÙFRñĜİ{PÔÈÂbi>”+xŠ”]ĉÉ|à`iğà9-›F_ÏO%ïÁ/#WĦĝ}ĜÂv@$"ñ·Q~.VZ%ħ÷Ġ…‡üc;ȂaĊ÷*ß§ ‡¸fË=ĉ5ji ġ2Ûڙ†ZÔ ³‘ŞŞ’Ŝ˘à#ĉ–#_èĦ 7½)ÜLKg-“úVÄ/3+Żğ@p‹0]ĴĞ]Ä A"yyÚêÙ T÷\ÖġŻEÖ÷ì.5‘éñ+ÈO?<Ì,^n² É·€¨Ú"Ĥp¸ûħšĤ^ 6J5’sÒ¸  D‘äp£ƒèş F+÷;mĝĜ‡­Ú` ĵ‚ ê[ËUIÇȌ!żŭ‘…j,:ĝìß&ĞL;ä(JŜd \ƒŽŬÏüÛ12OŒ3Ñ×T–—ñìŝĠ3GöïĜĵŜßo…ï²e+üü×oŜħ˙șĞ÷Ÿeä•5ġMpà/sÇ;ŝfŜ?äj #J(ƒÎUŽ ħ‰ÏŠjÁ* ĵïà {2"%Ġċ·ŞAt́Ú[?ŒámwÜÁÌvğ‹è¸á@R )Œ'9“ÔÀòZH[ŭk?Ĵ ĥŠ(%™$s{*ƒ9|X\3ß€şŝ™h!dĴÊH˙zdm˙z¨[Üİ„ÙêÚyĵH$„U¤­[<@ı‚œ]‚WŒ•3_ÀûMj+ĥYx´ÛĦô/#–ĉ#AĵIşŞJ*„Ĝ&ÓPD=wĥf_Ĥ–”RŻ1ġü‰efÜCámÉ;$RdĥK‡cĤ`ùĜ£â[Ĥ¸Q!?Iγ´ùLۉŸú@ ĝ—)7Ċ?CsUJF\äsgVÑSà:(|#5ż£gĦĜ➘sËèZ4œ]n T‹céÛPfœaƒÓ% u^J+IX>èâ›âUDxĦÖ/ RßVMüŬ(&𧚨‹PĤ,z5Éò“`ĝĵšGè"ZğE–ë“Ö1—K˙಍÷VDHÌŻ)b˘èÊ ÈÈ4–œĝGólSĉ…@ky2T‚=ĥ_yWÚ+2x·—d%‚Ò{ħĦÚ˙ĝQ2•..)#'Ż ¨¨ /'#)N§BĝŸ;\Ín1hȳJÚÇEĉòŜÒwWĥ{@-˜,ox!TGì–'@DÑ^qh‚˜ú€ïxMŝ]r£XŸYÙwk!şĊ‚ÉW> ż,ÎÌĞEċˆ:˘zĴĜĈżDġ6q}_X£^D‘âô(×|•0IKê<Ğ-œTì3 h£Ü–6Îڊm9;];Úâ,vx ĥ°§cÔ(ĈRèĞfrċ]FJTĉŭQt›ú" ÓĝNÚç Ëħ 
òI<@Ħħt•ž`û2ġÀÂÛ3E C‰-eDc¤2Ċ+=óʓ*ğ)‡7óa4e~\1˜şB–4ç@Ŝ ˜—+"JËâ+9XOú~{IDÎ92­ìzOÂÜÔÁ>gù‘ä_=,¸ڋ^ÄìZf'Â=ÙCĠÎcġֈè+÷RÒ²ŭüURŝŻ ^˙ÊK~ŭü–•–rïJtÄÖĠv`„‚ ì–íŠyQÔÇ?ĞçWò‘ċsÀîö¤Œˆ-i‘Îrˆ¤ŭŝôŒSżL Q\肙ĵsH²+R‰ĥ¸ùé5fx9›dݞŻfĜé^ċÈF˘l‹„ÌFü§/ÉŜìĜye‰N‚c_ßlĴ‚Â5NÛ:ta0˘T“ñ3ñËüb[Ĉeá#ıT­­ŝ1·!áĵ\uƒ’é@ò5Pl`‘u޲èĴ×3ïŞÖpù)µŠ÷=Iì˘ÛËf2tAèĞ֙K‰zŒ§"ŻĠÖZ÷Èż˙’ï(…!ÔêŻ “:·w’lĝAĈĥu`ÇAâ çjÂgLÁïôÄ ô ×Wö÷€•^sĵW­ikĞ™VÙèb‰7@‰:,|+cß<$BX½‘Ÿó‡s]Ñ;€lëZÊĥámè˘*ŝ ĵ˗ >˙°ĈÔv^²jÏnˆŬKŜÀ5êê| Ĩ‘—ĵçL, Cz¸€"íŸ>%,?fŠÊù=í'_o@ä–BÌ-hIŜ!€Áú{ċLʒv¸ƒŞħ0,ñ7¨79½żŸžZ?_$4Cç5û.<ÊüU•`HqŒÏfNŒ ôtĥµ4755·´uö ŒL0Ù|‹ˆ†½ġż2]Ĝ·ĈÙP²RíùëO=ŭŬË­éïİ…€ wܙA'³üŜzÑ'Ĝ” ΐêĝrˆĈ†×ƒD˙S?9ÔôXıp*Ŭ_š²àáÒò‚”)Îû%d5`ÄäŻĤޝུCÜ>5€JKⵇR_`˙|%ċBOŬàW-B· ƒšrU×Ĝ6$`°wzógŽüˆm¨Ù^ĉ­00˘ŜH,-Ó2km5Ġ­…§²İYÏjà·Ĵ/÷ z‚ĜI˙>hìÀ|Ú^à}½GĜĥìkä ’‡„Îöü#R ŭZME>41.1ġˆUúÓñä‚ZñCàEXò(B Ù.z}?ĦĝI4…-˙·ÇtNg•şœ8Zĉ-6*ù3˘ËèĊoĵÚB>üÁËúr’Wdx³Ïú²÷°–ŒhVóJdE#÷şĵÒMnë’u^ĥµMÇ·ÑĠ/Í>4Ef.êÜÓĜSÉóQµ  µ@5ÂŝĤ߯•'Yœ,ƒ<bbÊʘ–<ğ÷k{é˘Ôßè÷p@s{ „{‰7 íùGïŞè™Q'{ë~}zvûÂáA+=8X™êikjÀKS[ÏÔÊaçÊ ‡/Ü~öéW]龜-9ŬSñéŝÑÀù"k ´‘Wĝƒp†Ì6½ö‚KäÛ6èE]šf@"À+Ëb€cv²À²“$ùµï§Â4P „ÖÖ@ç'OħÓÜI: QӇ³c—ĠéÛħt[tEžaMÚʽİ$Û¸YÙ̊fXŝÀîÉêƒÁRÊĴˆw!ŭËÀœ.oQ=îGı! 
½fòUl:Z̵*àNrPŻêœcÚóWŜ›“‚žÄC?úô\Ĝ‘ ğê¸ŝ‰×§ġWzÌKŒĤȔĈVÜĠA!ӕŽĵJÜċîO¤ë\'Ӊ—°y<5:~Il†û˙ gİBY—‘y_İ’ëĝY‰|§ÁJ=£šA7İ·P™ pI9à!p ^àS .ÒíŠùħsÀċ ){ƒWğ ñİ>Ö?<2u^^6zLÔ iÄ>{ ĉ‰3SwM‘ıOggRP­MD =Ää\Ñsۙ ÀA˘ž§$Ù,2‰w$j RÎ'sLjéâ~`ÒÓXĠÁ%Ĥk^ó5–D)ŠÖkŽ%ŭlü‰ĥ?éwNlógüŸÚüâ²JzĈĉ6óœ],pqžç`cnĴ§Ħ$+À˙´£Ĉó|·¸“ŝ§m‚O§Z&[c­HA%}½™&¸YÑK!ñğQš}Ùä3•JeÒëħ èc·By‡ĠàoıĊÌl Ä}’\:•ŻäŬdş„@òU"¸ŻcĜ}"YÊş‚†è÷‡; g ²o|ÙİKT•ğĈmY‡ÎË%rœŸâÛdÁgĵn+Cċô/Í 1ğ;ÁĠœĉñÑi öàĴ=ġÔ=ù ² ÓG”^€?„ΧA:üġ˜ x3^6BPž´×E…ŒÊY­=“öo¨.C•™·ĴžĞ+ŠèrÚV W‡Žı÷<3·¸²ħ½gpd|rjšÉœžšìioĴ,ÎÍ|~/ĉpèê…VÚÀƒ(@wîê·2+‡€73ò/íÌZ+9”Ĵâ²7İ‚€Ĉ—aŽàJq9öu$Ħ§%@¨’ô‚Jˆ~Șó÷@²À[öàuŻ!ZŽk‚â‘3q× qJ œVal­?ŻàQâƒ8ċıóu-ÜkrމXħyÏxßzdMÜŝŭk0V…µT2q_lAGßbzq•JóĤnüİ’Ÿ€—Í\,÷{Ê £\Á›û½€­—.kTZ)>>żk…ƒ6èÉhJ–Ë#òÚg ÁPIR¸ğUt {VÇ"Ĉ‹,WGĊlögS?ŽÚÑİÖGA´öïĴĜٟ@û‚%Şşû·pô‘3I5˘†h<ĴJñÎôŸÓ¤Ĵ*$Ş7ÓĠ.ML_SÛ×ıĞÇâÄq ¨”•¸Ħ›z'#)Ž„qRĈ߰ےfEühÊâŽáĠ¤c‚ŻêúżgE<žîƒus´ŝŽ-–Êbûƒ;|-=ËGĤpÈĈ £LŜ——€\R¨û'½‰ˆ`ün’ &ĥʵ~'ĊϘ,ĈvË÷DÔÓŻLi ġYHV“Ĝ9ĥéR\Û2F!ž4qÁ2ĠS“˙ŝQlԎ@ïĥ&şêJrÒb : ^t†˜„´œ’ş‰íïÀQħŜç×ô€.çŽ5<żâac ŭEûîçŒzĤ>í¨'ĝBÔ½NÀıu6Î2 PĦ£™; Pé ÄÈĞ5r$ĞóġĝàCWŠì†ŜdŞ+Y5²ž¨Ù+ÇX÷oĜ#-Ö*‚„ÍËÄê‚(sž ֑ì>ıޤ-=LÑ~"Ò´eï`ùâW…?M¤Ż ~ò˘µu62o°S ÄÏUĥlî™ÊüĠ@íŬBy"Ĥ½á,—Îĥ1èú+ğJpı[˘Ŝ^ÄĜGl–İ+aĈ|tÇRw½Š!ĝRSö9ħ&Xúz¤‘~eZ+ˆ6eĊÊJîhdžë˘ì„ÑäìA M&#—…Rż{ŒĴ†òVr_PB… PwLcĜ[ •ŝĠ6ĴġĦ%@è;ÜhĈö"ŝtÚ21‰5ÙÜé§$c­xñ&qYÈ V’u§ĤiÒCš…/ÍÈA‚d]ħCcc‘t“ "۔q’Ġĵ Ù2ÜżÙ8 Ĵ•VıµÔcâĦĝĵĉa_ˆh>£•†ÚĠûÌa‰PO†R^pWJċYġü– ^F’ô:˜MŽĞo/vl;GÀ…—”ÑbXĤŜDÖôz#˘ ÄHA¸\Ï$½[>_5ü ye%bŒœÂ7‹˙05,”^É}FŜ5çÜ+LĤe}W°ïèp”ŭ |%§@G˙iĠ`·7)ŠSjǸ$ĝcKf×{Ħ{§Žˆ~6ĜA-ĵZbû{Ù÷ti›ëħlWÄö9w8F‹ìóUĜwQO.s?dHd³Ye(óâğ™ä-Eĥ<ŝ‡ŻŒ](‰ênzցqŞîúëİ†·€×Âi~wÌK›†HšĴ<ŝ˘lB€Ánì[n£!ò^IUç•ÛŽÄ&Ĥ}ûMżaHú¸˘/s!1PhġŸoi‰ħGĥ­t6Q…2YBfùŝĵŒ?TöâĝJI„Ĥíuì]3¨4· İd˙ğUĴÙ&]Tral%Ÿóç¸%YÊ; àĤqó(2YììIĈʏ\Qr£wħOĝĠ‡Ĵ3Ì}n‹¸fcġ›iş÷Ĝ½ûĊ´Z‚Dç#<%GĤz7˘^ġìhşíÁ%†])'ŠäŬ=¸ŠtŒ˙MC§€PVÁYǎ{…ïĴµÔdá^h€Í.ò3îJéÂAsӁŸâ›ñS"ĠÔsägÙÔcäĠ°Ö*/Ĉ°êӖ$ٕİŭD˙ó@U(ı?hrĞnPƒD0,­ġ=/JÍäìĥÄçvsNOQR”Ÿ !KëÎ[½˙ʳœŞnQ²ü‰öş²‚o™ož%=¸{+>..ŝÖŬIÏŜd~+(Ğk˜U  hÜ]•óìÊŝĠótÉ]ĊĈ/*ݨqwnü;9QÊyŝk?ĈoO ƒ$PmĊÍ*°ù4)TŸ‡L])K²<]½gxżšâċĴ—bĴÊĉíÍâŜ$ûİb÷Œ;tFYlG³­-êû˙dú× 
iJž™éF]K·ċ€­ġ^G.ÏÒQ|N<‘5ùĊ=ˆ›¨É΁żHÓ¨jd‘Ä[`ĥßep,…ÄA§ĉw9èÂçX|Ħí!Pżèğ£ürÜȍŽR µó4™½B-ûEJQŜ%´G Pž&)…â^s Ìm–ÙAĴSêȜÄV)·Kl#˘ÈéS 3.•óáĵ€ġżŽœ ÎC*PÉ´ĠÌ5³şĦ%ŒDâ:Ô(Z]ÄîÏċĉòOQ62Hž³G¨ż°%ċke ÀĜÓ1ŠÒGYwġ(ċÄŻUíËÜ4`¨ĊóÄ%×ç ĈŸ,¤ÊÏfqóĥĞ#…^ñq+Š˜óĊ2 áŸ"ċtìËÎyĵÍR‘ĥÙz÷א˙Ş™¤ğ ĝLÊÏ&QÀïĴĝŝâVôŜ k#M%‰˙é˙_‡¨¨,!£¤idíâ´7ú֋ï˘Żkú™r&xdGËżqL8ôëîViDÂrÛ6>ôċ˜“˘ĉwżAÀ-ğè,Fħ:^ÌF êÛó¸Ĵìġ²Ô…OĈyë%Ċò„ħ@ÂK_ÖĤĴúE”RôJ+ĈLC3@+QP·RRKPVJÀ~9PÌ6z’˜ )§ĝ…säžwÄ\ZĦÚvHd,Ş3S+`Ĥ%$0š€ç‰ÈuĜ/8>r âŸJ'GÛ$Š[”Wa')ıJëˆ2ÍıÎñÂC•†’Ñürè"E"\ÜA+ĥO8ҏÇġ½ħCŒÒİPbğdYƒÂjĤ²Ü4šĞ”³Ŝŝ:Ĵŭixğ³ żMŜÌŭ( {ı 0‡²~Şda(žÈŽİĉĊ¤3ÍKѐáĦÔ­Lĝ@EjHöŸ‰"dañ{jġ‚ßsDNbg=^¸†Ĥ|¤/Ù"%?ŝĜ…Ĵ ”µw~ ­şŜ‚żôW@tw|'z_nÒA%ĉŸ€2ÀÔŻ+0¸Aoul¸C‡~Ŭßᤠùşá’°ğßAÎ˃|vyßjç9*˙ġƒÒ*şföKŭ‚ĥîÚ0êĝÉS§N:¸×Ö żöfş*Ҍ˙şÁ*sœWïğü öÄĊßî†-1„ú‚ŞÓŽû°Í„yħĞġ`P…ïhGŒċž˜/êlzÙKŒÜĦ‹(ĝżĈ[ƒ’ÍïpîBÉ.ÇùßĤĥ”à­G”ik ñúrGzĝ/Ĵeż‰ĵ…ˆGñٞ´*ċr*„enhÈpş´yĉiqóÔ$p¸Ó“rËR1ü C‚ 9{È~än&ßmĝYx$O.¸óŜ°+U5k¸ħRÁÙğZĦĦLr;*USÊ8„yë?&ĵì>ÌĠŽPˆ,Bäò,–B@0q—”_#v\àb1{Íì‡$ċkŸŽ;çr›żLħş äïiĦXĴżL}Ŝ4Ŭ+x ƒKŝiÎC`V>ŠlgV;2c_q‡ԗı-üeO90Ù·…äTŞpzĝôy9ùó“g¤ġqS,Ċ_ħÖ9ñàR˘!B‘ş |”É.$ċŬĊĜh2hj–=Ċ.;RĊĈ7bÌĵƒ–TšġÁ/£€ç;.H9ì{ÙÄÁÇKBäA à˙­4\SÍ9‰Gôd@Û%2ÇiEÈÑkIïŬ549á˙ϐˆ˙ó\Ÿ33 耲ï“ Yá4G$Á=§À£‰9˘y3­ßâCœ@ ï’P2Žsš^îsBäŬϰpôËAkĠò`kŒ_(Fuĵ܀>[zĤäQĴx·2É%ŻË¨Š Di°¸\D+öu1j‘Âí;$­pv|òĵĵÜùéĦp:苜H[ú&Pì oËèÄĥĝğÎpĴfnGBgÊÍÒ!ž-Ô4úz^ öÂóeÂŭ‡…ÒŜÙżİZq˙6rÓ÷s_ŝLôöĴ…‹à¸XM>éîŝK-xċŝÇtIŽ|4ΠMJ=™èp°î–ÚFΜÖKŭéZ|6#XxŜêÔÎ˙u­a¨›Ô@Ÿ3¨Ğo)lÔl‘3<…‚Xq‹˙ÇdM 9ƒLŜáĊ‹‘EE"68ƒÊ(z·Ĥ§Aĝ#8ƒtIà Y°òžRlŝ|²`˙sİmß}Ë/TwkZÔ^΀3HŜ=Zä j~}Èm )e˙ şŬŬ@Ĝ*^!g?•ĥŝ_gPkĠŸÜĴ×)ŝżÎ )ݳr˙Tµŝ_gh{é§Ä3!^*@™6pß~ŭœAe)ŽW…ġLì#ıU³S\jfĦ¨Ò׿‚tœÎğ<î~d/PÙÌëĦ›Šœ+ Œ"£‡á8˘À'Üċ²%]zs§Ó@Aú_­ìbżXiäÜOcè%ÌR Á—˜qNI´½…ċ—9Ê1Y’ é8Êï úiŜ<[¨}îĵĝZ^—ĈŜMäw‰J}­ŠÇìÊn<Ŝ$ ‚ @ĵ€D€"Áw˘TO·˜ıòÀײVUSÁȞ٠[ñÛĝGmĊT"][ŝöӚ5 Î0¨?Ó;<:-­ Ž(YكìG&$o˜Ħ´WZ2´rtżŻüħ‡sQġr|èĦ+UjĠ› Ĵĉœ-tYîĥâS_÷›’GsáâŭvÜE‘qŠ|6‘˘[Á  §‚ƒûŜN!Ż˙|7r5@~@&İnîĉż;úĉ³OEpʏN â˙ ŝ› ĊcOöµV}zv3z·ż›ı:…Aĉ°:òîçúqÁêüqĵç€"´N4^ÛğH'x{—ĴœÈ=êÀ ›î˙:…·Ŝ…ž•íılâÍ*)Şë!ĵ<\ûpŒ˙Ġ.‚—ÊPIé½0%˛dòˆ=x\VsġĉŭäWßU6÷C @‰*˙üĝÜvo+U`ÎKé- ½šU?‰acO. 
š˘s¸(òÎ9³H‘^ îà·!àöĵĠŒM}ÚĤèì+=óFyòĵğxÙ>²Û3&ëġB’öù>Aĥ'ÉĉóŽ>yM™ètŝFüöDŭĈÓMİac¨V߉šr÷°|KħËà–¸c4>b1”%½Ŭ˘É\wiŜ#µĤj‰§>Qâ2ÜW˙ Ïnb[4W·³`á—S6–“Ï‘;,Ğ™{hF/ô×jŒ>Fsêgġ׋‘ÉkôúÏțn˜$ĥÀšs‘ HùĝrÍŝ,Ò%§VhäÄ^¨Y˜äKúóá 9 w@Ĵ4rżCŜÄù"‰‹ä Ĝ˜µĥROˆ‡’6ċìH’O'´²VuNî%ıŭ_—ÑzŠ9£Amœxe™3“c'¤ÎOL]QglĞÇż/AoOÎ>ħGÌïŒñ²–ÑäB˙`ƒw³ú“7ó1H5?Y.`ŭ7&I{Ĉ× ¸˙-‘Gä_(˜ Ĥ˙Ŝ „ñ Nû@݃ ~%†{B+€,gâµûêے.Pġ g~}HĵtpËʅv€ƒ•ù˙ĞÂ˙Ӆ`DÍħ[¸rËÁK‰~5 \R8ŬUòöên/924< pPísR€!%·ŝNGYríWPï)M2˙Á”Ÿ4G‚>Î~†ïĝî ö'TŽĥ,‹7vÇħ2;yÛ]òŻßĈPż25q^AúÄĜäċxN[ê\„?Ġ’ıÎ˙çFÚ;Ùı ޝ>¤HvıäC≔míDµ/ÀÛf/˙…³‰|?ŽàËü_‚Äì0šÈ÷—Ì0µ€J^ÑÀ—-Oŝ%RVżĉrü ƒ ^äX/€ŬoÏġôk“H>ú˘K:Šgí-˘î÷ ŠD„xE$fŝĠÊ@(ù|ĈEеBa)ûy/Ügá ˆġ?IēĥħÚ·ÙFá#˙²“]¨§– íÇŬƒŝèf#à-f<Ñ£[DüÓğJ qÜĉu¨7"ß YT@zĦ6Ż]‘²R{›ŸĞ¨Ş —ù½IRb]ž`ôá|’òÎ_‚É×+ ħßN =ó“G vžÄğŸAÊ%œa1Ŝj`¸fá@ÁíıÊP²7ö9ÛjîH}nÊĊŬ+ç*‰S’?}‹ıîËü7mß{àÈñS§Ïœ9}êĝ‘{·oò_ĉ>×BDƒ Ħˆ+Î[ıûbJnŭLŸœjÉM8àc Mċı!· „`Cşĉ`5ïĜ‚?£]¤€Yñ´Ÿüĵە÷{6D´ÇƒŒaĊëIÁŻÊ¤ùGyë$$7ŭ͓*uĠOĵiŻ”ld—ൠêUH,BÜò‰oŽèşfnœ‚Ò]UwËÀèÔ³a +Ì=¨˙`÷RhÏgĞé²w"ĝlš{]€ÔQ`4Ü'àьĊ÷’_°—*T´jıÌ<‡.@öŸ`Ÿ%++`ŒŻtƒŜwC×Ŭš%ŬġÍG÷ómÁêcHyġ0ObĦ)ó>š>¨½p$Ÿġ°‘8ÁJyĝŽN08t³6z Ŝèœ˙'‰è~šÄ˘áµ` :*ÌTšSÂڅl™Şw˘_ĉÉ$ïZ*NP<66¸4÷'™Îžŝ™Ë ²§Ç!ü•Œèe$²“°°#¨˜h>¤BñúÀ™~ĥĴ¸ŭ—pü™/ áÍ@‰b¸\­ÎµĦQ-ö&Ĥ /x*!6ğžÖƒHğüÑ.àD’•7_ÍnšĈ0fÛÏäSÊÁĝG†˘Á\ŸÍ/=Lû^\Û>0ĈdóDCŝ· œ`›96^[ü=íმ}ĉ(BÔH•3\°áTòÏ6&†M7e_Ŭì¨DRäG `×?Ŭe#(y^(œ"†?î· ÒlŽÌ ëş0€Ġ =l/1†×³qáŻíŠä…ÏĤ9ĵ(*‡š‰â †Âáᧅèağ#$!?-ĞpyĤäÈÄÏı¤íƒcaäyĊ‚8)`­'Èċ /ӝê§ĥ ğX%s”2…GÁôCͤfx‘DzvÀˆÏÇÛÂÍ0:Ô žWù•‚8$‘½@`{–jĤ£÷™Ĥ a&L}éÁ˜z0Ĝ†ż_™‡…¨ßĤü-&Ç1 ½ñŬ2-ıäË\gÓÉTä>ÛÉh”ĊÄvFù`Ôj°…pâż°ŝğ ´tĴ\_·dz-ùž*éĜ8ş=ÊË7T€òµĵŝüžŒÁG♚böljÒ7uB†WŻDçµç…8f` [éjGÙ`–œ˙Š3vÓħHXĉ2šLOÁÄÓĊ4ɕi“ŠĤ¨ĵ˙Ë!˘çÉj(¸$²kŜʈÔÜÈ÷€‰êŝtj)8vçmż[ 'ˆ×DúZ(RŠĴKàÁ¸×ùu}˘aofĴ·êOŜ§wϓ܉ż~ġʕĞ×ïÈÜżèËCcEÁ0Ÿw]3Úè(™Š_"Ż.ÑĠ/[ • ]°ŽC$xžêĈ[-•?hn6”ËĜN€ĤĜȉ}I4uĉ^&çĥÈìĈ½ ™qä⿔Ûj!Ĝ<¤?‰ôç)aTmħ\ê>úıC~3Xƒš i‡ñµòġċž"½‚; Riñì[jôŽäë˙œá;YĴeT9ĉ u ;4O¨SnˆŒĴy‚ 4ç DıĠ°OÑ­âÉj`Y†x„d=ĈŝŝÙëŞz9ŒÉ~ĊÄß@ŞúÙ+Zç\V¸QRlÍŜôË%4IĠò‹öi#ÚğsY‚г4şC4¸[“&n™Ù‡qŸíT´„y07lŞöŬÙµ ß‚aŸs%~Ѕُüê­É÷àbä–Ufşj Ò §ü× €‰L•¸´‚š™ƒÇŞ-‘ĵËŻéĦĊĜCµß΅1¤ 2[{öB}?n™CóÁn÷³F.֗i/˜°¤V‡G;ig+ĴÜŬ‘÷ñAÔ,I[òrš÷e˜äĈBĴ眎ˆ=pVĝ—(ö#?âôP½>ÛżŸĦsµ½‘e5`VKĈZÓOħk܀ULğ ÈYo 
Òĉù߯ÚwĴÒHĞxҗñv@Òï*Ğñ(jNĥhêHèAD†CÀŻċ'Cp Ĝ”wH;`ŭ—Oİ›Ö -K†mızĵÎV*…¸'îP=³ñPèĉa07ĝu0ÓĴsˆ4ħc“Ca4£çš5$›tĴn#U÷öÌxĴ&5 ˜¨Ŝ.%RA4DŞPż™e½÷eHĴΘċĉ…¨ &ÇË3_wè"$µ‡˜ĝnAVô[d×> 6\—{Ô;†3k_[fîpĠıνúÛ)ülßżìÄÓĦŽJ˘f(0FVó.]µv–ğ÷ìÙ½3tËĈµĞ–.œgeüà„Ħ %GßӉÙ˙úfĦ˜÷÷Ġı sUÁn¸ìĜëZ&ˆÂßEıœÌ$ĝQ-ˆÓß-šjŝe‚èI@ˆÎŽŻ3‚ò&¨JHw6cµ÷=köÍbŠJdQ"%µ½š( jĈŽÏÜÖnĴÒmHkj„ύhaC“ÇÄ ÒˆkÚafLŬîŜ ¨d÷Lµƒĝ="EÊĥn|5ĥÉb‹‡êLĠ‹Ĥ`j`0˘"[p˙'ó×Ò[däEHœhŜvO5 _­Òù<ßóĴÙWɅĠŒÓ\+|ĞÚĝMj b@xYò˘$Û2‘çƒ*‰uŞ} lk;“ˆ<žuœî’Ä.ú—Q£îżò^Ĵ·´Í‚DÒ.ì!y˙%c9àŞ-šúÀ“èĉ™#ċLìĀ~èŝÙj'j4·Â‰rxĤu%âß>{Z\˙ŝËġĞĤĥw’Ék˙yKP³G³q¨Gĝ† LXċçÀÈ ßw3ܜ͊$›˜FۃÖz΁Ÿ³B(·É"ê “6ÁÀ€•ħù–w~:·ÒP Óó:XÔş…9?_î˜/’’ úöK‚ÂÎÜJÍüQZ×Ö‚–öß a,ôµĠ•ŝÈL½u&,h‰½‚(ÑpĝŝÑü!h1?>ào`¸òܧN(<äDŽqF›’ù)Ğ#²P¤Îŝ<óíü€gcCRܜyç Cƒrĝ£0×äĈ0?5ˆ›˜}d†.É#ŝ­%'ó:ĥÓ R…µ~¨ë/ü•ĝéÙvdeëÌaŠS7šêT=ğġ€(–İlT2³a/%ô5YĴ}9%ù!ĥ‹”(ĜL{Ëò’˙Ûmd3ú…‹8 9޳‘Ä[[¨èS]GlT|ŽdĥIFñ,½ˆCDoöĴ8àbŜw@-òÀK”Âzİ=ÄZċŽ<èŻP_!#‚M°ĞÔ4iÍ˙.ħšû’şYBŬÄKó*Tµjéž/“zŬÜÁĜ@/hx/Ĥĉğ Ë+tktU‘Ĝ½ĉEħ-˙ˆ!’’[ŝâí§tP‡û#Â_;”Hq]DûyIo(Żö?]­ˆhn|ĠGLĉ+ŽjĝŬ,ƒváU?=*ŞäĵïIù8..MZa&KPèüuÇ~İî%ô³-__Ü9w`Û/g[=MYiIÑ q IiYM=[gŻ5ÛœğóâkiË_Ô_ŭċáħuóJ–5[•Z:,ÄÇ˟ìsVBİz~W Áœ^~ÓOŸ{4o’è{µQQ\ ö•$oIÊĵíDWœIiÇ/áÈ}TçT;ŝw‹¤dü `…<Úm‡xäeĞPëtĵb9ŭL êŜaèÙüÀŸ¨*'Ÿ ! 
ÌÖEH’Œù_¸•Yf˘ôdĝûĝY²sğZ­T‹&—Jdpèiœ5b™‹ŝö́x]bƒ&³ÀĠñ nĥ”3‚…&Ó÷ÏmÒû1W[ŜİÎ×Hn2Ş´âĜgHF§ÜvÂOs­ˆ*f3óHħ.úŭ9T8L~3ĤV×Ĵm?R ëşp_#.+V·hér”ûsŽ ßÊÚԍù“NiĞeâqbve(dWq˘éÖùX"h„eˆïĥŠ4ÌH;*|yzìœ˘dX‡h~–ŭsÎèU}tá;îPâ /tâ•`ÒµN˘5֖$voĵĉ ÚÁ|faŬċ€ĉ\óDù?.,…B€ŝêË09ĴûIá‹t`z¸úüMç_•ô̊xßġyÏDĴó°Ò‘ùIT€‚kè›X9Ìwu_ìıd‰çbw×ùV&ú€M§ˆÉëXyĴ‹¸ò,Ż^ÄŸí)yu~Ó|u˜³(< 0;öòj}(,½Ĵ żc ¤´ünƒ&[ÛQ.×ññ·r$ÛĜV˘ó̽8P‰w^0NÜ÷ŬBT˙ê(ç9ÀQÓa’ŠçĈĤ/+K…IzÒ*·yMŝˆG™Ô3‰X5=šSíN™XžİĤŭMp‚ä?Vg#ûVx f†}’sìjħP/[N]_P…{ÉŒĜk7×İıÍĵ!†&^Nż °bÏĜˆ¸o5ŭˆírȳ¸Aö*"^ւpù4§^ì$Ïёw–úğVr¨ĜV@;ˆo”hPòċßBâaqÁ aG•ÚüÑ9÷‰|y—ár-ÓĤîırÙÜP@Ó>—œß6à Ŝ°·ŠF…€ĥ]Ò> èقĝĥÎYċâŻô$N3{C)â%ˆkñ˵}#èĜ5‘áç%$Ŭ˃À(†UWÏ1ıJ´ĊÂEşéӌ ì88½´',ܜB·Žü<rKKUP†ù–‡ÓĞ1í¨— B ½‚~‹M'›ó’£7{˜*#†,Ħlèà¸ëXlÂóŒœßuÍ=ŭC#££#Cŭ=Íużs2ž'ÄÛèé`¨ BB’˜²İÇĉèäĵĉI!Á•ÙA˜JŒuĵŽĤ5²ˆéЇ[̨ÊÒK ùiM§˜‡#Ĵŭ7 M— f>m‚%ĥh½`‚÷°* ¤قÁËş¤%Ÿ…PŭÚß!xc‹z˙"ò\‘%Ċĝ Jh/ó´„Ŝ+<׊vtĤĠÙ2Ò€lo_ċB# ñÇRĵÚĉK>xs(7[nnw“İVù°‹|ŝ„ğ\Áè|µŞCËÁ˜ôÜâû*5THoÄÒ Ú‰=’µżİgEk|RĴ>MX{!b-éHv̍„~àNıΏ07Îčžü­ĵĦgIïÇÌ­'>P-ĝíŒğr%ˆğ˙İ; –ŞÙ÷ÖèĠġ͗ŭÄËRÄ]ğğ2îIRĥ•3;aÌU§şŸÙäIÚ3шĴhfŸŭd…Î"RìCmħˆ~Ö-MĈÎVá[dAĤ°óĵĜĤR˘.Lžî÷M0r×ĠŽŞ%şoĜ‘·|ƒùKazˆêF˜Ġùx•"ÀxµıĠ·áeXlO­‡Nżom´Dˎ½ü7`ûVX*$Ħfé|ôĉĞÜ#˘$ˆàlàÂöv´4ÔVWUVVU×6´tô=́§ĵ™‘ŽıŻn ö´T“NœŠĠŠ÷sZA>öïċħe DÒb­ß0¨Ş>uğ.˘ÛĠ\aË#@)zÜ S£6Ş"za0éêÛE’Ŭn˘6Jµğ;"ĝĉG—Ğ#J7‰Éêf.@Ŝ [w24oħú#Ä´ŠXħ΅˘çäğyĜ=ħ‡äÙÄ܏útÂĥ3•ĥRIÄ]ĈÂînWàħ›ûIv~_‘~MݽZ頃úżn`yW*ğÏĵRAʇ kóħ÷¤³àíŝF>Ïw3İq;ċvB/&G6d#é-bHJŻt× ßĴ:üùĜwÀġŝ 4ž3ßtü5`B}TšËd`Í-A Df]›‘IG½ži[³‰nċ™,^(ŒOb,êo›/‘LÜf,h D÷ÍVÍÇŠĴÄbĝeNäŭà YX*|Ĵ!SöÒ S)˙ö’;ÚÇíˆĜ²&î˜FiŠ“îE•ßQŒ %)6ĝ›“ ˘@Ŭ}Pb˙w~.˜/˘~L½oBçP`bד.6휏Xö]÷§”ƒfoìßĞè[€zє,ĵw]~‘ß(˘½ŭµ³ş0ëéí Göûûx¸8ÚX˜Î1622žcjaèâáĵçȅÛO³ Ğ;E\YàÊ4ĉż¸ĵËÛü%TeۀèW°µ°ñò”ŭ#ò9÷m6<Ée”9ĦozñéQ`Z™{ŝ4-öÚv{.‡ŭ-Xħ CXñyŞW:g Xfw&Xİvˆk~ßQ9İ= Q66 R i{a–†”ĈcaéBp†Lì';•ñcÄĴŠ1ôıU³ûÀіŒÛD²ÄüĥŝEŒ$Ê˒Y0TİkÒÜfŞWßabÔV§é8š+(¸3ždʚU|úzÜt>'ÍèW_7@ÇGäɰêfÜ˂%Ŭ›‚à.óħÊ#5ŒSĵıöÜJ~“ìVb›tMıdħŸžÛg0oZtDB•ÙGĥhÈ^³ĤË\·ĥÛF­dlħd:'<˙žƒÍöÒ/áˆZÚÛ TS~Á™Dâ½& ?Îҗ!€!8Ûú'Ӎßâż"ËÊq˜ıĤÏvFœ³°–=’²­xŜJŞêÑVüï6ĈŞOÜéž4éÀO,NĉDŭċC/È•ñ ˙œ[ …ŞŻS żœô€ŒÜpMl”€ßpÄÇĵü +&|­9üjó^\?ĵÙgž‰†œĝ˙Ž—UPVU×ÔÔPWUVŭßññâr&ó|6ŝ"Żv@D­˙špp……0Œ|ށJA9ħk Ħúàq†³+ïĴVGœƒúTó]oÔşAüż R 
Éá°>JÓ<_Ls?­bÈlû‹·UÌ[#d%÷´`Yg•k`r$^ Yĝ kL?<Ùż F, nHëgÁmÍ÷D˘Ìœ>w{{—ÂûRÚy“ñ΀ütÉĊc%j6Ŭµşĉ]5šöCE²>ħ‰mÓó úréû‰ÉòémÄVÙĤ|J ×~.ï£ĉúhDy6ßG˘%;_!ßkav´Ùb Xâ_Iñ, Ê½1tAàCĥ›VG1Ĝ nzd+íÌıÂQ'ċâ…ÓP‰Ü‡Dóżd¨ÁFî-v–ä7ÒêJż†gi¨ev˙@^+ĝ` q‘Óä‡xŭ&i‹›ˆ–QŒ™Q ÔnŝK[Äċ½ ÷”ue.Ö{Aûx‚÷5ê'o™NSAÍNU §²6Ğ!›ÓGD:uD3Zò'tf‘"*fħċaùTg2NŻœ7McAè фpVg'ÛèaĤ WşÈîadïħ*xßħ qRß|Èŝ–÷³ °°àgŜ·ìoRÄ]8ĥ/x•‡½Ñ²„Ş™ÇĈc ÙĠà ͟¸ş@ôF+Og@ÍiŞüá 1TqÑàTMĉ‹&ĜèˆFšŽ¤oÖ@Ô6gM +O™Ħ*̄ÁG ‰€Żĵ‰ÇsQ½ ½XîJŞÚİ^Á{Äö%żû€H§be˘'ĈŽŠk' ˙y!~Mœ‹ˆ4-ù0qAö!‘Ĥ‘…_£ğĥŽĝ‘Îboċl@†„G#û ï;ŭCÁe¤XÙi´PnÉÌÚV@ùÜ+Oq‡–xƒïq\ GŜùÑÁ†Oú °Ċ˙?Ĉŝ:.޵ŭßĈךĤğS$¤KP”²ħQ 0ħ0QğT@A @ [@)Aşğc``r­ßıÜ÷Ŝ÷ŝ|ŸÏïù>ó×~ı÷Vd†™ë:Ï÷û8ŒÀ]ŭIn=† ÏÚĊ"Ù³=ħġĀà%ħ0^+Ŭü™rœï­Òñ‰އëœ;A|ĴĵQ#}Ê[ìsèÛéċIŽ€pcĈĵŝZ3é4ŝa"¸n%rO’Öùo]VœtóoüRŞIĜ'sÊö‘} ­~•?j…—ĝ˘ĤÉÄIX 4ŞéÎP™yîDR?ւ•^Û10Û•cU‚‘ç0^× ÏeñĞcŬÄPÍuÏ:0Vé%%TÈdĜ•ÜŬ` We§;ı0âü|³ĊÓPŝBòŽK·Ÿĵúıĵħg”Ċù£„ġ{†ĊššbħfĜÄjˆĝ5k´§ħüsêí“Û—:‡GšĴĦ疘W?¸0 ʽê )Ë wAV=]óx£‰Şäsİ”…u<[§‰ŠıĊVóYıàĥ‘^ú|DPuÌQŜ À8G€Ÿ—c-ÇÔINÏgFˆéœş\)â”lŠú–àYÖ¨?A‹ħod`;Ċü–¤*uƒ˙͜~‚Sj_t¤“ğ"VDíâ0?MÚĴĥ1ā# ™ż|ú­èĉ;1ïİtF¤öŽĥ‰ıš߄CpÚ§oŝqÊçfCÜ%1[yÎfGе?C‘`½`‹\˙#ô]= \Œ€“Ñ­ì'ĜOûÚŞD|„<žrPoŭ!À½ƒĊ#`Ŝ´––Ì^I{ ó§ °ç' ğ÷Ġšòş–ùÓD"BsJqïI˜½êwežËݘZÄíö²™§M]û[1ħK·EH Ż˙‰Wlú&èŒÖCŒb:°_f!:‘•‚ág~âd³(˜ħçí3Ĥ͍‚wÛĥ¤`]8Ĵ‹Ğ™Ħ\Ìbm:"iîê—NĜµ~µs$T:äço:˙˘c VAü™ħžĈŸùÙİñ7.œŒŒÛşqŭşµk׭߸5,"òä…ñİÙù?{ĈfÈë¨xjÓ|Â:C–ÔŸżóÖçVĜu~ışÎ\Ħk/ށñLMÜ:8è'µÁçRÔ\!Šñ<ĜYD™‘Ċŭž *#aÒvàÖc„èEw  ‹oŞÀ–Žh§Ù!s3ż×Rµ¸Íë)†/°nˆ-ç²ĴòÌVïEĝ·=Á½P:" &?™kÑe5ĞísN⇃şŭÌdŸÒV²“Ħ³yŸEïp$~´Ş;L=&qJ­_iû~ÊŬö4r Hŭ;ôQżÜ½ a!Ĵaġ` \ĝAĦÊ?Ö0ágȍż­aëk˜ċHX.ŭÇĥtşHÑ ŽŬÖ0§žÂĥĴaĊú2˙ħ†mB–÷#ju_?XêĤ£„t_˙ħ†u3OIÈÇŝÛÖÊ{m˜Ö0˘".´Š!qP ِ ĊĊP ıâäe²˙£ùŠ!ßc|TĦ@C··Ĝ+R˘rŬßĊ¤óaKô P0hAċ5 ‰bÈŞġ[Â÷:|äÈáCûĥ_EC 5ä ı(à‚ġ–„OúğòîQ A(Šö[n @ôäóy˜>’T}bC1äcä˙(†È.KuèE(†,†bHî(†ÄA1dݐ8Q Ù'O[ú(†Hïïâ$™  ! 
é˜ox–Q ı@CJœHûĴ3żıû?ĊA¨ÜŸd(èB1d/íżĊĜAĝħ$|ŜD1d D=䪔]X ċCDŝCŠĦ2_kôO1$ìŸbHÁż•1Ï˙QĈÜ%”1·ŝeŒÁ˙PĈÜ$mù·2ĉ˙À•1‹ŝ‡2†I8ŝRĈ@Ŝá2ĉáßÊ€½seÌôP (c2˙(cĴŝOe̅ż•1û˙ż(cÊö˙ĤŒ ?sûYVŝ˙›2&?ëÙí3á˙›2†M(cŝQĈì˙[sá˙TĈXŭQĈd‚2&fh”1n Œ ú[ó?ʘ—+cêY‘Ô˙ĦŒYô_eÌŝ‹+cĥÀòż•1˙‡2ĉĦŒıû2ĉùż•1Şç‰P`2ÁŠġ0œşLúĜ(‚Ż‘n( ïÁVKTTƒ|.ıËY(]ÖŞ5Ìħ££Ż`§+:CtŜÈUƒš^‘g‚hĞ`–ò{.¤ZÜ)Çıù†b7°÷À:$ôGĞQ—bÁ#u‘cƒ{…UoÍtlRŽ9ĞDŝ…çùQTOÁ7g>Eq=^µKäp˜_°U5=S÷ĊlŬâdo:˙€)•nyè+XÒÌ` sĦ`gU'ì˜+€·Ŭ@sċa“ÍŸnïYD\àÉâÖ~ÛNÇe7ġOŝ1ÙTS^˜û1çmzÚë×iéos>ĉ–×4uü1ÙßTœwz›ŸµtCaœ°hÏíO͓8´ğĦ‚ÊÌŬ‘PÍÂG ÀW2ÌÂÒÁ÷ġ%jz šWq âûú3Ĥ¨ÂÖŝC’Â*ĵ~ż"e>ĵÈOİRüò_Át³#ħÊBÛ;fnİ ï;,˘ŝHPì‚î T[@Ù|Ŭ3Ì秸· @ֆû^yV+=,x&âÒ[c úcdžh˘SĜ+è€pwì­Ö2酜ğH (Ğ+$Vc{è ÒkÉĈ¤ËS†)6™ˆžŸPE>ŝ¤ÜV Á܍gNÑÊ~POrĉšÂĉĊ€–+ç"*=ô°ğ(Ż&•k4Ìûż/ŝ3@‚òċĴa Q:ĵ€wÒµS…ZÊÀk·r¸Ŝ`Ħ9š² ĝse…üĥĜEn…¤ĦĴÏŻòEì?áıˆÓ{ĵl)YûĉähĴ&yÑ'AËŞOoàš)˘sŞ˙İ…èĞ·ĠĉüoÁxÖfMTrp"Ù?cçË"’NG? `ÓĠñ›ĉˆ!"F+/|ì€CCÖĊġöŞ ĜâÌÛt&)·~pˆO“ŭ%ï“oŸŬż%×}î# EyYxÈÊ+Şhè͙ëî¸e˙ÙÛÉïKû'+53XŸ›tfÓ<Ĝ*2Tí×_Îéĝxa‘"6gS|ġ46ᨓ$";?2Ħµ7H˘š›³ĈżÏÛPEĵÁy[uXъü·œÒALŻ r|¨ ZŸ‘5cG'oj“—–áï×\ü“=â[ĊWƒz0äçŬ*¸Ċ´ßà ÊÏñYÍÀ„:ÔŻ/e­BèáÀâ †K5`k-—ˆ Ï@‹˙ŜoÑP£ì8ñ îlá”7C³‰X÷EŽĞÖÀ Ĝñ›Îċœ¤ŝ(£š1vÇB‡oR~~D8³⋵'£iùäĞL=ODG÷ |•:ˆ$A Dy­ĵ7çr^°VèŬˆ­JM“ŽQ{•šiGġ,ŭşvsùŻÌ”›ĝyHŞë(½çîEÇ~Û]Ċ^+)żĈHÍz‹=’Wĝ½1ŭÀx×z’ġgü)`Q9÷4è[Zı F$ŸĵdUċt?ç…#Ş{ħŸ—G—ÜP(è½f*m/ä=÷£Ĝ\¨Ç†_)"ÊĞA<œĤNƒMÉ­|nCÒÖ9˘ˆ„ċ–‡ ĉt~bK$74œB˘SKşXĴÏO½vhŭ˘ıúʒ B@†Œ‚ІĥĦ‘‘Ħĥ†Š„„hŝ%CRYî˘ġ‡ĉ×BC˜ĠU’â¤AäLlC~î-Aù-–ˆèœ­I \~kò&0™†eƒĜzµ2˘ôrĞż`Có{>Ĉ/܄Z\ën¤ûċú/ê˘Ž/8ŭ§U¨+J’Q·u ]ıĤoÏÖ¤ġ]èĈ XP”„½%ġ­Ĵôğ*dó{,ŬË}ŻŠÎ•¤óĝMÊRĉWyóö:ŭYĠĤjUíF:M5*ĥ#ï„Ö Î#·8ŜòµċD§CÉW°ŝßO=ĉUr~-zR{1p6‰ê{H*™ô„–Në5r\ߙ{†œŬ­ĉ  ñ$ĤµnïgPGžBnó– żuP(ë6SĞì2Wú1ä*–Á^_ÊuJÀèO}…lĝùĞĥı'ËÎz'¸,bœÇż$ °`Hı_áVz"‹ë`(=Èpa•kĴŜ}âҀË}`ˆz4‡K3 aTĤLvMžd½òĤ‹-çZÖ*)²Ċız8.•AµCĦyÑ·D‘šÖÀŜ·+ZÜzGÒoÎüġt݇:´@tûÒ2 {ĵŽ/bwĜjIò‰,,Ğiâ¸(hہÓWî'|ûáë·ÂâRx~ûúáíËÄûWNĜ´ÈÑDS–ѵlvĈĝÑ;Ċɖ/Àž]ŬcïÓ_Lœġ;i0˘€ŭĥ́1ó¤Ċ%qàµ}ލÊ,…S`ŭ9 ²´Û§³—‹Ñ½_ħ&“]ÉÊ0è, dH‡7 Ŝ{˘†”,-Ż—uME8³°Ĵ[ŒxVrŻMŠ,]ÑKüŒ#{iKXYbžĊçOZɽħí=äçL{ġĥï"Áĝ>ê‡.5/0N}l‘]MtӐ'ŭŠk Ŭ°Ö Ä\-y;ğŸ"o›Ċžş÷ŭê ĝG)ï;Ġ<À={ĈMıñ§ôBv<éĵn€ž w-ċŸˆ£ž„iä 1Ç&+‰P[^9Vm!ú¨Ĥ³ 9‡).µàğp­ԉ1Ô]ĠÄÏÍ4.Góñ S0q 
È+*ħ'Äê9·+RŽTŒ×CiÙ~oìlDµÇt͈RŝĜKXúÁi9œ#s™XWêi­ĵ]1…|żşÒ@|ĞbŜ=žŬ™˙p/LïİUĈÀmŬ‘ğoJZ† >‡9Z][ßûWÏEŠĜşeÓĈ›ĥ„îŒ8uîê}ĜU·0‰ĝÌpKɛğGÖıüùmŒ}÷>Ìïdƒ?ŝ]Ì*0FĴĵú}ŸŞ¸½Q†[Rğ0fn$œÍAÄùqÈ" /ÇĝšˆîħZ8è!³c{ù_–Ğ/bÈEvqŸ[!N9Xċ ²A"0“HĤx#şĵqĉœ¸h­Sí ΕZÊaNálàï>µ¨[ 5üVM]ŽÀgÜ@góQÖrŸ€Òó<ûHñì…Ò?•ŬfîƒûÙC­ó=ċ(z˙ äŜ¸§ Lĵù-ò´[z'ÏÒ DF/ÑŞß!ɝR;Î&ÓÑPÌïÂ`l”I:›ŻÒŝ•ßIË2ĠïɇTĝC˜4†r$4C˘`p‹8ŭ^ÖĵħËI$;MZ2üÛž˙bĤ%3{Ió[F7˘>­;Éeĵ+’*àDŜ5(•”ŽžˆUĦ6˙Ó|èJ2£kCÜLšdH`àĥ-I6b•Ykd½ZÎŬc@sżô‹Ëİĵ´*ın'żŽàÌÒĞô¨´íĥ8ñú !ş,°ŭÎÛ|ŝy!!°úë ßĝ߷!ÀÍÊ@CAò/@ÈżËÁ"DRAÀÊ-`óqo ëûYBXĝüüĉy„‰XT†…ŭ<ˆĊm³•FúĞn”2ñ‘Ż'Ŭ ¨ĵàR%‡ûë’ğÉ`Oî4·ĥ²`b‘%ÙŜ„HÒ|2ıC׌1Î(4áĉQ]%vb8JZòèÔ%EÇï‚Ç*’Wxe䝭>èĈіù¤½3%Ĥbà`ù{x é4– âÔĠh.û~:| ĈQ ‰üóÄDB.<żGßt(‹ĥßN˙Ú2Ÿw†” ]l—Hu%zÚÄY°CŞ3yWMğ4 ”° jA8pÎʊ¨gĜVQ”}G)Ù½ZĴ8ä<ıxa§ÙˆÙI 'ĵĥú{ä`NŞ| „éV - ßBÌ˘z†< (ağPżÎ dU/äZ6LaeáiúôˆáĦPÂjW‘g'óBèŞħ—˙PÂj·K݁IÉAe’S"ĴÏ -JXŝ6Dgo¸w@éĉ/JĜ> „é†>Ô'‚ ŭ`)YFöîŜŝ˃Öß°q†ġkƒ–û{ğÛ[ÍR’"şÁWÚÀ> ô̏Ġ(aíOŽ,3T¨˜˙ñ×0odĠ%L2Ŝĝ¤žoy˘ ”°}Q ´ĉE{u•mù@ [$úöD˘‰8üX#,ħ½ö%ìòĝxĴ*=¤Ÿ<›ĵŞ(aŒˆĦáş~žeÄ82”°u½Ğ N?è‡%ì(Clꁋß/ŞJĜ|áTN0dBCIw@é™Äž÷€™ngĦĝrŝy$Žċ Ġ›M9*Qŝ‹fĊ>C-*£* ¤6~(aö‘B‹J€eäö)T­‡(”ĝÉĠ˙E RlyĞĥY_ÔÙÜáğÖ¨jD%ÖuÍêÁY ßĜ£È­JÄ;/•aȝ:§ĉ6a2\GŒd~ÜZg&ħ`Ż} ƒ´µ7B=´Ħ.˘f½$ü*0Ÿ‰# D‚:Şżg§ÜğxbrıWPŞ@²„TxSŒ%YÂ˙éŜrËSŒ¤żóŬŜ•²^›L7Ŭ²N]˜‰ÍYğp¤aEqğ=µDŠŒħÏî™U}° d7ú÷ìîĠ ĉİˉ1(ÀW4… ”XxiÔż€1$ CLNŬhî‚ĠğÏ\¸™ÌM÷UeŜĜíc,CAD´ön§>IÌóV >Ô_‚lÚË˖$…bAïeD?Ĥ‡˙)€&nœ Iğ61ĞF[W+x9 ¨¤R6uNW~È/vBCú†ĥÜkb_ɂótÛhXf ġd_$žñR•ĵİ@òċ?ëĜ$à6gû²Ò+¸H`ĝu°ŻTY£²;´şnìħ?tžh0ŝyÙ£ˆ/WGÊ­ ^SY/@²@!,tŠ·_£ĜŸˆĵèğäF‰Úïô½„k¤Xt=4 ÓGÍuğ‰-;&ĦŠEŬu,=çΝü$!ÏĦJ}¨oBörs”fċrö˘KúÛçƒê °QD5(|˘u)âS7s^Bù>aŒq…Ġ—ɇC€ŭ[Aŭfa™íüL°*Ÿéá Kn,Âş/™˘j?ħpï Í{ƒ·ßñ%â†ÛrˆŒW \żûs" dkŬÒ1ŒŬš½ÂDò`s–M,$œ!S]e7ñ0U•€”àċ5gÛ¸-ZĵmÏÁc§Îžżpñâ…ógO;¸g[òEn6³5aÑpš„Şİǚƒ73Ê@:ÂÄ£Ëĉ@*LÂdEtv++½L j#T8U%ƒÈù‚0ÔĦĈdQÏ;íxσyBà<À~F¨ĦĤ—şħ˘’ô€üž3àÈÎ䷐Ŝ\/xkGB` RWÂs_YâüL²´u"œ(EÁ0|óÛû— {9ı³”r¸{AŝEIżrÈS(‰żŽŝçpì:È ‚ߘĠ,ûÖk>š½Ŝġ˘Ċ„ùg/ŭ{­ÄF°Âöĵ@û×à‹ôX§„@ž…hĴĈ\,yğ%;R‘—2 Ÿ‚<˜˜m;}I73} ¤™ġBà!3êŭ&î;“DÙ ·ÍŬĜ5R(˙ ÜĊ²-m6P ڋlžÎ×\ĝa°Üvzó_fI?-²¤™uN ëżĈ?Û@ bÁ @dßѤ…s 2'af0Zĵ ›ß­…ÚčA„ğo§§3WJ’m.5½ñŜbdCßgx•\ĊPĠ -vĠíċšdŞöòĞ…€Šm|9Rá˘ú>‡K`néŻHżĉkĦú‡ùިoç³~_ôŬç9• 
ŭ#LĀĝXħ|>„ƒX̑ŝŽ†Ê‚œçw£÷­÷ħÓ'RddQU ß°ËéŭÛġ•$òÑ…\ĝüÈW -ĵş\›JÖ\~ğŠ-hIX­Šı^¨äÍ|?dLóŽïĊ›/ِ%WfNOżê1„ÇâlP­è~ö²zôàLÂÄNÀkİšàÎÙÁ€P0l>Żġé&:ƒùµÓGh–EÜ3tË2xxw‚‘ù0ÀÂ5ó§7(•„M[‹…,ĜğiĝĦ¤kPĵ!ĜLIšñ˙Ök–?Ħġ@wz9jb6žˆ\Ÿĥ=ñIé‡bÀ™Î—Hj‡änž ĥê–C7 TèA˜ùν(1…¨,ŬŒo.lVò„MȨĊġt–Èş€|ÑÔ[oĉ'iûŜ*ÍÒoú}ü,iùxı!ĵCËĴ€F{gÊ,À71eœĉìbËàÀ²ġW,U{`w—EÄĉú0‚e†Qĥ§Ê8ìï IRD&Ĵ"RрwħžkĉàkÄ+BéKZÊÑKñâeÍk“K°›—Š}µ&oÜXÎi„‚Ï1,Ê`Şâ "Hĵ–5,_N:‹ß§{”jêTġÚKbz‹ĵZ$únÜEĥ¤ÓÀ¨ç›Ħ{żĊñTj.Ŝ‚o-ŻàÇ @Ŝċ SßÁ‡(ôĉ’Ô.vxĈRz@·b~Ş}/Ñ ĥÌ%§ró ΝCnò–Š~°¨k‚Şşq[Ò{ì$³fփ:îŬ³ŻÁZ,żLs!ö–ğYċ–ŒóüÜÙÂ1ĵ6”ñŽ@Ô£ށ`ĈëÚ‚~Í*²Án[˜ˆÌ‘^öSÈI?<€>bóˆ9ê£ÓĴ÷†5Ivŭ'Èׇ(+–PħDYÒh‘ù“XûÓ`ÁmHhàş³ĵx%×D0;óK“"›BAœ`ĥhDžçC°äO ĥüüò:ŝʉˆÍŝ^Îĥ&†ú:ÚZZÚ:ú†&ĥÎ^ŝ›#N\‰ŭċgË °™ĦĈ‚çv,2ƒvEÖtqdRé üԉáJ-ŝXv·€Ó°’:ÁOÛħÉüH Í2ŠPÛ)V1ú ŸÖ˒Ĵoôb%0ÚöHf>²AôROÙ½GdDÂÚ¸O ÈĞjĝ QÚÒVE(–{ á›ĵáÙıüó Ër Ż–Ŭ…vO³nèóYóYtŭL–¤]Ô3êڌĠ!Ş­ĠP§a1Ut)ï&rèŽ?š•=ı—€ġúŸèË>U?l+qÑ{0¤„{ÎkOBpCö!ħÖW ‘ ĊĵtÇï˘i=05ÚK˙Ö$1ßcĉŭ!Ĝ€fĵ-Eŝƒì·aW‰Ö2글ÈZÒŞ §7"6µ£AHè$ÔÎñżÂ:ÛÚáİF?Äżaĉñ÷Ğ @aüĊqûŠ.Bg?f÷‚FsM9^^D|S9h”ÈÎÀ‹Ï%Êĵ„AÙ>Dy#4ž´¤ÒmO–Ì~ßô•sì‰\@]^ÙE·ŭİ0Ħ™nÎ:żÚRŽë@Çà3Ʌm@ 䍷—ç<:z­Ş´Ħ…!°€ aèJJIIBGP˜ñ8D‘V5°]°:üü£œòv"ò™m…Ég‚aH‘³\}>ĞÜtuİûŬ!‡´ä2cFsO€;VŜ÷ĉoŜLÉI[:ĠòäOè²oTFtö• FĉÁR+hñÎd%Pa”oYĠ5 Kíe?ž.*Äżş!Î_`ŒÔ?!çfüżĈİ´9ß`•fĝĝ 6•“ĦHh­Èü \šĴ$ e{$ö€şŒ•#á:üMÖBÁÌ7ŒĠó<ƒ…0>ö˜½€q€\Ó78Ñù)÷¤ĦwÇu½°P)½j;Ä6ôÁ‘dôkċú¨j îĞ5~“ôĦXÑË:r@ìÚù‰ĥßÌĝÒ£g>œMĵq4FzÀ Ĥ<ᅐï`{!~‹²ŒùM.ËI§°çRĈċÀŠ ˙mO;Ë5½¤+ĈəjO‡:İGü*Ï (kêĝÏÍyÀLĤޞd?³Efƒ3ĉ;Y‹Ç´ƒS xÛ%k²äÒÔĜħÛÒèsÏüärÊÏı€£gÙ½ß~kJ¨ı"e·;ÚZ€8° ÜtUǍßTqqîpŭ—„³Û|m´d á‹ÒÄà hëá´y×Á¨èĜk·îŜàŝŬ[×b££îÚäïa §@1â¸($£eğíl—úaĝm†Şß\Üè¨J‡òù‚ €U—ĵÛN 1Miċs~ß[#—sċîÏ3sé4[È.Œ¤.•$[_jN ÚÇÁ†İHv Ĉ˜Ùˆí3öàYU*P²ó fÏùuk(Ï•~´Su‡à€zVϜda튨a.ï Íŝ÷xbˍžc§àÓĥTGġsċD÷bwÈ!ĵ'”`ŜR éŭzö°ı^ÏĈf|7íS§Şȃs:–-şñéĉ¸–/¨:zRöMF7ómĦÚ˙Íj‚m—öh<ò"„Üó¤WCú֓ h´`ıxQğ–Ċ{ᕐ;ŬŒ˜HÀRí…MÀ:ö ‡îzSıN(²›ˆĞäħw£Ğ†›\)QÜo³áù˙b$M6ġ…ÇLSò†öé+Š˘&! 
³µ™˙Ò™ûjŠ´%ıXë’KòÔTŞh?s&R}Eh.wş6qR­OŭäÎüˆ²"ëm~Ù §ï‹‹”P†QŭŠI|òWâ.g°ğKŬñĠŻ>Ĉê(|vfƒ‡ĦEi’êfî+ĥż–ĝ6Żĵ£tpJ_0"h´ż£<ïmâµÛW¸›İKÒP”!gèħá̳ÂĈùġ œ”`ŽUrŜ•ĝ ŝÀŠûÁF TiÑE¸tżÜĴG²ú1ŭy ¨Ĉ7Ú;.4ßÔ ÎgzNM%ğTŽ´bıKhŠP x9ħyÉoŜ ħ¨É˘ŠWĤÛ7M3‰àlĜP˙&â-lî]ÑÙ߸Q×ĤáUènvşÚ„rräLëğ$Ŝ-úü žƒġÌyldĥW żĥj/_.ˆF&­ġ‡^‘Îs!ĉû‰Ġö‚]PSú¸ÚĦŽ6ü͊ˆâèejĠGôI/âèNÜD³Û`m¸A˘ĤXh;\"JêċqŻ/Ú_²¨Sϸ§PjŜä[áĊÓŻ„ü§r$œÊ5u*ç UµĠÌb]…·üc$ŸŜŽäHv‘İ%xŝEb¸%v䰑Ŝ’}öTKhßH$ü}Y={D˘Ç@šA ,Ç —A]˘“˙Ŝ—*~Íß`Y³ı3 (Ŝİ‚Â6x0ÉWœlz¤ցG-iTӈ£ĝ@V„•”3Ï|…xv}Ê'yYÑ1ônh§ÛóXlœ_p>,ĜxìpßzĈaˆ `¤·­Şô{î§÷ٙoßffż˙”û½´Ş­w° d÷UîÎħ À6ĵaEóĊĉµOƒ8°àn¨£"™$ï´'0ƒ_Ï@9UÄ*"kŭaJY…e`ÉS²¸oÒ ìƒ •Ċ‚;6àÁû NT9Şï{~'T`–âċ4ŒE+ˆìéaÁODäÈ>!­§X=)¤w$ŒlWWÀ%aÓ"v$yAGŻé˙­‚n1s5QĈš7Xİ£Y>à,‘3ċ/ôjzħÛÉyR…=ĈzE’ŝ@ŠÂ]$__ùíBĊ5`ÍߖŜœ]´ĝŜ'èÇ*êċQE8Š™1XˆŞ ßGsš"ä§Úù!–•1Ü.ƒuĉ?‡À[p½Ì0'ĈUê&mµşÚp”Êž †3é]şW_ƒ•Xħi…Càži8ĈsD.ŠĴ)ЏŸĵ;Š’g§:7SġḒl˜@eá0Éħy63|™ĴçîıP8^P ĴYïÖ‡ÀünÉq8Z&IÁÚdšÑĈDâ˜uԓ8şE$UÂ!p $12À&7pôÙqñyAÓ0‘ ›h†C`Üċ¨M~žNĥĉ&†z:ZÑ341·uòô Üu€â?›ĤˆDĜpSÁó‹;|ŝeL" <ÀXeR„qô<šE7ÑÈÚÁIÄ!0ñĠ/áŝÑ(ރÀ^‡C`¨Ċŭ9‹H…ë_žyfƒX$ÁÑG–8&’WÂ!PŸşısêĴ¤âŜOdUÇxĊşˆwAÄ(—‡Àé=pl%L bV }^°|;K žÉ–‚C ĥnm›‰zU‡1q4ȅÍ­œËp›7î'ú˘Û$šrû*ĝBƒ™#˘Ä!0@oú¨HSòĵKf3ĥÈ ż À‡ ˘è?ƒ&OXéö|&RF—BùzjėgéÜħybo§Wŝ “`óúŝùÔ+ĝ#QËêñuÈffä™À% ÈŠz`²e ²¨ 7 C² ¨rŬ§xħ?ÉàáôÀIEÚ*ên .Áİ’ìǸ%¨nñƒp™ÒG‚3™ü޳ĥÀ`:€€úğŠè<  ^féġ:4TÎ)üYġ$În{)˜È„1TíVG%ĉ·ü9ÇuU~HŒŬìc?[ƒ€ŭG,dq qàDŭGL „4fÛûïMüPÙġçْŸµÚ2† ôOV? 
w’Ci:+>xèÓQ@–*Ü­<À1 Xٞ­â33ƒ}¸ÀĈğQ%?rÇâíIއñ’`QTĵŠĤxr`úĦÉż˙脸LنjßT€…Yí"dIËäŞU\LA²Ĉ†ıY7^m)úżBß_o³àP8ŬĦĴš~+6o,WÚyä‹Äüİ×@s½Lä÷„żöèZM$£gĝ>Š 9DÀ_İ˙…Ĝf™çHF“ÈÑi½ò9Ġ´ĤÖb|…êpšÑ!ğşaM_İ‘?…ĉĦ-ĝnÇdAd˙ ÷‹jé`;zĤ‚ûñ .ĠRûAĊHŝyŬïÓ;8¸SŽp~ÌaDsáù?4Ġè‹,ibU"(ġD<‹ħ43ÒÒJì âúÖâp$îĉĤ9£:¸™ İ2ĦċX{´!Ä)ĞħžÛŽàLèǛŻıQ̏ÎjΗFä}ŝœĈş3" &fħ%’ RçôbEÁ:èÔó²Ŝ }ĥ¸ÂÉPñ/BHJYwŽƒ×â5›w8vú|ì•kׯ_ğ{ŝôħğ6ŻYìċ0GWY H‘`ŒP4tZ~ùEa+Fgz˞Ÿ ²V  b'ÒARÍĴŒßb!‰°ˆŒnlúçUyDzŝĠŜLásŠËµfĵ?̑Ž·{°jˆĥF·cċĦ2ԅ™Ü‹:¨s·.>„xïŠĜĵÁ*—’ÌÒ°bĝĈ” ž¨‹žb5-A|§Á+€͘óƒs„âGdçôw]ù7üHà~SÓ*Tp<¨ğâ—Ñí‚ğàñşŽî~ç ŝ*˜Ûƒ=~7E¨ YÁOIŭÚͰ5²hܰê |ħ3†VO˙ĴU˜ûî^ñĥWHJ7äĊ–*÷@î°àÛÈ#Ĥ…Ñpù ‘ñqÀDżğPrát u˙6i‡à@Âé“_ä͚:ìÄSù‘ÖwmĊ,ŝ1òÂîn ›ŝ´Ħä”Xó_żYÑĈ<,¤ûϵC—ĠòâfÑ·µ³ïkSа÷î¨É£éá ÀMxËë‹ÑEĴîrr– û§³fr‚¤aĤŜˆ÷?ñ• íƒLXġ% TmU|ŸSso•…˘ı$ĥ`c7§[¨Ċ—÷ÇE=Ä·êíġ= ŝ#Œòş6Ŝk÷œġ43Żĵ­wx|ê5êσpFM÷ĥĠ•çe>½uzÏZo]yÂÒÁ€=×ßVËċž˘ÇûĵAĈZx,½™ Ä.ѤP´VŬĞá›âWİĦ—Ş!ĥψ$áû¤o„†tPÎ +Ŭ_Xxigôž˘ÓÇ{ $Œ ӏLP÷÷XU Eû>ğ}}VŻvj—‹ż:Ìl[,Ĵ‡W€u ç$Íĉ'pw}şğ’ñ³µż³BƒüTqğŽ&3ù/“ôDÀ„Ŭì Ŭĉo ĤL/”,ìÖ7ĝÈĜˆ$§ Y0!· TäEŻòRÈ}v§ ŻÚÄ÷rç¸cĞf!â]Oĵ2ÊċqÍĊxbß3$iP[°Ÿŝ½Nzĥ—úĦ[Ë~*‚‹eΈµzoUЁi×ÑŻRÎCEJf­İɅ8À%üĠ{ ŜJ ö•t‡†aàLÔ:SÏü´£˜‚^H`çĝ>Úì·xÎrHûtĴĵĝёħÓ²b ‰7Dç†ĤŒ¨xèoâxŞ‚sˆŝéĵŝ„˘ş£€;•ĴˆhnûÀÔ^ñG5ƒSş°É³ó` gŝލÇïyĈO ö´Ĉ+έ'PUi1Ŭt Ê£¤Ö܀ígâŜĠ÷ŝµ ĉ²Ĉ‡zÚW–~/(ĝ^X\Vùğħ­ghüÏ öÁ½ġEoÎl˜Ğ%I°&uŬ6Ĉ¤UĜ‰ú·gWÖYËïÌû>ŻíU¸- #ç-œÄşR‚5Qq+µĉ‡mšˆbpö·`‡*bp˘o9­œë—ĝŒû*. M¨ÏóQxP†ˆÉž9*.;ŬBž“ƒżMÛ7|}ïúİğŸ3‡)ε@Ünp ĥ]ÌŞ~À›zż€\@4µš) 9K}u•.°RŭŬ n=’c›(4aÊ^Ğûu/ĥJşî;}żÀ[c0 yÖ§„/ÖżL)ËCŸt‰#—F•6 Ĵyá„Tŝñ êb¸,ÖçQ@9¸óäÂW‹•4*ıOÇ*0”’Êò’*è·Tİè4SŭÙk+û•éG$8“ 4)óş~oêEìݤq€ÍW µÎ'…OU;“#&"A]£{ˆĴc(1úÇ!ŭt™Ġ½KXéÂÄÈĦMµĜ/’öċ‘™göéáҒl6”{ ŸòË飲РlŬ|{ÀÂ_ġá£Ĝˆ@3÷"8&Jn6FÄL‚b?u°qVóûk[Ŭ´DQ°GÛ.Û{=íG  p>k¨í׷̧wbŽîÙĵfݏ—Ğ£µ••µ£Ğ—ÏÒ5›÷ıó4óÛŻĥ! 
Ù#-?Òï]f îhTTËmëµ÷Í,œŬñ)6ÈD 6Z}dEĦĦ,bsà(Ŝ÷ páóo7C€}•,ިœ?úlĦ0È™ìì%àÁúÊïŜ“ŭ³™‘ËÚ$ŻXí&!…3#”„wu³.CNnĵ„*iDzvÏhW²°q2‚ì\=Nšß:´ üeĈ’Oħ‹Tïŝ:sİ4˜<˘û1żÊÚöŝT5ëĴPħì/òbRBa!?íÔX"ĥRá:Ħ|„šW#žĊރ‘Œñpžµƒ`£Òè%„&;pÈ…Ç2u½ħÍR-9¤Ë, óİ[0AœmÉ|‚\àùÊTĠ*ş³’€;p„€QR’8P†I>Èß§ùŽ–jĞĉMoD"¸TĠ?òŽÀê˘o)²eĵѓ6Ŝâ‹vî˘X|ÁR´…Žìa@ĝĦ~ Eï1·m‡°\ÔÀtœ1Q˜î9İFž—Î5çO/¤ú€böèÛÎ4Ħy÷: /ğ ˘ŝU?1x7ĤA)÷ ÄòÛ_ïuFzËb>uÎ£êùÑP; bÚ˘S ÛX8³ûח”ëG·.ó°ÒW•!öÁÄ9 „ŝyÄ?ÓA†˜ŒŞ•Dz­Gݧ|ùĠM¸F&Ú S˘7¸j‹6lvÀÑçU %éú³LH;ìL-·á ԔiĈÄz˘˙ĠzuD†°ÜwŜ›'DsŬîhCD`7D3dέaNú<²(rĦ o7=%'ĵ£ûX²ĤB2Œ=£…´S°/”]£]ˆoËxɳq| ²´ÖkGxĠU?p#ÓyŞÚ£´û Ŝs ¸Ò9IzHI,wĊÚ*_Ŝä Ór6ÌôoM™[°.“rZ¤6cŜşÌX"~o@6Œ‹”“oÊm8ZrŠÔ~"]›4râFQseVağéy­Ê^œkDÇ@ĥ²AĠn,“ħúç{° aM_ ‡ÓȄ&aQ¤UĈY¸Â°Ż…ıU`‡şi2a+:áù·ÊƒĴ‹h³{3ĊèVĉƒZżĊ*–“uï—úê2ĵl°dè/ĵj‡ Ġ;mz2Ċ“&2{z:gµ ÉäÄ/Áĝ›`eD)z!ƒÛôÉt³]½ëç͕ÚT’²×Ñ·Ànûù$| SuXŝġO‚ì0Ñ^œvàZÏ9R„2„"$İ8ËÈÊÉÓwùê-ÛwîŜ³g÷Îí[BV/÷ġt²2š(ù‡$ĈÒ˜ıöà´b‚6Ïî˙ùúüz24ĉ…?ù ”ĥ·G½”ITí•7²°ŜŒ]ft²ŝĥŒAh†)!ÊÁoĈżN˜dVçLOgݧyĤLN§ySevTáżB%…×Á_v5¸ÜÓ%/ŻÀŜZ£>eĜ #Êĉnf”(äĤòĴàıa#›PżŽÁ ˜­ÖÚ3@ÜĈ¸b|) XN] ĤpX Œ†°?— ×[ÍȳSm¨”%Ú<×8^Ê­yôŬĜ*™Ĉ\j×ÉhòéS­ÈA£`ĞÜàmr9âbM˜#ï‘ò*è§fÌ@*E/-ڃù+ĥ¤DbˤĞФƒ=ú6×O²¨ËÀ°ĞDÎy죸×Ä ·áB%³––9rŸĤאÎĵÔݽӍ~O’4*š‰@}:‘ ~H5.ißCħÊÇĞŠŸeµ­%ıy.ˆÇ7ü‹;b“.hĜ",s¨“—î‚ê^â¤Í#+îŭ×Ej ĤÚñş“F¨â†Ӝïz¨”_|'>üv ŬĴ}ÁGDıÉ.|Ëê)œY•¸Ç„â†ŝ‘‰ĊċĠäÜ9°ÒAj!(U\Ùnњ]QWâ^ää•üjhíêèíjmĝU’—ó"îJÔ5‹ì •Ċİ(TCtV¸“SCevoqb¤żĦ8(I\÷$V1ñİêÇ[.ġO‡Ĵa<ıíí0Ŝï'…êE|çLĜ ˆĴÛ/˜"‘uĝjäyiœĦËş¨K:ŻóŒ–Aş âŝ˙ĉ¸äNċµmĴ³âޏħ|+ʞñ6ˆSöö!>hÄL‘‘d~…îÖÙî`Ŝ3¤5ӟäĉ´´˜)ğI|˜˙8ĉ,WÒehU$éǽ îèĊRUĠÒ˰HÊÇvElPy)´pf3§èy¤{„7ÒÚAóï*lÄFâ˜Ĥĉp~ĵʙ§ÜT"Nl/ Ö0²ĈäKğÍ`ġdŞVŜg'ŭqjġàŠBf>)é–À„ê÷ƒšJ˙,Ġİnb êß9”8ĉ>ŞEöHE"f2Ħv_ñl+tqàµşà^èGÖıÉm˜kwà‚.bŭpŒŭĈÔĦŻâ ĉµ‡}xĊı`ğÚ ˜xf3×cùLĴ7}—9ô2ĉŸúÔûà´HO5èlÌ :ŸÙ§À‘š·Wĥ/˜-˜0aĊÙná12 kğFŝœó0LŽ ö÷ötw÷ôöŽŒO‚Eş!pBéŞ-Ì|è6›h†ċf/Ĝ~ċm —™ ™çƒĉ@ûDÍ32 ĥÁŭŸNÁ PĜ|Wz/ĈÌ?jƒ°÷‚†Ğà›{ħï{Î;_ÀÜ|q¨ÏöĜCkD÷Â7ͤa̛:dżBüÛÔâµ r1j•µƒTèLŒ„Ê#,ςş œ¸ĦNtËDġ,?KEí÷L\Kt•>̈́šëuÙÔGiğr5SXšu—Ê;g1ÖĈö÷â%MÊóÀĝóˆinʌCêıñ1ÊPÏNÏöÀ6* Ŝ%˙ÈG‘5Œ„Cea4˜Ŭ(ĥGàfÀĵë@EەçscÁ@ìĴÖ\*éËıFÂà?˜šÄ ˘&6‘‚Bl?YħŜ\ê5˙4^i&ŝż/fR²o×ö-CÖŻÙ¸eû}GNÇŜ~òò]Aes11°GÛ˳D…Ì3‚Š1EÊ{÷ŬŻ0 f·ĵ´Ò§ 
Ŭeħù`-½żZ‡BĠߐ܁M³aPL|ƒ­ġQ’D@ʈàçmfÔàmÑЁ°J!‰­UxÙZ!™ŭíĵWĥDŞ:ˆ˘ûˆÛ¸–<\:.ÈâzöE¨P`_Ìh‡Yµ¤°‰ò™bħûĝcq³Êñ ô˙µ”y}‹H2P’6ñ¨Aœ$j0˙L"Ñû_ÉÒf5g~Çrç+·ġWà£ËÀ[L7ÁħĈlŭˆÂ[ƒt$ ù%°T[‰/Vy“ ı@Xü.bìmXU•är@?aı(ĠU+:3ÓékùwApù”b7UĜk¸tÖĴ’ħÊeü.½Ë“zNĤ¨ñŽžn]>sˆb˙“}Jhö,QUâ"1%‚gş´mÓ×T„ĥ·s‘ċ?WÓŽtò³ĉ“UŽĥbEë%èof&_,dûĤŽóŠöÌBT` £ÀıtêœCy“X[ÂjMlÇË>·)e§­$"¤żìü(3ë2ÎY)Pş˘Ċâ½·2+zġĊ›ì­-Ȉ‹9°y™—İŽŞĵ”( ˙àô÷çAâ…DäUuLíĵ–m>—QPÛ ŭ`ÁTOEĉ­½‹-éUÁ*èlFêÁÎ/ÓB$mwĤ4qù/w˜1HšĞÚ°ÉĵCs¨ôı0„m° 2kOo<ĠW˜ħĊäÌ_şÄú"Ĵġ¨ y~żóˆ}ġO<Éè1§}ğÊµéÎm4Ŭg‚`İ/J¨&bf b˙´§š)·ĤŸçżÓPLœ£zvµğ0îâ—)c%³f•{ §‚<˜Àµ\YÀ]ŝZz:ÓYħşNÉÄĦ§ˉg0ßÍ(ú ˀ@9˜=Q_ŒŻTƒe`I>š0Ġ×`ĉ0 ‚[`bżÒ |­lÛ;"C¨7üŠ…ó•š‹DWc§ì…R?: :Êì†ó‡żÉÙt˙ÒÖŞpƒWâ>d;睂^ñäz$lĉğT"ŝPB˙+„YÌ xħ˘şoñ ñv­âUÎĴ)Ŝό–•ˆœĵ˘Fƒpü‡y¨Á&ó1bϜzêHRÙ_‹7ׁZĠoĵûĥMd>Ĝ¸;îƒSׇqĴ=!Pħ=ĝ~Ÿ^³•¤4/2­ xñż_ġ7GÉÒ&~ûî}jy,oĵ­8ÎħÍ 'ÊÁ°ó§ @]VĥsíìĉÚZÁZ@G$)ô²’Ê ß|ìNFqL’y£ Ÿîíó3‘&£âŝG_üZ|SZä<%U HĠ“ĝàûƒĥ"¨z`B;6ŝa—>Lïu€í|Íév7ŝêm:ÇñÚŭ*$ǧSÌxKÄĝ.“yǝ÷*4µ+“ƒ‘²ÑÌŝp!ÍÇür/ÄŻ–#‘‚żĠċ˜AĴêĞÄC)Öf]Ŝä÷a]ßci0 ž(żżRĦ²nÇ ƒqŜq腨ŻIîÁ‡^Ğ"Jk3Ĉ•Ç·á ‡ ávŽĊÙ˘š§:ŸAt¤‹—ĉˆ˜ƒ69\L rÑr0>$˘ö_1ñݟ9+ ˜¸êRw°w:wħ×ÊÊX,Ŭħqp1)JRÊĴvÀ‹ŝ;Žlç% -˙*c?Caîà7XĠĤ2à"Ë_)VĜĤî Ÿ›3Ž:ƒŻË3ÖfSWI9͒ĦĜžŞ¤”Š?UTx‚çEħĵĝû*˘ÇĈ÷ +_›î •;59|JN4ĵgúš²Áħc˘*÷ ZüâFV”AŝDAñé_ßĊûâF…`_> Ĉ˜GĝŠ˙ÈO=•|ĉRê]@Ċ†e@ÌĉwbĦ%NzÌö–.kVsHĜ9Ż|w€’iúB9͙k6}ŽRXN;ËÒ_„/Ӛ:!‚hLžjÈ@^ĥ7ÚĦ1äÜ:ñ-ĝJÙĉŻ´Ĝ İê 8œf˜§\e£şġ'Qż™Újn"5ŭRxÁX’q}—½Ĝ ĝĜ2óU]í3$÷ÎÔ3üÏzÚÍԒĵ)ȝC;HŒ·=Ê!ġ,vb˘oC>§u3]íÚÔ ybĝٛĴyn€“ĉĊ°fĵpĉ˙ gDÙ£żYÁGŝŸĴàŭÀ ĥ=ŭ7+x3°‚ß­‘&Û+¸p°‚_˙›l“Âíú+XÒqĵŭž7¸™·eŽà½/7éQh³·>osŸm·g”ÉšëßúĦµSúdï,e¸`Çċôòn(y&°,çñ…}Á>Ž&³)ÀŸÉ/ŝi4:Ċ ?]q”P (Ì2qô ŜwáqN€ĦXŜ]ž~yÇC0GŠê.Ĝû¤ÚGŭ߯1k”Ġög\~Ûó­³i½M/{ñ‘Ìm`·ö׎÷=Y\›KĊ_˜ƒ*ï,ş.À_êR?ïU1²MIÒğ59z^™ĦŸbBZöKLş­‡yBT Zôv¤Íŭ!¨gŬÔ>²S ¨ƒuö-†OÖ|m…7€c bŝR/`.HÈ)4Œ˙˜ĥrĉ•âé—ô ˆî…{àCĥ·Le˘ç ~íu˞;›yMï”[‹/Ö½EúVJı6˘´‰T@8\a“Àٜ)\ó RĈܓ ‘]œy(—σßäw‘ÌÏ&uÛÑlĦ@BŬÒ°Pĝ£˜ı Ĉu=. 
DVqĴÊT" żĈ˜[3ħ Y9€lî\†,k‡Q ĊWì™ĤÈħ xŝ R?ŭPË Ĵj%Eûkè´mU 3E$ĥVâĠ;eĴ==•:â´ÀŠ~żN5>^ÉŸÈ QEV>ëYàvC ÍdGĴâ+ïêÒHЇÓ[ĜĝxeÒ^BëCWħ[ŭ˘‚ÉŽÒŒ[‡ƒç[hÊ{~2C ö‹–oÚĥ=lçΰíÛ6.Yäaoè ‚"D’Ñ´˜|ĝVFiǤâċ/˘×Ùİ™PݽI•8ğ%ŭ°‡"‰Ĥx§B i;Lhí0ìyĥRQ ɜàW7FċÖ½Z4„›ç§NM]vg5^ıUB˜¨%ĞhJ§‡Xw´)+а KÔï§ Ċ^ÇD4Ÿa_-`Ĝ߸Îá H@×ÀJdÓDÍ\Ĉ50CÍO qm߂úµlF=k§1Œ²ñWş ëÛN5xŽ•-Bĉĉà%~$DNç^i7`Ÿ’UOtñ?‚ays)äAÀ²pĴï¸ §ÀċŻÇù•'ĉPDÜŻĠ &żîs¤ÑvżMÇúސhz7 Ğ;Ӕ~ÌĴQ M÷°Û"HÀlùŻùĤÊ m”†İK@HĝÉĞñİY_‹*~7ĥvt÷öÁ£·ğ£µñwEÑ×ĴÔĝĞ'C\L5eYTÙt>`Ĉµ0$èv˜;…t|ŽsôǍ@=IĊ7ĥx†’ۍÀı˙뤠ŝšğeΉJŝĝëċpĵ܁×÷$BJ7ƒû#żë„*yá'Ĵ”é{;9‰Ĉ$ż” €~ċ´ËŻÄ×Kƒ$nH9(‚-ì%‚8bkĠĈcİċù¤›cš~ĝéÖÒikSĉPÍX ż&ÁÖ çuë™ ~„Tĝ]°Fž0Í1BYîä?ĦûŒİ딍úĥâĦ°}sż/éï£Ĥ|*~OBġßcqò q°á"oëX¸ö3A™7âôÏmv6VDUğ0:y×uzĊ¸ ƒÎ}œ~á ßħïüĦ‡ÎTñeiLŜÀmÈB  ġ4˘”ĜŽMĤ ‡ƒ™ŬnËëDPSÁqûŭÂ>àD6½ż‹†Ñd \׺ùú{=˙\ôxÛïÒĵwiÏâï^ż}ĉôé3Ñ1—ߍ–ö.Żôw[˙8ŠŞ`ŭ÷×7­q5Ü8hĵv\ߤÈÂûÛˆê×-\AwĉAĜƒ½¨p kO RE¤}î·B P &?xÌ´eâTç‡Cüïzö|1Í|2Ġı0Àyċ„Ŝ½ F ŞÂ²A˘ž‹qBĵËÏ´Ŝż ›Ó8ù8ì½>ë?éIÜSċ5?òŽ‘|û›í…ŒĊ´LG½ḣŝ„żZşQĠ;‰œwä]ȅ4ÓëÎ^‡!ż6²ùÓĤİġô%RNĞôÜOsì&)żœ;ĥsD”’ï Êo›0;Š^riĥġÖUôużĤ÷<ú|ÔÔd$ l;¨év*µuj–ߤÜÇ?ˆCQf.$Rg•CIġ (lÖ³iŞ~℣£µvô‹‚,uı§ĝ3E¸ÉĤÍżÌ+sA×v2 k<ü\„8}%ı.Ÿ’ĊdŬ;S1êżŻXÓ^9š˙^˙ċوŝÙ6h"zÇŞħŝx/ašŬĊzlìÍ DzÑízŝLi è\£>áĴÊ!Ĥ˘àq şô‹ ³à7ç×ڂâ‰"c4ëıgùPóĈıÌŜş˘ì¤§"6XèbcŞŻĤĴ +#-%%-#Ğ ĴĤoj²pĊĈˆS7’²‹êz™à e7ĉ?;·u‘0Vĥ]{ŝ L‚ı]_.FÔ4äA% úċ*ò˜Ò~ŭíE҈Ĉ†7cXŭE;š°W|?V}Lф8@ÛY}döċ~ŜšÜŜ&ìĞE=fxêŽ.yq ŝɅÀ&uBŭ<Ñ>Êì„ Sï²ĝĴ4˜˘(>ŸÊİg .ÒíjGÑpÎ'UÍoĴġ izµìòYÌž+ ]ñîRß,ĠêjUìĈÓİ;À‘6bb:ú=Ïġì^eÍħBLa=Š MòƒwÈ_|‘û€XÛ äu›ävÀNÜFÓğ•°zA£ìb(£ż53xÏĜ À›pÄȜp“ŝ6h'÷}ĜYòŭÔÚC`íĉ½”°nîv§ßÂï Ï­Y·À +H…1½ÂÏ*ĞŜĈÓÑâÊ÷yżühB9"ıx7jœÀî9"ÇX÷"â›âµPĦqIš`g-{%p{Ż˘²ùPĜAÉewş<í×É#êTŜ>^ğX“BRr?ĝ˘ž…³š2ÏZÍGDÓ9älJQS€sÇÚJ³â˘wyYëƒ2„÷Ġ0!1 I)ii)I 1Ħ˙T¨ Ñ·ö Ú—UÚ6ĈĊ̎˘”³!Κ"@²<—ÙHŭ‹ƒîJ$ŠĉâĜÂq|,ïP˘ä]Ğ}éi;›×~êfDk/ Î“ĵè˘K³ĜI.PxŞĊn8ÀÏu ı#=ìcÔğÏġ@Ħ+ç€ĝŭâŬWž†‘™O= Œğyü+˘_ nU÷ÀU#us…ïá·èîŬÍÖ/yğÒġĥd꽤ów9ğÁoÒn™pLż ˜ÀŒ÷†f£ŻÙ°Xĥħ€(u§£·'ş]²í5ò˘MìÛhŽ|ASÚD#Ù&Ŝp š/ĥSdĦĝZñÊ*ɕHfÚi´”J{Ï$’wbgáĉrĈ€ÇñXÒ&î+1§ž:SÙ,îN$t&WCċï(Ùğ ÔĦ;&kœ)X5n¤m#ŬkPJî5YıkH…ÏŭˆvDĉ}sç!–İĵÖŬââaĜg_Šò‘6,?PD4è+˜ölHŠĦ…DŞJ„ĉx½ ï}ì+‰êî1WÛ*¨¸Ó xúGžòR@éz+.z\ 
ĥ’…żÁ˘ˆû_š'àò߸“›}Ĵµd˙„}…”aáğ"xËν‡ŽFó½¸ûÈwĊ9­ÍfòŸYĞĦ'äÜÑáJż†'JÎŝ1N€mV!kzV"KÛÇvQĴż =Ŭş†d™ƒç:#óÀ–ĉ†Ĝf|ÙHŽ;£zW†ÙŻ= DY‹×TGÍ.vàµ' QMŸĤ9Ẩ´? x†ßlÑ% ÙD~…UÀ1WDÔ|ëÀ1+vğ€ÏCÜ(àSÀĴν…½ĥ4”ĵ¨â*Fö>ëvŸ¸úèċğüÒêĈĥîÁaȃ@"dx°Żğ­ħş4˙ŬËGWOì^çco¤BBhÒÚö+ößΆP0àž0‚@ˆ²ËîÈ ³ŞŸl5Ed\Á2`ôs¤IwË„<ö—Fu 8ӟ6)Ħ†'jñŽ‹f¨úAĝ+AéĠ5{ĝŠäÂy]‘²!"qŜ<Ä9Ïħ$­iŽ†8€à›5e×XûRdeÏÀd[ Ÿù1[2żFwíèpŠƒZÈjÖgy³ĉÖ9ŠßGÜĊß3ç‹}s‘-î2Òïüğû;ÀŠ^*UY#½Tp˜ 8?,B¨ĴŒqˆçh:sšö3Ÿ!h:#Hĉoz Ss$EÇn‘>4ˆ‡bŝ*½iè%ĥƒîàkÒIü.µ ^0Y<†ï <›^$ŝyÄQp^&oÌKäŒÏá×(KĈK´U>³·#[X? ¤“ñ›  @€[Ŭä²S%û´°Ŝ[Bƒ9ѳ™b’¸ Ï.ˆC6V½–|fpĉ‰9bùdjâ9bx ĈĉĞĊé°Eeż'0KàŽí~à)Bħ8Qgì£V`èۙ9ˆ~9ĉ,…HĜí}ŬĈô~ŽYĤ/ ĦE‡ž•%Ş-/ŝÊıêbp—Ô´Zyġéû’úîÑż!p ˜žbŽCBAÌ)¨ ×ȃŒvחĵz52dĦ•&d‚ÉbêsWŽÏkFÔPù³C‹ .Ĵż,ĉsŻ€×özŻ"ċ|ìË(>˜ıl†VGážR~‚"âù Ìħâê=öÚtñĠ°à¸dˆ˜ßŸ˜zb‰˜?™<£L_[e; .Ÿ‰H[žaBÙÜ3I¨5Ŝê ŸfW:‘÷LÖıA„!7ñdiƒĴ-Èvögí’ñ%”kp ßÈy%â5–'c?X(ç8òY|Ñô3ÊȄĈÍx)ÔÂÏà$éġ ûšÖĞ⏅Š7| Ŭƒĝ2Mf ŭw&‚8cAê1´Êݤ;£ĝz™ö,ô ËrÎä="Jb<ú‚t öŠß:uÌ‡ß òoĦñÈĝ8…ŸPi<͢@UŻbÄjA÷„œZ{’£ĝïÔ^bwÄ òcE ?b÷e• á`AÙwşn VⅸäÏ?àQ*VR5.1oëCNŠÛwN µOœd¸Sd· î9R$Wĵ™H˜˘°&mk”(ġ5Ï:aŭr N_RN‡³{œĤû\Hd%ç°8‚ÏlÈışmž8cäŒ<‚ÜÉ(,`?ŒġµTÍz™ĝ֕ ѧOž8qòtô…+·&ÌúZ\Ŭ(Y H@Ĵħ(Α`#È‘%tçmğšA#àĊDž9+‘I .û^4q½Ù‡à‹p=ĞİÎg°¤”ôӌ¤­Q@´6ùf…$ĊñŜ€ h‹,Ċ=…5™hjëBúM˙6sì’ueÉW@ âU‚èÒáĤĵÈÏ>V’½}4‰ċ5żƒ½TPǏ"/ìmuşċ ߑ =Ġ‚ H|œANàÑ[ü@Ħ÷ĉ:ß`‹ŒôbԘvŬ›œcÉş‚fµËĴÇ4FïVÒb&ԃ0çpñ˙ĈĊßü?pñ{\<ÓÜħšĉ}˙ÂĊwĠèŭO\ü{ÀEŻnp\ü;M™'˙àâ/p+Ü˙‹‹÷ûƒ‹wCßċËŝŸ¸ĝÁżqñ‡ù?qñ³ŝÁĊ*ôĊĊO5ŝ żñLâ˙†‹·3˙ /GàâċŝÂĊ›Ûŭo¸ĝÄ3˙…‹oœú˙ƒ‹_.~Ö˙ÄĊƒ4Hë߸ĝÁ˙'.~Y9ŝŜ‘¸.Ŝïż¸x÷ î…pñOd4ß.Ŝħax ˘ß˙O\ĵ^M׿pñ}ĉš`Žud¸ĝ=˙.ŝĉżqñ˙àâñ:Ĵ(á†LäilĉĞ9üI˜9òOíÊŜĵs€ ·ĠîÊñEl?ùBñOc€§÷MŸYMŸ`]–ŒéÔX>şÊ•4eğ W?Aèh˘*q§½,=o•OA$ÒNU^ú IÀ*”1î| ¤ÖÈ£†GаŝÎÉ5_x#lQġ#-XјwW…ËİıÇş§G^R†żs DÙV¤• p”ıÂİ^€,o›ĜKħ)ĉF ™rOPkÇ7 !•sÄXêßĤ2Y€g<‰ß ŻžIġĜ'ñ…Ĵ´ ž¨ ‘ĵ.m[€…Ÿy+·‚žÇ|äѰĤ/"ÛóÉlŽbé,Ċ‘Rò½~Ùí|;kŜ>Ñz 0 \yGiùġRĞñ0FA³˘7÷2r—í%˙Ğ<ôuü›è^€Dâ1ènÁêrÖGYóĤN{ħd@EM”I§ɅúÑuÈÚáĥEèÖħ6@Žì Û|ƒ]ĜİİÎ ×X‰7b›•/!é?œé?.OÜKV IXµb›4mÑFşS%ƒˆYz 4iÎéÁhÚEDe͋~ĵ˙ġf]2<<³›*ğĥl…Ĵş *Ѓ‹”ĈïrSƒôĥşóĈ‹Uœ°ß—W÷ı›ŞIô/ވ´²ĥ‰Ë‚€Uë6…îÜħwoНĦ›Ö­ Xàbc˘­,M°bItI5S÷ }W_j˜ò™ŞŒ‹Ġ!oĉĥ+tvdE-P%Sf-ğV6…ġe†›3Èş›_—öb 
˘¸&mTPszI:ĜeIRŬÁùfMz[Èq„VâcCŝx˙ÌC}Ғr,Ûñ.Á^S6tNƒm7òŽÀkú·mEµ ŻE֍Ö9›Ti£²‰ ÀE'‹Ùw6™Ë~d-§>ìFcpߋŜäŻ£g€Ù£î—ĵû.r™ë­Ĝ\ÀWKĠçӎò\ ˜ÀİŬdzĥo—íżG.EëÍmy…¤µŠïâğ̞şDú@ĉ6ˆ˙*ٌoúÖĤê ½ 3`ËÍ£YĤŬeŠÖ}Ês:jġ4¸‰>çïAö€ÈÊä×ÈRÚ䨨fƒïŜf2 ] Ġ×*öaŬtü­̂‡öµùU‹‰]PĊ2²~ÜLïaáMżħĵŞâá6AŜ p+}ô\2BĊ/Ĵç–=EÜ?eû ܧ‹5|VÁkYoSj'´2Ż/Ġ€§aIlŝĈmÏ>³Ä@ĵ‘N›Żk˜ÚïÔ Û}-Ġ V,MBĊÈnaŽ#î&½Ï/İüŬÒŜُÎö–†ß•%ùïӒî^8²#hĦ‘ •£J¨[únżJHÈ ïnvs¤¨Á’3Ùí\l(?v ĵô4–^‡Žjgê&=2úH‹_sÑ HÑżħ‘qŠŭ­ßi &—zßÁ†µ"OvX‘‡ŭŜ$,s¸w&NŸĵĴ‚Ĝ-â'jÓ÷ Á$Ĝè-ž+|†]ċ µšĉhĜd=-šŸ­Ş’;JZ:òËm-ç?uĝİİWÛ1Gı´ÏZħĴÛtt÷`gͰS rUmû&´ ß,RúK|Ħƒü@ş45ۅżKĵ5 yĠ"zxFo1Bßi+ÁÉv@ÉŞÁQQĴşˆ-•m S;iïû ̀³#Dö”½Œñzz #…ğž|î aÀ‹·ïh+–„%ûô7;Á+5MQí-vMdv˙˘ˆÁ{,^AžÜ9 ™‡;œ† ²qŞ z%YïáLÏ!iÑì²Ft7sE!â7^{@<=Xċ-dV8 U3Ħ¨9{kş³HQZzżXħwW%NgĠÍÒ ˆé'îrR “dĴÖ]Ì&DÏcuïn…û[„¸X.X·˙â£7ߪZû‰àŸm”B96›’è_ż­ÁŝÖŞoo]Üżn%q „…ĝ­wuc„:ûâ:+YÁiW"&JoÒN܊ğ@Šm¸żT ‘Yt§ ÏŜĴĠÀL`‡ÏB´T$ÌU=P‹˙ŽP ,ÈäöEk|ÄBEġÌ<Ô#ŻĴ¤“ƒ8w -3ÙL²/ܓSˆÇŜˆ\äçÍı†½USLƒwV§ĉ~òY,Ilnk¤pxa0‹ıN^ÏMa,™~ÍXĈ~ ż‹€ñٌ3ƒŝ÷À [ƒìR,‚QT-<µAB´Kî8X5ï>љ>,ږŠĵm9Àµµäœ U ïÂü•:ߑ üäЁ6z”Ĝ-(×Ġ(9Œg ­à>Ĥlä?,ÍS(–µhmµ’z CvqŜ)iLoG‚Çklħ‚lM™ÇĝKUهĜ{Cà5ĝ!‹j¸×ċγ:·Puxġ`Lĵ7Ó{HJ,ĴYċŽê^!Xħ³Nw`…WŸ˙œÉ†·OŠŬġĵġš#átğĦ:$Qçè’iAK"T²(ځw@0V|k-ö„t|¨'p.é1\´ÁĝD‡e@kİı5]!ĵiÈ}5TçÌLKMNJxò$!)95-óc~qU¤ĊĤ‰Ô{ĵĞ&7ġځ XÀ%@\ÛeCL:Ż~qÔ6Žb&ko.àN 6*j‰-‚é’hgQ’N(ĵF81èŽ×ZñŽëvĝc3ŸÏ~!ÖZ\ ĊŽ\ĠEŬ³ÍabR‡zgî?³ž— KŬÒÉ:/%[³ñkÏ˘á{ìĦĴêKüħŒfĥ –a[3ŒlŸ.VzÇم„qßHYµĥZÈ­—ŝ€Mĝ)ı+„²Ĉ”jꔉíŬQàü7Èû Ž“ßu*ùcğ„ĞŠh'8–ĥÜ"o‘Ô6ÑÓ:Èí08 ĥÊ@k8µÌħ@yO>ËuĠêπâé_Ġ²‹ŝ 'ÖÓßB/¤ĴÓxV]³Öïv¨Žx §üi)>µkír£ßL˜U5|!ġ.”#ì2ÚÑéjwtÓ`móħ§³ 8´WXía7Yŭ‡Ċvĥß8£Fw'&,Ù ÈñgˆŻŝÂ}ìF_šÎä~ß5 QɚÔĈşˆ@û"{x³Ó”NÒX~Ğ‚…ç_\˘MGĴ7Ŝ.€UÄïŒs°½g€S×eíħï€ôÀ”~K>xx)*bëš =œĉZ™›š›˜š[ÍuòX¸tÍֈ¨K_|,18#xÀ•x÷àĜZ]P2 ap.÷Ĵƒ no´–BéÚK.ĉĴŠ[Ë5HtӝoálèĴˆ¸ÄÖ &²B”‘Yğs™éKĊİnGy_V‹3üs@0ħˆ›œ¸k„:żá·ì“>ÜÏşİ™0ö5á½CP œġË·PàÀ&Ô½zú(ÍĤŒ}„âRUğàñj+€„Ŭ¤ğuµÚ‰>ûÓÉMö’vûo-ƒĉşYĈe y ‚ÏsÈ-î"Ùê_ÒK §›ÑŻċÊ=K~O°‚äğS ³ĝĵ2·‘İ(‘–—ÈÛ‘HŽċ\Ŝ!ĦŠ´#KVn€pĠ“QÒÔ36·ħwru‡‡Ğ“½ıħžĤÑ&ŝÙYĉó‚"b“sëÙ*j˙žh­Hƒ@gĝ’p;G I%İ,ŒÎĊÇóOıJ" ŝwà€iFr܀ ,– []„/˙ĵ Şĥ· o9Ħ…XŜ›~îˆê\ž~2ħOç7‡ ˟;#+~dxô¨˜Ò-nm2ï'ïĤŒböĝzìòıÔ£ìâ9Âװא³)ŠÑ_ĤÒŻùûË“!9·³ŜHµxÈEòsĜğ 
èî›k6U)¸M=#$ĝ~]šïûфİ%@‚^|ˆ™ŽWȳn™m{[ŝV…$†^÷Ik…¨ożpġ7Û`ÙÇ&ÌÇÀ ûXÌ"Î=ô8F%…üI0˜^œÈƒ Nuèh³y wTÏÎùdˆŻhË$àOċá#,‰°Ÿ5î³}5ĝPˆÔŽ@£ŠG2c•›oÀ¤ž83x^“ä‘Áĵbˆ˜\íççKQ]ó‹wk@Çĉ;‡ġiû,DviB$ÂBĦlh.fÑEˆQµ—]Ê„§ċ…@SIĜĜXĴ:ùĵ ,<¸Ä'žÙşÈRCŠ IĞÚÎ[vètì­¸¤Ô×o²²sr²³ŜĵNMŠğ{úPX’yĥ†jÒÄŜˆ&aıh+̑Ħ]Âî+{~r•ì™$M/|„—Ú`ŝePKVñ½XÄ`Y8(LôC!֕°T™µŭ‹óúLğ‹ù]İRÁùüŝĞ&ˆá•An†IóüàL˘9âF°‰ĦËŒtnÏ"+VĠ Y%ĵqàÛK‚C”üSfZ˜Œ?&Ïvĝ˙V-ĵŸgUpÑĉ4äM=b3%;ŸoqµdäáÄìı3א¤q3S˜…Ż½†³{óĊŭĜ‰”-PB߃]E·óĦ›ĝAĈ²µÍ¸•Ȗéĉ*,Z0!ûÏßK}÷½²ħs``ÑÄá˙σ`ݘ£•ßßŜ;ż?Ä 3ŝ7sŸ—Ò~‹‰Ó_’é °hš†×Ħ—  +şĥ`ÑËnUÍZ×,Ú%Ĥ–‘&dħdjılK‘Xñ–5 ż·ìñx[°!ôsĵÉêgĜ‰sç,ì÷:Şĉí™0†ú]N*À˘Ó èû ÍŞ×o8XòÎ2,`­BZÑçGŠä½WÑ̛Ŝ‚D›Ġş­ĠRĉ´sùÛÑĞPğ.ĜBIdû‰ç÷Î6ìŭ Galj6ž„\›™;{â!’< ĉo$ŜžwJÂĠÏŞAıü3ú´SbÏֆ{P¸:Ÿ ‰P‡Ĥé8²Ż#ñSvÚŬ_@•Cñĵ ġßMşúÍu:zġ­ĤÊE£óYéÒfż‡üÈ@3–ÓÉÇÁ` ²f°kaŒòqïşŒÂ^7âY‚]#ï‰>yùOì£;júdfàŒ*Ċ瓠ŭ˜*Éġ9kòİ+Evs!(Ŝ•&ê÷|j6P·\:Ž—ÁêıP>oI\Ż·°WĦ0ġ+a‡€ĥŜÎ%hѽĊÉ'×:΂{Ur–ï–¨Û/sĞچ&!÷Ê6 z:ÛZš›šš[Ú:{Äŝ# áLµUċĵµĊ×fħ Ÿċ¸öd2Ñ7gwĉŜŜê˜@9ğ żĤ pu)ÜCġ×'ĥgÊ/şH=ĝ%HMâA•ÊL£ÏŭDiñCüÂͲ×§“Ĵç$Ġcí‚O>Ġ33OLQ÷ĜÏċdŭ'ĵĈuàPÀJ<ïJŜ™ëĵ2àkΘe]ƒkÀA°oääéÈ‹›ì7ôÛL:Ò{Ġ?´HÙ´µ^O§Y_·é·şĊ@ž¸?Ôxé#ô[Ûn*ıÎvÔJq(äA˘)ùĠÂı6ĥĵ=OÑÏċdėH„DŠ6g ÉŬr°)P|Š<Ġs‚.Ĵ‘ìXaœä#SmI`@à€Éo ĥ“ä~™€ĠI[4u9 ?Âb(‹ú›WħçrZùÑ…SÌ3/ˆj§lCè³+„l‘CÈqü~biĉ(`İFqÓ@Nĝ}ÁZ*RĵċŒÜ³BĠ@İ6ĈUܒËaċ(P+ì ‹WyÎAˆl–5ŒfGX £Ê>@2Yñp£ıÂö9öŠ8İ–%GÁô0‘bĥKv_LŝZÓq0fĴ·ıŞcú³‡7bÏF>°7|Ϟ½G½ñYúÇÂŞĉŜ1"ĈeöÔ|M¸{‰­† "a•\*š‰ßŻŽùh3 óĦ2öŭ‚2*l‘=ˆg…’…ÎUòX_´˜âänQDMc ĉ|@ µş7Âyğ€˘x°û˘ä³ƒÓqF(pÉĉi¤¤A”cAéb‚İYşĥè˜˘ şĦÑüZrÏħĞ ÇĉŝE”쑰sW“…4hĵ ğı}Ĝu°Ç߈˙9HÉgşĵU2>0Ä}Ȳ#–şç9Nz£§ƒj°ï“ëNF2š!yß"a.fìôŠï˙DÂúŝ 3ùW$L"aVÀ$ŝ û*3··J{ÖÏ?‘°£˙„ÓĜĈ™E”žáiš˙DŽkB$láż"adŬğSç˙·HĜ/ˆ„éĞùo$lôÍġ"aóŝŽ„M‘00Ĉ˜]ŝO$ìÜ#a瓿ŭ;vŭ_‘0%P˙ “WúW$ìúż#aߒÏ˙7vî?‘°ËA&àŒ!"aSGÂĉŭ Sßfôż‘°šcú ûġżEÂÎOŬĠ%ü+ĥ"ašÂGŝ‰„iĤáϔŝŠ„e "aA˙„ŭ û9KğŞwÌ×?‘0 xXA$Lġ_‘0“˙DÂúŝ‰„}Ż g›ıü Ö¨Á6ĝ×'0(ŻÂW(÷ÁĈ`Tׅ Şı^MWö5äÁ´‹ZKħÄbHÄvĜKIê-š5ċÏHĉnB/àHkXŸ•Àfş”tJBÚ°˜µ j™G>8SnK;:SċL éY‡ş–ñï*Jžcµ­'żÂÊ|QËtAġjŞĈĠ‰ħKšdŸ/Xc¸ }éWA×YÀé\‡6Íf9’ÍuÀ윝Cà÷‡°&H\“ö|šÀ:’Öh’„Ìw¤oĉ÷£ ôKXnş_ÚÀ‚{;Ü4„ímĉż÷vNġh_f§Ŭ=ĥÒÓA˘‘"IdÂ#$,,D8cŝj†é˘ ²ö\vênÚ÷NcìêœÛ{ŭÍ@.¤áĥ1h)ıż ÔÁ"ĤŭžTáns!’ĉš¤lâÓ#2dכ°!Bp0ç,ŽÛä6CËé:€Îv .˄7b_|Ț—Ĉ&jPWW 
Ò-Qß2ì•1y}뜤â]~™+şg$”ä\5s”f[>s<Że(ÙÎ*6”~!8EZ ž^ÏĴ5$ĝĉ£›¸É ˙İ,Qo@ĝ.a'w`'!ğ·X˘¸EÍeúríŞÙ H‹.ĵ¤ô)ŻÀW)ƒ-àÓ/ĜĞ­Á&(ˆÍZ†ûêL^ •ӏqíLĤ£ŻûÔ½x§Iéƒú„~ö”`µHnŻĦQÏwIïéTÚZ^y#ï5p&]Ìm§–fɨġ~ĉĠÛâȸ‰'H#fiAëzdIç&Ôó÷ÌIa]áSF#ETïršÖQôžp[ĥ )FLŜÔ!yšÂe+ 3u\7Êŭ°RŒî™4Ê˙ħKÑܙÏf‹'IÌżĠ(˜Ê?ù›YĞ5pyÍÉĦË‘°} Ĝ–™ĉÌ3ËM$I$)cżŭ4‹œĦúÜg÷Ĵp5V•$ Ad†¸œš‰µ£‡·˙ҕAkÖ]´rİż·‡£µ‰šœ8Ñ !1$U]Wìıĝ,·~ˆX(5}y°ßÏXŠD’4Y~&³jèUC­ÁmšÜÌ6y“tnNŽD+ mmá>Ñ£ĴkâÜU‰£ƒ`QWĝäÌoOtÓpçdŭpëÒ> ÄH$à7Ž-½^´ëĝ5Şßh‰ĤÖÏA7ħô™@ê#ŜFro--uÚ[ò{‘aoÈjà:€ĝY0tšço×§Lì¸ÇèċE” “:ĝ²YP mBöIöĈ“òK)—˜Ú>ĝzYh†Ŭ›0pâœ%eġ¨Ï^D˜6:=_„‚ApÇïĈċKÀPVÑmİX4ì.–Îّ Tż‘ CùLŜ×X ßÂSä`~yUtWÄtŜâÏÔ$/sĞ ù\ËıĦ(qbb`݈ò•İR‡ûfà³#G²W†ħ²kŽ\ĝS`*-bˆ.ßJv4É<ş~Ĥ–ˢ³ĥeamñ˔Is?ˆ=Ü~ÈäˆY…&ÔLÁ5yŸ'Hc„µĉíĵŭıÚSĊ//í^2W[†AÈ£e4Œí½WmŬwòÒŬ„Ô7ï>ç/*.)).úž˙ùŬ›Ô„ğ—NîÛşÊÛŜXC†G3d´ç.Ù}ée1ħ b6½sžtÏ5<÷%“vŞ&!Ô PĦvû‰äs’ˆÒ²ĝ6l,{Û,Tv9ĵ_ĠF›“¤ƒ€r•ıL”ħ¨WOٌĴdÈìmäxÀÙgĤﰔāİ+Ê"{&NH(ŜàCc~÷²¤Ú3ü­ŽĜPnˆ\…İş\ ~KĜşÔÁx™ò†#~€ <‡nऋı)ZvW€ËŻDŜqìd÷o‚0 XèKŽ 3½È§Ŝ“E:Ëq2˜¸Ŭ0Ùġ¸6ó4Ÿß+ı‘ÛγpĊÖ+>@?ԉ†óŬġ'î!İšóà àMżŽˆC/òKBÊf3 Ú¨A9p’ ÜW˘.}5FŠŸf6!ûx™ ş…° X3Vç@;˙˘/~“ˆĥ_ä•Ĝ“C‡ĥ’-á–cDŬÚc/}LœS `É)ˆcż<*+ °Ĥ@Rz9Í|êF‘ɍ=[ ÄxĜ‹7]v¤ÓíÎŭâMĥ¤SŒĥgôáߣ½äQaÓM 8ñ3~ğ½Ë̲œżz÷éÛÉ9ßĞšş Éĉ´x˘[ X ÁEp|¨ğİê{NòíÓğWÏ·œ%šŒžËÚ¨'ù­3Ğ}. 
ĜÉÛo˙9ĦÀG›L…QyŻèïx_Ĉv# ŬòpÁ4ï×9;:ŬñrŜûƒ!´àÙ//Dšâö”9ŭĝWš°‚`Çiçg8"),[áÜŒĊ`8Úµ•j÷dKòցĦP²} ï"Q޸)˙…†ĉP7ĥV…ş ™ĵ}ÈĤ™OŠF5}.˘Ż€rBş§`+w†´·ĝrµ ŻäbŜEP‡Úéôż·€yšİȽ }w~¸hŬôÁ âzÌĠ‚·]ĦUg›ÁCsGŝħú@ 4³™ıHÊ ú$§9´Pü)afTQ·ħvS­Ĥ àÂg·4h”¸Š„ÔAÁsq›ĤŜù@Š|&eôcz7âM£­H@_0:ï炄Jĵ Äñİä'è6·ħïkQ×Ô2{tÏ1EÚÒ|Ĵiże~{,Ŝޤ•ÊÛöÉċéL^ñ}TfÉÓ>ĵçé*eDfáġj.Żŝaà,2U7èîÏI|˘ >–@îf·>.>ŬñíÑa‰PşĴž²Ññ™EuŬcÄQĴ‰‘ŝîö–ĈúşÚÚşúĈ–öîŝ‘  ÀżäMu×eĈGï\ĉ¨'KGI"  :üè[Ç4Îí';èàÀQ6Oŝĵ¤K%Ï |XÏV__(ƒ(Żzڃ÷=]"ƒê(ĉ1ӗKRìo÷AÍU‘d?ĈΘOQĜ߄ċ/)ë!ÜÑ™‚š5T­ûìĥÍ4~â^"ˆW‘¸Àù5 îF|[G7:´'Ŭ=ŭHêp"ç÷6و?‡U@ï¨ë@9”ƒ[f,ÜD³ĥIÛ¸ğHjÁLéJË0°˜xŠ^ :^¤‹36f@ üX/ĥ‡ïhÎ9Dd²Ğiˆˆ}H´9V²!0œ(œá˜6×Àv^SÏ)Ûáƒà<ĥĝĦ° ĜK¨ĞIÁì,âè$üœ†>ŭMŒkXŠĴÖ'¸‚ìĠ°÷˘:éı&‡ġ$‹wDÚ˙'öÚġ)"³˙\ŭżbÍ(Ŝp ĵk9Z¸ž7E•ĥĉqĤr`ü×50ÚJĝ;a4˜n!×À‹6šıIÇ÷ĝĞZ"ı×À•Ö˙\aĤûš_żH{úàĈĊ3Ç#÷GìÙ½{OÄŝÈg.Ŝx4í÷˙^{ùŸk ġJ¸iÚWÇ}u€GeñÁŸkàE¸ Y„gÁ50{'  ì£˙çLqòĥ*ĦĤçágĞğp ôĤ(hĈú˙ıĈÏ&E>¨ùkì§?–gAZí!8/눂hÁ†zä{p ü¤%›‚]c84ġû€óħ°q Ìb“û^X„!b7¨<Î`Û)ϧ\á+ĵ+ȸ¤Á5™@ż%Áĥ‘…µ_z³è!ĥÑBÜy_I½6˘ĵó4š>K+)ĦçÚOŜFžé8³/£‰†@ŜôÉÔIgĊ_M³ÌzÈ:|•rŝ`ÙYk V<şŝÌáYÖQÌáD— 59ÓÎò?hI?Â_ŞÈ=Ĉ3uEÏsŞĉ!+[!ï˘ú€W€Îŭ€ç{ ö9XĊ ÊĴkÌħ+ Òü@ô)Ŝ Ĝ9ъjÂëŽB î`…`( şÖVgĞïĥiĦž@ċĉT^Y(ˆ;Dë‡{À“­∰ÁòóïĉÎjÊı´ÑQ]–zn!'Ÿ|íŸo(èÊ>˜ïPèj˙y–Ĉz³Ô”!(+ ‘@eµYzĈ–óüW‡‚aáÇ2€;tşżöó““!nz°RwÜx)Z!ìĥwç—#â[ŸÀ ˙]¤ƒ8"żJ%(ĉž¨Öĥw‚ê³VhOT„XÑ:ĵ)J ™sÄèŜ˘ŝDİWĈ˜×fQVT`9öˆG>ŝa.PÍ{  ™İ֕Èĵ*ÎyQŬLüħœÊKü‘´ÖŝYšsÓ2ô /GQšĝ`@_8ZĴfPÛiİ}ĜEê눓ì^³YMż'Sêş (èeĥ³Îsäö¤ħ=÷8ñŸ6 „òÈ5jċ{„*ÀÈ˘ä+Z>ĜéĥL8-šYM_G“G ­at™ğPşFŜsú ´Ħ·—Ì^AOáĴĦ<„ÈzjÀxñ,ġoSÁè~^޲ĉîA`[›V(³Î´› ¨à5=;ÈÖyĜSMáĢ6l˜ÈkÙ&¤-éû†¨ë[^×q@h½ċ]7EôÎĥ•jˆñÙFĜŞ;PEĉ?ìĈğâ}YÓñŒP#*ْšyĵÖğçJ!B†+cĦħV<‹ô5” ‘Äu=ĥÄĵ(î ˜˙•Ÿ^‰Ü´ĜĠ\B_î*<ŝ°b‰_ Ó!0ĤkîşxS䕧+;'×@Gñ‹˜-şâ$’„Ħo䳊Q ú'ħ+ …İığ_´òxÍIÀĞ£…f àƒégĦRñ]x÷ù"TH24ž5FÔ"*ñö³zˆéġ!î[Ħïâ½uE ïC/^Ah[ /ѐX „:`” k>ĊòĴÉ;†{Ö,˜ı›ä^Ç …TàŻä~ÑTÎáíGƒ§İÏ*D1$\€²†“B_ÁNNÜmôTCŸL{Ê×TË-„IPü”µáH2z}ÚÊ nw™mÒ0­‰+äE€ˆ•ċ!ŭÒğ nĝzĊx4ĞY"[¤1ĝ V`ğƒÄMÙèġgÀnÊëq;Ġşß*s‡?‹-d °Ŝˆzċ+Ìim=P  ž‰ÇÚ”™ÊŞÙ‚şMĊô>²K °÷ßù·d ŠÑUÍ3—dÎ0û#D”`|DZ<˘ƒ›b‰Ĝ>çôŸĠ ÍËà ]7Ĥ^VşCduµ Ż;m‚ÊĤ €ĵB£Ûœ,es]a€ì‚‹%SĝÀ‡cîòD5(6·Ïĝ~7”Èm1ÔCν,ëĈĴžŞ÷ñgĥ/q2Rù #ÓD¤5t-ĉ:ıyÎ÷^¸{§›Ó\ c] E)ñ_0$UŒœ–l?˙އ%ÀĤ{Ë^ž q„H 
¤ÍBï~àóûrc‰bĵûħĝTÉEH(KÏżò‹Ë.=iΨèjÁxz ,jrşïşjERĜQŠuûäú'cIl?çı-b™Â툗>û`%‘ˆ~ĉ)ÙK3ÍĞPÇ"ÁCE™[üï Z`Ġ¸÷MWĜcÙŞÊ™ ÷&ğĝ3Á1`s—5oi£?ĉ%ú† ”ÎZ(öyxÊï:˜½Ĥ솷€ì~=›İ8ä8a½÷lPc*ќ…ĈÀ,ğû‘+„6 µM<‚gkÍ=ÂĝYÎúc°>š47ŠžçùÉüŞ‘÷š‰@H8ô— ½„Ŭ#sĦH{5ċ6v# ħlétz@WÚ@ms˜›§/|#ĊdüıŞÌ=Á×9Ô½cŬëIĥıĝk#p£L_VŬ×ÇşĤN ŝċ¸"&ñĴ‘Ë:¨ËKöÈMSÄb h³,Ùñŝ€ |ż˘üñyğu׎½Û5›J3 @ÀTÙĠ% Ôô?˙OÀïŭzeÓIRĤKŽ$uOˆï)ˆ{ ˘žÊş.A["Žğz÷QÒóWéoŜd¤żzžôèîĠsÇ"ĥùşXè*K@|xÚsÂ.¤|'ŒÓŬEIG–˜Â$@ĈrŬ•ݽ|Aߗóŝš \rRĦ½éa&4êì]ïĈ°ö8 ĊêîÎ{üvuDkı`à#Yvs‘ ç˘!bzs„ŭÒĠı<Š7A\s°ßÁ4ġkĴ}˘Š—§Á¤côϵ%­ïÛKóUpOFġ9žĴŒ¨8Iŭ<îa1µĠĞBΝ-„Ĵ5 9ŠŬĤĴf§‰,d†~ò­2h‡‡"$~ĈKĉ—Œï<útÜÄ|–ğcúÎPô“q„kmˋ‡¨B Ĉ ñ]ÖIFÍGôŝˆ:˜E[Ŝ“γí Fa‰ÄvÑìúLĊ#(ŻĈçŞ5ŝRİ0DϨkı‰À%Éġ*Ġk”!M^:Ze*™ŒĊҜ‡ƒcäˆİßî¤ÑUˆ÷o60PžW)öb69¨–ŸbŠ.úŽú“µŽOŜ›Ì}:=vwbÏAˆĠ=a”WŞŒ€‡{úħ†q’€/4FôzĈÙIí|ıÚÈ=0r›ñJ]oI1gíŝ7äc‚e x oğaì/ 4€:K}žX(ô")§ñ›T˙Ñr=ĊUÛ0YJTĜħÁtP3µb_Î9'Ş“†ż=çX_(ĠèV䉸|ĊsŬÛ7‚چ‰~öS+dNÜ$‘˜Ñ‹é| –¤y€q-X–dqñ/u°ÙQ†•œ°e BÓû!ˆqPÑb6ğ_4sı—V‰"ŒYó÷%”ô=°ó{ÂñĠÄ:"fîµnì“ĴÂߝ“˜À\üĦ61>66>Í0 üùUÎäpçïÂĴ'ħû×y™Ğ‰Sˆe ê ß;áĜ_’°oŝ,"j´òRÓüb·à˘OB0?=Ԁ̰=QÒ°£fɃÏ[dƒÁ}—âE“ ŝ&è‰Ñ#O“qsЧìŝ ŒZÁ[Ä=˙ê‚xa݌¨Ħ}c°5ƒ§éˆž”ÛSöMAö0ğÈDì.Qş,ÜÙËŠzċ£ŝԛĝid=Z8ñYÊàPÚġ‡anx x ş€†2‰‚cä7ŭÚΜÒÛ.ĝfA˘÷FԖړgġé˘7sHĴ=ÉŞ>ĵĥĉ§W‡cÊ]Ùiî£T$Œ&%O:ŞÔÀÄ[Ĉ*n}'Iĝ[ğ†ŭd2@""HOĜ~bŸGäôÛ(ı€;vœN ›Ùï䍪F6è•Ĵ^`ÂzğŭÑm^h(Ñr V$^Ĝ9f”M]SÑÒÒg'ŠËž™¸¤J[]çûQÔÎösÓÜHêQíXQˆ$}á+Öô›%˘4÷ĝĴĉ¤)IÒ?ħï|ä/ƒ¨%A5[”“ĜZN…dfUÜ&é-9MÔ÷ٝy÷÷xQNĊ9>ÛcžċĠġƒ„֗~zŭĝúÙÈ]›Ö,ó[0ÏĠÙÑÁÁÑÙuŜżek6íŠ<{ŭñëOġƒ|V]Ŝ³˜í>sÀ>H–4Ŝs?‚ 8½DO›âŞ úġ4xk$œ˘A90)H‘ñԉ÷%úK’LOÖ`ñî4Ñ%oĤYŻÒ%Cаö(u’[·˙ĴĊ/ŻXMS½41rJVüààÄYiéèİM³xĞdDš5hU‡˘^ÛP˙n¨ÛG° ôd_Á.hñH•‘ü;öfŜĤĴsĴËPĦ‚M˙y‡‘Ïb~ì'¤€C$OÚk´Ŝ„ï¤ç7Ë/†\xq•È.Ì[kì.úNĝ0ÛÔZĦÌ3Hë3ôËOêqÍ%ĝ ‰ĵêV\ŽoûU"ĵ+Ż‘Y Ĵ¸™y Ġu*öcYŒUĵ‡0t¸î€7µœ×˘nƒÀ*™XFŽÁ„Zz½)§™*Şo—„ÌKĜLj9P´°~6À‚Ċ˘g—".…XŞucûĥ:}K ïİ :˙ ^,${Nj`]Ŝ„M‰ĉŭŠĊJóf,€†}é~]Úżüà(D1Ùûù˙GÙ†EħnkÛpu˘›œsÎ9ˆDEDD‚" Š Ì bB00ˆ ˆ"ˆÉ9ƒäœi:W}£˜sݵŸ÷yż/?öħ÷<ötıèêŞğĈ¸óœF{ŜlĠ"QBßvÀ{`ÊĦ’ÓġŠŝÒ ‘ÍÖ/W7Û@“‹"câı˙ĉ§²\üAn-ùòòúİŬnËéİÊIŠ Òp<ÜÂàâh‚˘’rŞz‹–ıì>uŭċ—’Öa¸f¸3=eŸnî÷4‘ö´‚ÍĉĞ_ Èèüí!dɇRà-°m¨•¤µġM:ŭ°1™ßîjo:&WÚGˀnàJXó‘N˙°šĜWXÜaà!ĊżË]E0~ÍéĜIUıÏì Ĥè½G‹—#>­ŒK€ ÎÔ¸„O‚Î0˙,âżĈKWRÌà'ŻèX*‹!ùN™§jÄQ(•_˙0Çï&>ĉl }™\˘ĜT'ë 
}îËÜu’ġÂ[°=j…Cħġr}@ ¤è‡yĞM]£Tĉ’:Ìٌs@ĵïòUè×çˆ}í’öÇ˙zħ4‚œ1˘o:žFك$WMħx¨¸BÓın%²Ħk6R1ċЧ"ì‹Áí7Œ0Çù{X\pG=VìO“ĜWU‡'.™NOġàLžġ’2˘ĥ7Á*>fħëĜ6³òŞ3@9\cJfà;xÑM‘DVY™ ¸™†QŜF`ŒÑ^ıëÚ§ÊŝyĜèŽĥŜ ġ^n ĠÀ…!—„)̂$ĊE„pDÈÂ(ʁĤ˽C#ï§µŽÂ.yżòÓµ]+µEÀcäġXÑ̎ôÈĠ*d’˘ÛE¸˙̔ĸÄùj%“] ħu£cĊ,FŜ^5DtWÓɞüİtz2PâÂŞħú}4˙bĴ~‡ ĝ῜wĉ¸.â·Áê Vä„Ĵ*FŒ`äl×detêĦX/Ĥù6& àX7Üa/–vuÛ ĉE@9èÍ}ú§” µü[@ĥ7ëá$ĥ‡’6nŞ?’AŽ@Äêño³żtĜ€ĉŒìı‡„Zż ŻúÄp̜1¤ŽrcBa şÂ˜EĞ. Ç̛Y1/“²ŝÊzĦÇ(ß{U—3k/ħ?]ê‹G³}YÏH¸ n?âC8I4÷İBE½ş!g W^&y5˜ ?î ,Ù5Áğ`=" Ùĵĥc&‚Ïä;–ĴĊlĉ@Z‰Ü:Pċ%ó6QToÍLÄ(S|ŠħÚÂBÛÊħĤC²$'\ħŒeŻ³ĴÜ`9‚ wg³vŞ"RŜ€ˆ˜ĝvçòí|ßÍcÖ?ŜĴO#HC[ğl~ġïN¸k ÚŬ2àlRI/EéŭU€ŭñ°Ò’Á3ÁD0C*i[ÙğxúnŜ#t×Û7úzşĜ[k+WËÊhYy„€h Ş˙3zK’ÎX‚^PÛŭÄğz0ĥ@]š@Óßü¸žÉë~żçĝ6xo)DugÖ,ÀġrÁıĴُ^@½Ĉ8N$ÙCMXù6!áµXħE9fbĉ–*eS/$ŠuÜD-˜M×JĈ›EÌt@İrĝëú½  c_,p{&lŜ0ĉEşŒ>Ħ9Ġé)NıӒ@ÜpŬëIÏX‚Ù£‹Ġğŝˆyq! Œċ޽ß)ÇP/ÙżY¤ËL+³ùrA5-ŠaĵŬ˘0qƒR‡oÜÏJJÌ,8£B$ŝĤƒUÀp 3†ĝyPÍĵ£ÏĉlĠ{ò7A×à7Š'Ë~Z(”XÊNĴLaï€×€x>÷ħZCİÏà4\?ŜhÍ úì ğ#Ĵ÷ƒw[Tö^Uä*ĞŜ ñ¨cC˙éÌä(Ĝ’cg‡OŠáÇ çĈ‡ nW„4Ġ7í:­L´{9ÍHƒTèşÏ­ ’"šœŻGÇS6È Ş!éX÷KEDÔáBÄqżž°‡µüÎg5°Ħm|Üé4UÇŬw·ÂĞÛ|_ù§[‡Ö/Ġ–w8ˆ€” l\Ö‡G^½÷ôMê—ï?ó‹Š˙..Ê˙ùŭKꛧ÷F†Żwħ1PĈġBD~iíëŬúTŜ¤éÖïwv;‚ŽˆĤîzïÌÖ<Û Aqğ_!œ\tÁAQôٍM¤‡¨"2RĈÑúó&Dİ h·~^™4ĈôK;˘òé.4ϗ*ÑĊÍp ?ǏÀb'‡gcÁ=:yZsì:Ä­žuUDġ=öICô6h†oáżuĝz°s~–2Ĵs狇—€ìÁ•…2–ċ ƒż–Cu+{ Z<›ózÔm瞁ŭ×Qm31†ıÄ\ é%BÀ5Cİü Ö8Ñc¸8Òqû(ˆ#ßġIoÁ|Á0š0clAż‡'Š wŬ6z#½"GĦóîƒĤü.q'÷%ßşÙ\i³ĥn‘dn²^¨!Ÿ öh÷^âĦùJKêyVİ%ċđŝgN´Spqäú&öc5êînĉ r`-šċżŠùÑGfpŻh!Aé- àŸ˙×@q’ĊUŻĵqäÑGĈ9‰€8ò.ŽLq¤ş÷µqäÇÓnñ jğ}ħ ŽIğu`­…2@Bpq¤µ[Pĝ…ûI_ò+›ŝââHŜ üWÉbàâÈżM•ù_’î_r³^G’„•-Ö¸•V3ĵ Ž|qÔ n/4u·ÓđנJևj ·íˆ#Eœâ@Ypđ@ĥm½jAÇĊ‘Ÿ½ĵAùÑş2ÈÉqdÌè<\öŽYhm Y³{7Uí1ği=.ŽLÑĤ˜ùëâHú Še)ë<Ġ²rŝÑ£·ÏÜљò…ôíH7YÄĤğÍL:wvßKîN¸ì%ŜçSŜ€8²¨OÏh(›ĥ=”8C<ۏna<“N`El‹tÀğDħ9ââHä[-_ܔŠ?ĉİ9}‹˜Û(´uWJ!ܚ·Ñ{êÙµ’ [ µ€sè.şŠċÚȖYKŽŻúû¨XÜa8ó[]ù×|(^^ӖzWëbüoíÜ2½èXÇ8ËŻìC 33=ÛIfXž=âRŒ“ì³ê@ŠÒ•1úSÄêĠüä}Äèĉ0燿Íó#‘ıAŒls³ ë‰w‚Îí…jĝŭ_úŻcĜXöI€ô‰ÛE¤us¸}Y½´ùAŬµ§ßUC‹oĥ=÷Q„%ìƒ> kÏgîËİhX(˙ác}fjb|lttl|bj†ÎĝgĠÁĥŠœwwÏì´Öàƒv¨’OÄ£ÜöYhVż;½VWá×öş˜ĠÇċt§EÀŭGtéÉlĝË| yĝ˘p…V_€³S|Öuӆ,ĥ!“A˙èIò˙Ái„˜ÜŸœe…˜<]Q˘V£Yö8'£ĜħÏ2ÌHÛ{fÎhUSŸ˙,£Î‘¸oşĊ˙.-†ğê{)mĵz:˙KYŭ÷ n댅]ì'Ħ…ÒÖC²6£yb  ܧ zĞĞ•\ 
òç÷cz6ó·)CÊîè>ĦĈ\â­iMOÌ_e*ŽŻöÛ˙šŝ;(İ8ĝ˙>¨ûÏ`÷˙ëàÜ|“'âÙÀş)-q•ŜBÑùwÀĝ?ç`ŽüwÀçŠÏ6ü_s€˙÷9À|ÀŻŭżĉÛìĉÒN›ŝ™LŭwP]ò++ŭÛ×˙Ì^żŭžġФúżs€İĉ›œpB Ìì·ŭŻ9\g0¸ó˙>ŭżĉ9€+ß˙Ì–$ŭŸsĈżsJH7ŭŞ„ôMVü˘š J÷˙qÀŬñŸ9@Ŭ˙ûà ôŝŝ™ŝwpÇD¸ë͟§Öä‘n‚TˆA+Ğ܃H4•ìÀöRs@C=ù‘ĵ^°ŭL;‰ç-“ĝ ;ˆÌ͐KB ˘˙Tú3¸/òj?Ĝ'IpĵuĵËÙ6ÒÌĉùs‚šïħ\Ka1ˈœŸ8+!vbˆ~OƒìSŠ•úAF˘—›éJ–?Öş  pJšfáċà ]iħ$ ı>‚ì7ŝrˆŒ÷v½ôг$0bN~f{Óë½6>u·S) 3({°äċq/i B‘2pŜ~v:½ÓÀ}dώ@÷û˛‡×ÎEì Ù´ŜËŬuċ '§+]Ŭ½Öo ÙqîÚ7_ A>2 Ùöt/l‘Îmw6‚?HÚÄëĝ˒A6:ӐrÊM a³÷uüvr™"é|”Îkì-ƒÈùżqĊ#W!²%ûî/ĊëÁĴé$€0 ġ˜<Ù5“Û é?ĝ/íCÖ¸G:!&qvbüŒˆÌu Ô,sħ÷š‚çĉáİĜ?² ñìß Üíż¤“ìjò_0ŭGKá'=ˆxRù›™a+÷SbÙXž¸Óô?öx ĜKŝ8 Ò÷ê^l‡@I“DĥG°ŞŒÁ)ĜMR^ ġüĵž;‡9Ô}A>ĥ œa:£;$ş?"OĈ5\xÇ@,!êŻżJN€ŠĈñ)îÑ1,]M*9ĝ"6²Ŝ¤ö‡”Ċß63X#'¸)′Ŝ€S͵%^ÁC;‡wMP˙ï„RöĠ€1Ġ·y,K‚o7IŸäWƒf,AlĦ%y@Thg=VÈ/Z…5Uü;7H5ŭ ^D]ûjûûM öÁİĜhĉA\ĠäŸÎ°ĦìóĞ•pzëá¤Úi”Ŭ_pż‹˜>…5ì7Ÿ}žÛá^ĝ8û >?=ħ;À}ı…ĦĤ’Ĵ¤¨†ˆ ‰JÊ*iZ,wĜ}"öé炆>ü’aŒ4ċ>?ğÙ^CL¤Z.ûô³ÑéÚ¤8—Viġùì!†%ĝúރ™£Ĝp*lƒĊÜŝĊ†^­%-‚Pp?DšÄƒrÙ`W8ڄU…Šñ–`ġ;…D@'ÖY’Öĝ‘ô“¸uË,H„ĥ÷MEP bIJ÷y?ôŻñr´aħöJBço ğx ÷ Ë@³ĥż_C{ËÚĈàĝÓW‹ ëô‹xK@<ÓIéï/*ĜżÄA|Œç˘1ŝùĜ-ħu6dœhŭˆ|İC@Ġ™ËÔşï„gò˜Żâpòb\ˑsž”Ù§âÄşħ²e*żE|8·‘h(ĵfz ~(Ŭ×YO~\X7€-óÙ²úĠëHwâ†esğmN¤ÌrK(f•)l˘w#Áİ’s_Vüê|çf’ÉGôÁ2 —gĞߞ™ˆU%Żù…6­ÏuGk ĉw‡ı…ÁR$›ğ}°6!JĴO‡Ŝ½ƒÉèpî HU‰‹ÓPFŬ­Fˆ˜ċÎÇeĝ>?~÷rĜÜRdÌÖ}ĠíNt~¸ıàŭ¨=ëW,ց8ß?ħpP…áİ…\8ċ™,ĴŠ%tŻXż'êŝ‡‚ĉa(3G²]g£ šÊòŬñùxĉ ìñNK1DÀhë::X€¨Ú°™ÜF$àŒ'Ż— šÀ6¸ï I*¸;|×шîĉċ­§IìoF­!ĞĈNÌÜVÇġéi–·?èGÒĉÎùĞâ²÷9•N„½a$ğ*fŸe9óÉİm|#²kÌPüz´nĵZ_6{~ ÜâpużçìW@7~ôDÄn4ıÍñùŬݲ 7XN*}™¤óG­ñHÒ°˘/(?üŒ½ŽzyF5E†%!àˆn•…ĝżòĝk@<òxÎZgè+eä>L/ÁóÖc?DÜĉ>󯧝§@ԕx`;äŜ@`d @â(*}ùO β‹ ı÷òËòÒxà° |ÜìÈi ‘pÈXüO`Ċ§òWz  y€Ĵ9ڄ Ûµó˜œş’ˆÔŞĞ³ˆ\yíġ˙äŠìĥÇóÊv[!€è5YÏ.„y/3„\8. 
ó*°ĥstŝ'àìhg y<6LÉp™wĜ…gY50˜ï‡<ÀV;e<`żûAñ?y€ġڐX y€ÙÒĞФI×uĉŸ˙ä>HL˘›ÁzdA”ƒ<@ϕòŸVüOÀòá"§Gfäyé…<À²…<À}nábxŞûOà‹9£HkF òÛòf²,N§Żċ˙<ç&òcÌÏ,™ŝy€}”ŻC:Ös!/˙;lXv+êyÉaäħ¸ˆĝpXvê˘7KÊЇ×OO„XÖrÁ/€,úĞf3•JÙ ¸àëĵr"˟ú45ϸa„‹ ‡İĴA͘'X£^›×Azu÷\Ġb_fiÁ ›.„Û*ì‰ÛÇò|ĈrĴë9O5ĝB ŻO\[‚­%İ_Ÿ ?5C=ž™{µ” r²Ğ?ŞL0ıÚ‘‚T`Ú4·*jY`y\=w8ÊF€¨éE‡Ŭü|› p 1›ĴxßA‰υ‡ŬÍjVc¨öëÓ[]L•qè‰_\QÇÜŜÍoûcgŻ\żûàÉ󗉉/Ÿ?yp÷ú•³Çöm÷s³7×Q_(‹*›şl=ŭk-°bıS-YwT8Ÿ’ŝç“ĝKç1Í`Ûóf6§íĊ&u˘€MTñ<·>nıyQTw:-PŠ`ĤĞ&ċ£ġXëIÂÒWs3!fOé×ĠIk‹°’µD}¨G„i<ċ4'Xċ`Ÿ ĝŽn'ÚW@”R¤kbZY0V_\JöM“uĉÂ/Àċ9Vc ‹{Ża a\¨h0ßQŭY‰äŜuïĤ¤NÙ¨ŭ- @À—AċĴX§%` Ġç‘bçô\m²‰EĊÄgÈ*Söñŝ„ï ü'ÙVÌó”Âzá]`j˧†c›DʛdÜĜ·‘ëÜġBż†L4;jĴGaó8pÊĦ ½Ĥkñ fpĞ’„­ÛW“/£)RZ^ 9·oC|zGĥB²iî8Ÿñwì­şàÜ °œ\´š`ö–Óı_Dô ċ]ˆj—8_Ŭ)R{ްŽsڈŜùVĴ#fQ20}†S~҈(ŒğÙ§4˘™È†\ĝeW9ĝ6'@Fw9ùĝ*5¨h ”ßMǸ“­ıÏ"·8ÈâŻôҚpoßyìò½Äß ËëZ:şû‡Gàgx°żğ£ĵûÇÄ{—í„'…Ĥ44ˆ²N["ŸċĥNr1zwŝ£UšP P[u<ıĵqċ ›áړs½ İáŒ&|£ƒà4½0Ñèd9g&=P’¸(Ĥk=݇hŸëÀŞöHQÜżr.İ] qPTd'ç­auNÎv)Á1a`>#¨ŝûnÌw|²t[Gz}míÎpÌӒJA/“WĥY '“-„™!nŬÓ¤§R1ĜÖ ĜяZ+Ôthš ŭZϽŽÜfğÉ4•‹lÂÂİùmàŒÚ%\_H9Ï´°bŸäoĝNxÑ/~€mş 87ĞsÓ_àĞ("_™7µe'˙l…s^ŝâé5ÜK„sv¸6΍ù öL‘HÜÂÌ! 4%'h<Ùw6OAš äXìİ Uó˜/á0³H_ôöNNá=àd_Âbƒ˙ì|вޚó@A8rjĝ°€âíùƒ‚2Ñx4rhĥ ˆïkÄJ· äñn˜ jÇëħž@[ H›áü9ĤOówôèÇ` ‚Ŭı˘Y´˙ӁĊ lp<ġò }ß/ûàÚ8u—CO‹ûY÷­ĝpu—Ğ‘,  żâj‹Wm:tùaʏ²Ĥî‘˙˙Ú¸‘îĤ²)/Ú´j1hšĴ‘ëĞ*=Ìê/~zÈ5éù\ŝŜ‰/§AXħĝÀ§~tĥ蜝A#ĝ(Ĝ£=ĊˆúÇŝpfÒ€s{£Ğ?†˜Üàċ o)Ċ÷‰ lmÜ:<;-#x°oŝĥ˘ÀááİHa…œêUˆWëüY~X ½”@À{ıwĜ#Qŭ"ĉa‚ïX³•àS,– =]…ĵY_rü‡pvsĝt. ‡ˆÏ˜n¸6ÎnîáwtK1˙^x§kŭI>ϲ5żB.Şàğ@×uÖĞϞ@(Ké:†÷…šsˆ×+ψ#|Ts‚bëéĊşCßĝvA/àcµĝï~cÍÖF5£îZUèèĞ”: ‡UEp߉XĥşR‚á\˙7 am˙€ŒżnZèÄB ˆWĥq݆ħ7ôX^ša‡XĵeŭO/ BŽĵêTé—ċö–Ħƒñ°P_˙qšó'B áŭjHÂ{ĞoĠħ ¨÷âñ^@Ċ£ ĵà¸˙qñÀB/à9ô”aôo/àIzqcßÄ{µokÁ{-mp3ŝo/`˘Żħ8ŭÉż½˜)C/àùB/` ĝñ~Gĵ`òL•³•ñx/@#zĴş[Ğñ^@ü_yKô"ŝpĤ?‡1Fü ZĥW^kĤ˜ŸV‘ñfÈ?½œ|ì2Pĵˆ…÷ıĠîȊ2D‚bz{߃Ÿŝµ„ƒŒßúâoĞ×ÁVK‘w0y<,cËUôĦ ZÛm¤ÖĜŞiÜ˙[|5ôvñ}Ò] ½€Xĥ“Úà'BD׈9Íŝ^À1! 
}­£ 6ĵm ·ÈŠI7§µV£Ħb­Ù¤+L[€JÄÓ­uFÒÈX0˙A}£ÁŸBŜì'>żĵ §8…Ċvq^óğŽ—İĞ•NùÀ†âıU3¸-³jJĈžŠk~ŬÑÌÀ^ÈI=àÙƒû§ŽC=˜[í‰Ĝ~Ç W"VŸy [¨ —FéOŒÛwÌÑ뺈Ċ)ĈÇĠĦɌż€uúÚä)nù1=‚äú·X÷S/)D~ŭ³ŭÏĠU҈°ġĦOŬ\Îß´S.Šd˘”ÍŽûùí§wŝˆ?àf( /âZvŝGnĵÏó xÔcv´§Ş('=9ññŭ[×bc\‰‰½vëŝÄäôœ˘Ş–ž‚úòŜß8âo‡;g(’†nâtÒá.‡‘Ĵèr*í/‡Ûŭ鐵0"½êê:ŻÙzyDÊëi76üv½$AïX9w*y-^Œsò à´ú#cêħ˘{}”ùÎ1~B½¤@ŬÒÀûl…Ĵ,ÄÛ"žĠ\(Ÿê&ÚñHɽÀ24EîiŠ?Ċ^Kjŝd/k³•sĜğùL•Şİ—ğòżĉì"Äa§$.Ü'>a{ ŭ4ÒüÁŒEÓFtĴá4˙|ÊÀ–y…”Ŭ*ŠÖšI*ŝC5Ħ°gƒ\TĜ†Ú/fnÏ@žŽŞĴECE ĝNÜşs(‡4VÒè&âS0]x3ï*–?l)_Ñ·Hħ|ÈV"g֋ï ܄ÂĜiĤ#îYx#n‡’ğa`˜mp ˜i÷D6ôL˘ƒ#% üˆ˘2ħsႲWĤ&.ÉÑĥĠ£9”…ߌ A²½Q˜m:c¤ĞüŻĈ!]§Oô{?Šu&À @uëûlòÇ !‚œ[ ôBfÊ7A?ÜpĠ,\Ŝ™sgÏ -xwP²ôż‘RÜ6Ĉ€„ÇüXw}QĉÛqQ‡wmö[êdżÔĈÚÊÊÚfݽ“ëżÍğGĊ=x›YTß=ÀhcĴ­8ċF¸·’Ì´Vìı“Ó‰ë³n0„v¸Ñĉĝòh†Ä¸É„lNü˜ÄŜoU…1@B'6úŜO’ IĈñWü Àe¤ŻċŜZtˆÙÁu 5ˆ´~MîÒÄÔYÁıX¨L†Qà˜ü͘rhŞgâÙ>s€ä0{h~HŭÜŬGM%ÒĜapïÂç5›#a;T¸¨ŻBŜr8_Ìuŝ ÌìNïÛAħ½‡)9Ŭ îĵ|"ĦèZ•ѧHFğaöb{t›ÂäU¤é’ÑH‹ĤëFIijáüu"[ħŬüEírĞĜ·Àç)^Öİa>’+ĵ†‘DÙġàmóĦ·ík2”Ëo&DÁ{ ì*}ÇÙ?ÔßcñÂúyœ+ü&ùÜ[˘j)Xš˙éÙî ˘u.Œĵ‰͜çàKlà}´D–}áµìŬیĴ§Jî­êȒ–' pƒ(e9À‚ë˘Œ`~mżğR˜¨½;c "x{@Ö§ô´‘ÉëÏ<í,G$+ğœHiš˜Ŭıww;¨  ²"äRRAÛ8 EYŬĠıÉ —Žûş,1ĠQ•—À{á-q!1)yUÓ%.ÁG.%$çVwOà˙Öx[AҐ !¨9ì›ÛÍÄĉšRN¸(“‰rΧ3ûyÌĈ§L3Ü!ĊħŒŬÚDá•wÛÑÑ÷00ŠÛÍ£ċÑ 0 L\N’=˙ĠöJR× Í{E…÷´,C,?ÂNœĴŭœÓ@„%I51¨{ö4żN–˘&z‹›oÂ…“§/½—V˙Á>N;ĵF6Ósċ ›úlĊżÑ} ĥ ÊÁ[(IŒ5Âı#ĉeâž`ğĊ^%×^ÄżÛ*RWÊŽ'ŝ(½]­KĤ5f ŸšpFM_ċĞùAx4ŞàùËCžNêĜ³/Óq>Àm°G/Wj‡ġ2 Ï%êˇ?mÎM(ƒî›è@ݍuAĵNĊ¤/ĜR$ġJĉÁn2Úħ’1_i ĠĤšċÄ]cŭ[Ë˸äĊ.Ñ˙n#Ĥ žóx3NċĈôĵıç˘m‡¤¨ŜıĵŜKڈé­A^ñN˘ċ­ĴjĥbŜ€ kğí$DÒ?=…öĵR'Òí˙ÔÇc6<>€ˆyƒÒ1g°è!ĜŬ²”ÉšC÷2ë†ĦÏm-ŝô zß+½˙Ċ |ĝëà_sä@ÏÊe蟊[Gñw¸.óŜĦ5&RdD|t‹9ĵħÒÁPE4 ~ŜÀäġ}ÚżˆFTzŬƒNe' 9Ŭ>À[o1(5·`=·,‰2;‹yƒ·LíK½ĵ\oŞÔĦ64×Ŝ|§Ĥo¨àíȏĉÏ 4Ċĵí/ŭ’˜ünÙr–ŝħ]Äċ5P­³œ ­ìÏ|‰žd |'+täsAÓ ÄĉÊ˘gıÍñû°^#ê,@!ÀÑ´ÜÑ·q>@:ñ"Û^gò)ònP>d…ÑG„5|W§Á…LÊÀ°½âŬïñ‹#ó—íM#ܘ·2œLDn2•˙ĉ!èñċĵ³te'~Ȑ\>™#ê<•+ħt¨BU·āݜŭH8'Mʰj*p˜ġKn÷„ŒŠĜçĝ,˙°/ò|G_*ˆßd׸B €ó@IĜĜdLÀǧÈñïháeĜÏĉ[ş™¤OKIġt+ÖÊeƒ ­ĜßÖdaÏWXûmàCù5ËkX8"²,ò'0ڊQfŸÛÙ/Yèdġë£Z‘1_êy~DúYc­…É7mvµ’‚9N—UÖÔ31·Zbgż|ı½Ŭ+s=MeYñv‘OHFËÂuóħ›É…­c€˜îÈ~j½9̵\žDYżœu6ĦòšĞE@ŞûıL\x;oö×-9Ŭndž_y “­oüĊZ/€ğk=­ ôĞi:Ä^uoMàgû ^Ë~ı+S°‰š;&¨ô€q×öMq…—èwŝ‹ì?–|çĜEFB÷à ù‹u˜8Ue(•Ĉ GösRAĜ 
ĞZ1´T"wÊY4grı$~HïĴ”vžI<” ĵżÊŽÌ›Hâ¤ĦĠü BZŻĴ?¸aá6˙[|/ –ŸDґôFÚĊ9(@$0µW&Û&Ö\êhoé–ß‚ÛĦ6fŞÛŸ'äˉ‡lĝ äĵŜċí8ÈYPFĵ¤ıŽUj+ŝ˘o€á90Ç$Ĵé\I:Î,[ €?Öä#3í^8ŝô˘ˆÒnċjÄİM3'Ĵ­@żĜ"Ë2ÑĈ`~İ=œ4'˘úċAÎ7OHÏŭA{!ŞGĞÑÁ‡|Ğ dŬy>“ctnÓ]w)(Ċ•ÑсŒˆâˆ éö‡S£##ÚÇP„@–6÷‹J*Ċƒ³Ŭİ·Ž8á˜ñ‰ÊiÛĴX³aûžC'˘.\‰½vŭúµĜ+˘NÚ³}š6Ĉrà޸ˆ˘‘cÀÑ[İ …`IiR”Ÿı4™ bèÑÁÀĤ*n7DėFd  ô²8¨IıßmâÒ Ž™Àp·‚ìĞĝ˘ĠGU£Ğ=èH:z~ ^V':qzNHñ7˘™ËÛ/hĊZ‚yZì„Ĵä>Qıˆcu½Úgސ­˙@`qó8ieçÈzÂApÇÊ} ç6ú/EíÊ1WÚKEœLÈ~Ŝ]x ĵ÷„xŽŻP^żéXTĥ ŝn‘ö}tA³Ĝ6,@Ĥ7B  ôԚğHk„ V˜ĵFİŝAx2,· J  x‹cŜjÎ>IJÑ7›ĝH>ˆ&½ž_%^Üo˘ÖÔŞÛÖ˘£Yßc!ŭsf% ĥ›éyÊjó;Ŭóżġ$^C1À¨ˆEħİò˘|Ŝ}Iù§ĵâ–îÙhQÙ[ŒÎííçìö4ĊĜİİ82$ÂŞ· ŠîŞĊŞw‰S×|aM>ƒ>MHgò›uùŭĴ;aÉäx$B;‹! Ŝ Àybƒ@§“i8żğĝ~ˆ•4 ŠA‘oÊà͝3֘ŭĝÌf'C9A\*l¸duÀîco?}›ö=ïwyU]CcSScC]UùïĵïioŸŜxlwÀê%† ‘ ’ œĦÓĉ3³Ç80U({é Ġ ’´UÈŭbœMžv:i4£è'²ĵ1çë):nBZ™ġÜ_Np{3É)ŽÓ³I֗5Tñ]ĠXí.QÁíĠ #ĞÄMMĊ*Òv´³ŸkSĥw2nɊFÏvo!ÚžÊKŜçċ/šO %ŠUdµ€×zżçw#;ĉ ԔóàuK Ĵ›ù)mÑSŻİÓÒĤĞĠÚ¤fÒ_,jŝ5é0è">N˜é¤‘NâÔïêÄ<Ŝ0Ê?›ä†Ÿ~TSM*#ŞÑċ‹9Äà Ü# ĊĦ·ÈiCëù;H┙ñD ÜLĥÂ{ Á@ˆ'İïêq÷ŸÏ\ĥ”yGô”Àżc>_Máç$Ñs g5ñ(£|1ġû÷bS€=E{Ĥ#¨şİpÌ!ĝ7³Á–ֈWÇ™ËӗŒ‘Ë*¤U™œŝKĞ'“ÌŒµü‚Ŝé€×#YĊµÊMâ–E NÍUÈßĞn|ÑÁeÖŜ_˜6ġ7‹Ĝúá„+A„t<Ž'–⃛ĦŞO×÷yš+áÀ?AY][­‡/%ĵŭZXĠÒ=4‚0\°ˆÒgĈ‡ş[Ş żM¸txЇ­,\4$!%sÏ}×?Uv5XšxÜCG!'>´'°ĝĉz ÀÔ­ż_ËävĵĜœ@‡Ğ5FDW…Ŭà–ĠgEóÈIş· ˙Ú ĉä+‚Ĉ~Nĉ*’ÊċĈKSdy&>Pgô„ÑT²›ŭ pDNĠFL÷"€×=Ċ·ĝ7ûuq9(quπ'ñ$盂Z>|͎pĦ×Ġa.•=ç÷Œğ‹x ĜmwċŝŠá)R0``o[áÁ2al6•ˆÜ™·6œ~ˆĵR‚ê—dO2<Äp/G7Ş"ÈğÁ3ózĞħMòϑOŬR›ħ@İÎï¤sœ•Ê}Y”£èfĦâN•³oĦp|ŒxÔé3.˘9SÎ"™sëÀ[ıúiİ#uFRé@ˆÙ8U³HĝöÊlœó|֕@9²(ĉÜUy‹e™PöŽ†Sµ’x•ˆ-Ä`V%sÛv Jžèe§,E îAoj AŭÜ_ôÏNIòr€,„ÊŒ `?šäµ#xŜĉÊ2A˘ĉĥw½f‰q•%˜…&61ñßw·˜‰ĝ”÷=,ì=Żeï/mwґÀ‘ïò†~û.G%>‚¨Ù–ğżÇQFSb¨™AÂÉshïğmšDÁeWÌÒZz%BÁˆ ZëċdɝżçÔ K ŭvÏYšÂî=!)¸ğ›ĵˆ°ÂQĥˆG%/I‹>:şŸb’…½U½Á)ĥĈV5ßyT-Ÿc„ĠLmFLş”QŬˆ+ġ1ô2÷ƒğuŬ\ĤˆóTލËL:°ÜĠ)œôvvİJgħfô(%ĞOy%çé{§T ĥYŞûò|X~ĥZoŝŒ`Ç;füïbÒŭq%ÌGyô’< àdqp lg#KàŽŝ1lŞÙY%·düüG}N\Ü}ġ”Ч–RÙkĵÖDì-Cqêö!ì›Ğĥ†óKÉb3Œş„­c{ÉfßħO Èž…DôÔĜiDѵHk ħê-ü[Ñ ü˘ÁehçYMÄ8ĥĞ;‰¨ƒ~qê)¨\ŝ˘sjÏ_{çÇAlêW¤½X£ŽgôÂ$(ŭ´Ğ Œè̃â²:èÀò({{~ÓR5!|¨kïûê{EÇÈ=À<3>·µêOqÁݟıı?˙İŞoŭ;0>ƒ{…QöÜHGĊ÷Wħáŝöş@‹' İ-ŬtŝmpGèYqAĉ0pTq=s ŜŒàŒ³ü5… ~›\5ú/¨ħJmĝ8ÊKuDóxÖkŒhžíD˂Eù7˘­%ĝ·Tc…kHZñ€K;=6 ]™YĞ|›‘÷Ž l%Ĵ¨cœá[\'iëêı}ŸáN'J úV̤vÜ 
<½Ç›*V<€…tPâÁ×òÇĝıŞNMÓá`„À‚TgĜ•W9µ½Sĝ 9=ÔY_š›ŝîYü+çϜˆ8rĝ‘ˆgÎ_ı˙ì]zni}ç4`MġÖĉĵş™R!| d³1:ıjĵ?éĤNEÄ,BŸĠÒħñüËĞċ‚–G³Y’cÁeW븳ÙğU…àïóÌÜ`¢¸n_+ĴO2?­$)€%ǝĉXúC=˘;ü\ ih™;²ô'–aJ i)M/û .rSĵ˜/Šġǜv\2)è5êòÎW ?ĉwìo1•ÊšßHşÀc° Ĝ°Cż‚ßKħRb-@‚žÍYéŽ%â˜Kġ§3RĦA²Żpei†'ĉ†Ü– XÚñvKôH_6 ’„'@4ÛI}0 œ>â5ġÒĞY÷Aı t4ËËzFÓĦŻ^ßm&W<á"žĵ·Â6½NÔ;ĜĦĊx‰më$(ÏO(rr~aûÈ.’UúFƒ˙èĝĜQ%¨Â@!ĉ}èâÛıéË ú÷§çž.F nްżyф°ĦVË'ì:)kœüV °7]w$êîIpú~ˆİĝŜ­œĈ â|´À°xûŬ‚ y6ĤĊlY˘ˆQMûM§fV-$ÂÀZ–óáɍsGvùz8ÛÛZš›™›˜™[ÚÚ;{ĝí:rîĈ“9e­ ȄUe><µÉ^ôsüJKĥĤ5…t àîv Qµ|â ĈħıÊğ*ڟ€Œ=şDA‡ëMÀµß*Ŝ¸bÎLŞ·0”œÙ?…i^ßĜ#7 ĊOçĤïë–§sہjˆ~*RP”R8:6~”_ šgEÚ52´°˘~ŝ$0'È-“[ñÊċbĦ'ĜŞSo‡[è…ìçp™(–3ëW×ïtSo™ôr;m ë}ÖjéúF’ÒĠĜÑ  ÂdûA Ñ+ħ›ggÉ “‡@AôKí]g5ĥEĤ˙LUÜуüjEĥ1¨¸MΕ}&0 T7ŭġÓ$ÊVÎcÒvÎK>_úW1ÛF|tÚ$‘ĊcŜÄ0 yĝ}aƒ|Îe|xSTíöY‡˙ÌìßMĝ$0ŬŒĝÏ$°‘—j‰ĜgòZv/Ló}İ’P ŞŬ/Cry7Ša|›€÷c0 tŝÏ$!dí §M,^ߗS+IàŞ“˙Nïì‚I B×uŜqé > ôÄ_|xñvŸ•ĥĤ:* û?' SÑ1µ]é³ŭE|ĝwŒÓĝ$ÍÎşâ$9ìşóï$ä*|¸âԗ>Ğéi6!˙™:$pì=lIàcŠˆ˙7P;dö×B1H’ê›ż0 ÜŬÂË´G,Sy˙L‰féĝ$pÓßÙ3ü:Ÿħj˘7ñIàeN}‡$Ñ{ĴqħH›ƒI`cŸ­ĜWş/ßKÎvÒcÎVJ”·ŒšĞ$&7ÙrmĊ` Ú&Rû‡˙ ê“Àwŭ2[°Ġ:ô ´†/P G¸;eÇîK‹H·'ĠÖb[¤şÓ÷f­7 NĴkŒ¨[,W™ĈİU@ˆ8²Î&÷ Z2$Bžòö[è]²ÏtħŞZñì&ÂqN†œĈ/ÖÂځ^O5,'šmóÁàè.’u!úREèÌôÀ>íDn½Ñ0‰Óĥƒ& 4Úş„_¸Ŭ§äÉn™œá† ‘ ÊŠ@XjÇşnĜP„V?€ĴğôoÁÏ:˜şS~ë;ŜtrÙío÷Z‰"†7òıĵħŠÄ7HŠéşìŽK-ï{ôÔßÊo/"ĥ­YfŞ!şˆ…LŽˆĦÁT‰¸6abrĤËÖl‹ˆ{ù­ò︣çzËSvğèBÛLXÇ-"ħbŒÇÌğh(€ˆZí}ÛÎĉvÙעŜÎÔAÜjĞIs‡ôêÀ³ĠB›]X;pT.‚ o s2ŬÈò§şı_VtAš~Ižĥ£“dHôĞç&jóí˜>#¤ò-´&í´ÚŞĉÙC¤ċ ßòìXK8Âú!—Á9NĜ4[ĴĤZ<íC “˜ŭĵ§IĤù0Ŝómâ$Ċï!VIĠ4ʸ²ôJ·Ñ…x˙ †µñì=BZ·Ôl­ÚämRQ)ùΘìN.ÂñÂCÁQüġ9„ĝIu`ˆ7àš9ċŝ/@–¨Ğ‡É ä×O0wĵ„ŞĤ}ċò•ÊFímF Eà°AôÍôUċŸÌ½È֙êEBñ@Š„Ró]aïh‚„ò[,Cn]„hŠ>%ä/ĥ"˙ĉ#m’ÏĴp-Iġêòò·ÇXŸWQ¤v—$ÈQ/çtà³ğĠ ½żêĥ#¸7N‡D5Kíċ1êŸl5äG$l÷%Ö*´#3&Çú iĜuísĠ 1u9‰WÍ5d„ñÈ‘Â/,!ШĤg`lbjjbl §Ĥ(+!̏‹Ċ‰|Â2ĉ΁áWsê€Ĵú|m× M!\D“ c ™ÚÄ}ĥżáÖ'ġ ^oj˜1•¤ƒ[F>nƒúšÏë!Ĵ–Ÿ€ëËQ^y¸2b s òŬR”UŸYc·  ˙ÀğŞJZ[ˆŭñ!i?böìWĵÍĝBÑOA –şà ›½U–H@żë߅˘=p"…UÏlEö2*ĞÀàP˘<&‹ŒÚڍ”+,äËûLUÁì9œ+èDž Žn5uàŒ9CúÒŻŒ‹ Aċ>OÈİ‰Ă½tH9)aXzH¤À$ԔMĵ=£oÏ9CÉkDÈ~Z~‡ü*`ŬgıJ×4+.™J£n Ñ!,9†Ċöñž€0à‡´ikŻ8l"!Ż\i(ŝÎ*@5܊lŭëIĜ1ñ× ‡]ì%Yĉ7€³`E!¤ÀĦ‡`•VútΟ•nŞÀÊ6ñ‹íİŞwKı}bÌĵqĉ…L==‚5&çêx“Ÿ‚…ïħáO;áó7;C¸Š;2NT\™ÑĊB'*žt‚D(D·Ç|Ĵb·Ğ$ġvÄFgS|¸ƒëċ5 
-í]Öúm Û0<üàŝ°íA~k]ì- 5ääá04R1uŜq;µ¤ ĤÉÌĦޏ1ÛñP MĊéàóŠ ”Ġ•ı LëïTÀ82í€\;? cƒï6Â_2èÓ$Ż*풁™mÚ Êçüf†ñɍObw5V·GŒSVħ‰*{vxŝĦɧͰ"ÀĦ8ĊŞó³gñ[@%i/ŽUñú;ħƒàùwt#œİš–Âéú­¸a%¤ï#ÁÑd×Ûj*ŭtOxûħĜ1$Nç÷ı›¨iSK›k¤]áÜvƒ½J#Ÿĥò -y”3{ŭ™ÛÄì&ƒ&n—ôpİıŠCR˙5†DÔâĈ%˙CÍĵĝÇR#é zÜ’ĦàŻ` ÉcH:C.CŜ7î_cˆsOŻ+C~üc ÈCž˘Ù¸1¤{#ai!úN‡oO˙Ü˙a ħ[0†@7lÁb°` )Ĝ*NvxĈƒ` 9Ĉ¸1ÄûĊ‚1D>˙cHIĴ§n ‰Ë{|ovl€é‚1Ä˙ìÛcÈ$C.†zX¨‰ösÁbċĵnKĜħó` I|—ú97†d|N}—ĈóÇÂĥĴİ?Ĉ޸š…GèE0†L.CŜžġ_0†˜Äf÷‚?>?7†(yĈBCŒ!pè-C^@;T}Ï0†cÈA0† ]z´ÍF™ĉ oÔ#gzƒÉĈŸâ•È2°Ĥğ½ç€>°T×?BÓ×Ú}ë‘+R²K:Çgtñ˙˘‚`Á “Ÿìl(ÍNytċÈVwë…F1ELciÀÉÇım8%Ş%3nĞN#ĥÚñàÏ6ßĝj·ı"ċt.o¸Œ)Ó£żĉ¸ qËh|6W›ÑáĞhÎ/Ĉ¸E;IË^@"̕"ŜŒ•lä—8ÔÁûâ@0Idġ„ IŸ˜8+.qqf ŒO7ͳ&nßOĥ*ĉŜÑü‚>Óú!{ëzxıŜ9÷GOêt°·)è֌Ĵi_(Ü˙ñ#„n'‘›ĵ­|Ÿ!ÁßZ%áïĈ2ġŝLR$×UT`wg ì9ġıÄûŠQgȅ#HZ#44ĵ1°Š²ZECÑu }Ÿ ħÌeZŸ@è+VÙ$ç ï§°}$5 çL,“*µ“ĝ9ċ* í‹ĜmòşÉ2m…l8†Ì•ê‹żĈî ™”Âk9ı²Ğd]ÔŝŒ}Ò›%~D‹W ŽyXb Ħ¸-TùóŒÄĊÈ˘gsӏÍŬĜ^~(Ġġŭ +;H’¸ĝjÖ˙x•yQd›YiI#„§oòW”ƒ"bžÒÁl[ĴŸ>ô65WGĵµF˙[üt€­TƒÉ˘Şĉ[]OüZÒÔ;>ǂĦ/ŝ3è³Àˆš˜JÔ,XĝÇĴıñŜĤ’Ż‰×mu5WH”°šmÀéçéˆH^EĴքŞ_,`éĜ)áÖ"ˆ„CÔŻI†i–‘%LvYä"²ŞÇŭXÛĠĊDÉ lÖÌ{WŞhP>o V1<=÷l²8‘1|^žş"‘ĥˆC.–çˆĴ(F?’CĤŽÑt?aŸµ/²*ípÌÚrĜĞ–šŬÇ^‹ë—Î…À10[Ağlrù6dÂw°“\§~JĜJ-›È†e i‚ž2œċš*Ċ|AĝiXk3–ıOa*ښEˆŸPġÁĵ5 Ԙ†‚a(xw\ÉóU}ĵT\‹à˙S'şĝ2?ğU–Ï?F.ò6 ä ›itTËێċŠœÎY1‘/³¸§I_ħ•w°käµcuĤ@‰ğÊçĜ1ìK›ĞħMe)Œ³çë!á5Ċ4 ûlH 훽$!~~r쌸Ĝ‰Áùm˘g>VĦà-hQżÈö?h×9-Ä(ĤĞ?İ…¨…—rĤ?J´ö˙¤sjcìaĵ#u›Ê‹ZsXğcé0 îÎfŒ‚mŠŭ†‚‡ÊßEÙİA;€ßËü^}™üWĵó‡r™³ƒŬm Ġeż ó~ŭüù+ŻwYuC[÷àĝ,ÇE³g*›ġòêAże8jœ(ĴfŭcôŽoħ›á£àĠ‘PIîM?fÓèċQySĜ­‰‚ö1µúÏŭZéÀOӜÒp5Dëd=Öc„hëB˙lá(B[@,¸Ë÷$j'Ìž?36y^\âÒl_(Ċ3–eJÛ%ähëÁeQ 2W›šı0‚ïp‡#dnĊLëĈ֒Ż˘ßkŞHQżİgħLŝÄ ‘Ĵ镢ıcĥòĠf9›x‘ÇóËUşçi³hŬŝèZĊÁ÷ȋQ%_ÌOiü.,ˆ`ĵ PıÂWŭ“˜0²37éĉ‹ĉ°Èhâ=1 *ìÑ4ûìğ€"B.B* n:á<M*êTyPnb ´eĞIç¸_Uä> ÷…ġ:Ĝ } %˙Í]D;H…ƒNİ â‡ċĜ#˲J?²êġİ™{Îú Z]pĞš›{H– -á '@·Ò?}–]t$z™œ‘[R Ê <8̏´•|N8½ĠDŽ‘DµW„ŜÈlüĜHéÓ}ËċIDûÉm,ŜÀ×Sö—p8Œ¨Ŝ7A*1Wh.L| ĥġ"ölş?´[†y%ĦRdÇ7sxéEɟ‰vofêş*ÙŻÍZ†Ĝç`yˆK)İÇFÂȋrÑòR˙ҏ~SùÊ=GZ=ıŒ–€Ŭ¤xŒUé(NC$p½ĝGqğπàä˙6fÙS(ˆFßO-@@ÂÜ"súMBĈ_ĴSO ŝĴĉğ2£€.G"ç4Öcúógiµ?á%Pgï0y…À^Ô_²ù7(ĥ‡ú£OÓ¸ƒûáOÄöĝ:ċ Q0şŞΟ›H7ħK„-ó?äuʧü §ıé²?Yljn=Cë‘íî„àQ_y´£´R° â–ù8iѨÉñ3ââ§GĉnБחaż}) Q}œ ’‰vĴġ•Cd×Cv½ß]/܌ $,£'Ž9ó֙"Z޵ŸP 
ıdpú˘(żħ²ġdµÛs#§ĊĊόOF‰JÇÍwm!Zä`)ZQô­Ü;'ĥ#ë‡z܈ÇY?5dÓı§ ŝSċ:ò?ĉ·.a7I›Ÿ…ĦĤ³t´@ÂaêĞ€ĴcûìiİÙ÷ƒş ċ˙Ŭ,éî¨(ç?Ì[Ħ/‚?kigçġ]ħġs‘HNùŜ¨ÜvÔɘql"”ó,kS°Ĉ Ğ;‚.àͤİÑĜgr8ĥœ:½TĦYĠ|¨PÂ2aNyÒV}uÚjċcĞhÏ`6ıyH]ñ;û0Áĵy)żŞ&­ĵl"öYC8–]éŻ7@‰Ry†§Â—ŝÀ~: Ëcċ>$Í{³%yü@ۏÊP<HÁюîÀêOh Ú'kÑáç‚ë˜F˜ħ‡¨ÄWCò†Q }<1û3Ù#(½öI°)ät 7Äĉô°°Ù–ŒĞ[pu0YBoeÈĊWy͐í8³ƒ-ߒî]8²cƒ‡£™ĤŞ˘nÂQrŠŞšúf6Žvıp/é[iËà,\1ŒÑĉĵWCVêIqy–Ğ-³Ğ''vƒ!dMƒŸÔÒёì3°“”Z ğĤğĞĊ j!°³hŒħĤş>FkOj#'êħŽhmxıĉ|ó ÈmGx :{O“äSŽ}_†8üÄ~,ĊsáÏT€ŻËŽ•ìXaÏX˘Ĵü[Ş}…càÒĉqÂaöwEġ˘9Èà˘ÏhĞĈÊĠ´ëúĴ¤ó&œ ĉ Q8dÚ\ݰt:•ĵ '32|ÂGġáĈ™Z³ÎSÀ tŽaì„n—½GÈAFî+^ HíDġçâH?›DvaÀ"Gr=äÚ 4w˜üibħf÷o1ÀPîEcàÈ·›‹8¨šâ;›+kÔ8àÄì2²Çp‹  Œ^ˆéÁ‹ SËônÂŞ–™CdëßÜğ’²ı‘uìxEÁc£Sç%EOÏ%)  Ëó$İ]eĵħ%hÇq{ӄ7ŝäŒ=ĥ#‹oü6Ïĝ ˜k‡Š˜Ì˘(ß.ğ\Éb×ßñ”EDìN}Ái›ñÁĴypüïo´ìe„‡ž(Ln5–o=Ÿ˜×ŠŻcíe__^?êçbkĴİ )"@ŭwŝû@ĤÂTIMc[ż“×_~-kƒË†5Ŝš—x~ër ˜-‹êyDĵ,ċñF~Ç›çÍ8£räû);DÖóN=›Uyyԗ#żé! ÛmûΘ˙ĥQœl÷xŒós£0Íû;g(V›`û†1zUä™‡U#knĝ¤¨äùİÑc‚ŠñìĈuˆîCYÉğÜßÖäC3-Ğğ§[ hÍ*Ò{ĝ-›–a2ùĝdsg})8²ŭâÂr‰A÷ÌĠCìw·ĉâ‰Oä@y,l—óàF’úċ‹íiúIŠ›ÓwFwJ ĵ ü¨$ßAûÀ9ŝĤLĜ’b!Qsáž!ïV\Ċ‰!ĵœµQï)ÚÀ½~ċŭĵğ0ÊD³§]S™A°ŽŒ†–ÚW)úQòUô•¨iĠL0>¸²§D²~/˘]`•ِ§z³Ş–}KJê³5€hžCQl3Ñ _|'>vI‘âS„5ì,B;ϨĴÀïl3üî ˘u‘ĝ™j0Ki³àfj²^‰ d}ìë0J݊ߍG#Hچ½¨š‚ïgîíàŽĤÊ[ú|šÛ2ŽÏóê½u2Äkı݆Ĵ(˙¤aT~A!aa!A~ê?Ú0"™_TVtıWÈÉ[oÖà{„ñ–ܧ'ŭ,ċáT\r;î1SU/Âl% 4½ñUttĝë1k!‚Òú'à´Î?nFĦ-Pà$~j5ˆĴCÀUĵĉVĠ3hQ €èîĴȇ˘xi ÏBĝV ™ĥ80'œÊĵ!%u‹]ğ JSád›2ÖÚ˘ßĴHŠ=>Z žİ2}…^%{ŒÖK}…Ff4Ĵƒ˜İ‚ÓÙ˘àòvŸO oè5îĦ‚u›Ù—„Î*Ċîïä3\5 Àż€â,ŭ`œÙÄú#t ?ÌĊˆyR°ù+áÉxCÄZsÉç8NêC ›âzJ7Uˆúrc¤1dŒµ:j-†~ËĜ—Ê›ŭ…fâxĵà"3r•Ô ´sÌHä öRBó;ïş ÁOÀ„Á20USü|›/âP‚&ÁÖ£'+tx`ŝž:yCöˍ¨ys=2I˜d$Oí@5ÖyQQ¨F‡@½CsŒïĈúŸˆ[#4cë{ğUƒžÀĞÜ.mè-Lg譟£×Ô[TÏuߝÌzÜù5Û[ġġé…0ßċĈ ƒÂċ “VPĠÒ32]´ĜÂbñ"S#=-Uiħ‘L‹%U—û†]xúµŞw÷ŽĠgŜÙç 7˘¸ñúèÏ­Óĝö½4@êĞaœî˘ĈÖ·}ĜxF¨ ͟÷cŬñŽ4 ĦĠêˆŝĊNĴÊNɌÉüuwòĤ&ÑíVµĴ~o~ఐl½vdIh‰âÛ6^P3_Ŝĉŭ4ĵÎû)ñ{"bT3–ùBu\F0áï<ŝ@?÷Ż™|é°­Ìï! 
ĊÚ-cHîáŝŻhE“´'äoCêNœsäÜVħ0ƒŽ=!|m<É4rÁü4èH ċöL™§ùkóHqt[ÖRN§Œz„ïG7ĊïÁZÑSĵĵ]ĠrüHƒ¨ü!BĦÜDêšé|y†GŝçèÙĠ™‰ĵÄî[ĠÍìDÖöy#[FúüïiàŸç o5À1Ï$^ġ:ê€q–bŠżH… ‡Tb dÈÎÀ‰Lq£e•ÎÌŜ*C08UÉÉ VFdÖ'öb_öR(†ğS`Tû`#|ŝ²ŽÇRÛĜtMR„+^▷Ŝŭ °ÍĞHO8½mµ…†~!,"­˘ ûĠ^ŝ›ĥíĜµ',lÏÛ6ù{­†ƒ ˙/) ‹ĠÛN'¤WÀ6eï˘7YËĊs׈¤šiŒÑ–zÌQ€ja”²˙ûìŭ2Žġ&—A”ƒ3g¸•§ 2[³™tàŬÒÜR€éL–9€U† A&´ÄżÌŬBDä9ëŞyI0#c„Ĉ[4èúÓ]Ŝˆ_ßÈÄğop=²sĤΊ˙öRÄĴüŒçüŽ †òùÓk¨‰ÜPˆ†CB'Œĝ>ßĈ-UÛËĦrıħŝî|GP™ÎÒ–­ =Ž”WËšiꇀħ۔ä1ħ´€ĝdHzşÂ€~…\X+ÎsSú˘;­á4Ò)4P`Kf’Ħq‰Üâmƒ­³/-…á[è]`Żż òĜ,£ú7Êuìݐeĝd/½ÜLà&šİ.ŝŭ˘%Çù³”¸shlĊ8ûnA ha?R£†vħžêĵ˰|˘ĉ­İÙG&ˆĊ³ÙÙ6µÈvĴĉ"aѵnĴéĵAfó—YNùih2Cb°!VœÊèfóFŠîm·”"%L×G½-‡ /{Ĵáû£SAŽÀcá*FKŬ÷œ¸|÷Ùûôìü’ŠšzP†8ĴĤ˘$?;ŭŭ³ğ—Oì t_j¤"ÇÂàtêÑ÷†16DŒËßF­7• ,·ß+áħğ3NÁ@3 NNE_`WˆĞċ0ŠOš’˙b½ TĠŻAy“àO&X™kİâ@@ŞÛ-*¸­ ñUŽœŽS˘†´qÀı­ƒyGNäÜ,hµĴóÑç wı… ^gTÙRΰŠaÁž$İ[Ğz‘şÒ/x <‡>àóšù%cŜŬ ŬܧÙÒĴnÜ_"áBO‚o89u„oäèzÉĉ"êaÀ~Àèé˜êZl³ì3BN _ì¤âvÄdĉ£9w‘Z›KŒŸPóDwˆµċÀ2¨˘€LÂ(‰ĥ;Jz?³Tħİ^aÉÄ7ÁuŒ·|Ĵ$Şï|†ˆH…şFĊĜjêà{×™‰ÂnҖ4O![&Ú] Úĵ’°sĴ#âRNJ“È­ZĴ‚ ÑŻŽ—lFpÍÇJ½Éj×&ĉ› –/éӏ̝+½ĵß! †eàï½Jˆ6m§Ó6Ë!ʸ—÷ï39´ÄUÌóş“÷˜ ¤OÂXŽ7ôëĈFSQYÎvûġŻ-ġŸí,H<żÍÙ@†úday=[ÍáÑw?ç–ÖĥvŒNNρ1~ĉésӓ£Ŭ­µıŸïF‡oö°Ġ“& AÖÀyÛùÄ\8Ŭòġúv[92AÔt_C<>žt”&˜îIîĉÍWāÄFÎçÙ_üĈʈÜf@š”ÓF”öŝ†u „‚C~ózŻè ĉĤé/-“Çs×ÔÈŜX+Á,™WçG4jĈ du÷ĦĵXĞÎÙĜ?ĥ“°²ŞĦ.í[ ‰ĉ%´›Ĝ+Q³şq/ s? ĞPŻqɘ÷&ħùŜ2Ö ~›X˘Pߤ¸tĉ=é(ĥ“ƒ\À½‚ŝ làI9mb;POµ‰xbn-ġâœĤĥÊФkycžÚsW(J(ĉVÌK¤œ)?ô­ ]֍sy:ż\ħµJr#\ç!vŞz`ÓĈa§‘XG›.RÖ*Ÿ\Gş‚^Ü/Ğ%x“áƒcŜ*€0 ˀvzŬq!†²àÉ ĊÊ_§–Šƒ·a`§×  +á÷Ëċß^DP9RƒöŬħ!‰ €[qʈ(‚¸ĈÓCµˆĥ‘ù`kÌ8l%„HÚR$o 77FPUWìTËPhġçëaîĤò€ĦˆŞ˜9쏎—URß54‰›â˙#üW‰;ċ'‡şêK²ŜĊGïp6S…-’€ĵİ{ĜġÏĠ€ eô<ÚżoĝÇĉÀvÌ^²:œËüH[˘Vhú8Ž2!Şà@Œds§­9˘BXt{˜›ëÇ/\ŽĠ- ¸ù0™’>2@ż.ĴĜ‰“‚Ê TqF|ÛçNÓ ²@ œ1 žM ŒX żDݐÖM–k)M݃u|rŠúûÑۄƒÙ=VDĈ*ÉŞVĊċóO‘Ğ7ÙöÚÔOŞ#‡t‰ieθH)ùCı2§í‰ykÑ#…$£zeFÍ[§6yĝ½Ed7̌>1—âŜñ³ ­j”] ñ3Hb¸‹ü·“*Y"ùkr•` +Ìħ·ÈŜS40ô=‰$잠Y#‚äĜ4{´ĴšyA@'ž˜îßÛ`Ĵq*À~,G–|A남  '’çsÓ!úqƒÜ_Epc3k#@6 Ò÷ÈEl~ĥ‚Íĝ}ڂJ6ܗ1‚MäFÂVÔöPj›7#ĈWWá×r?ŝşb”ƒÎuċ=;éo£ Ë Š˜š…Ûĥ7_ûÓÜ7A˙§ÛżıĜ6”! 
Ë ('úš˙|{}óÄ67 5X$’„UmüO>ËëšC9£Żğkñ#ş1?xìÔC ·ÌÀF2ö’ݧ3ĜgÍɂ.ú x“YLÜ!²ñw0NYôpzî9ì½  sAT‡~Y‚,˙8!Ž…ìƒwôOŸÀµé:˜ĠËHg›IŒ23Á{X’„^ }'ĈüĦ¨ùgʛ| ÜħÁĴÁU“ż$—ŒKٍ˙qg$A à ŝWÊ6VA&ô,1uD{)óáìxv‹´|'Ŝ›T[‡ù¨Í\Ħ6d ˆß$h ñĊÌjyàYŽkĴ䞤äµIûÀ sHÛräĜN´I[Ċ†5³écRPJW2lí2—΁šê,êA٘™‘cBò·›ˆÙĜG]€wù"kÚgÂIËjéGIN­0Ĝ< î{ĜcA›ĥgjù6Òs¤ÍğZ •Ġ U0ĥTşdÀL­AÑv2o'š—KíĦL8ĝH·ċQNrWjŒCíB{hÍĈA0éŻbÄ ï6qÚY†İ'‚żŞ”zœë˘:şIöJà ÒBĦúqÒR­³\Êy.…o+7žˆ‡ÀWÀÈ]Ò×d$›ÍĜŽç~”4¨€ĜÚ.zıİì/Àw•(#ŸĜkáVÍJü ÷Š˜ìmfÛ&’Q2ŻÚ—hÈŝğ_X<˘‡Œ‡3³ ׃@żżWúüÜG/!>Çǃhóe ²ËƒżĜè‡-*ˆ8ÚXœĈ~*DŞa ɏüŠġ…Ï_ĜïR8ĜCeIQÖP (˜­9û0½´mx·Á0gF{ÛjKó²Ò’“^Ŭ‹_È?nz Ÿy8”vO„¸ŝżìD˘o5/و´İy[Vì >-YYŠÈz’ĵL"¸×`³&dZ6ž­3’ıǑíŒlY£Ĥ%˘Œ7†áU bšwPl­pg>X0nÚN~ŽŞgú‰{K8ġµ.Kq˙dí"á'ĜcbÀ:4ÍìVôüŞqš¨,r…ÑêKXV€}2!A.ü.ÉżïÇZg÷—ĤŝÀœ8m}.w Îсe@im(•q'“}Ä!eSÊbŸ0£€£9–oïwèŸg °ñ˘ŝşù\Íi£'ïÑ!CĜQ$ġœ6Ÿ~ŝ§s!<ÖÌPgŬïìŻܸy,|ïî!ÁÁ!;wï ?yéĈƒW³×uÍ$†Çïü“ŝàôf'=͐% ==Êë?ş3ëj€1\şŝ7Š ‘Rż8Eú;Ŝz2ĴÖ³Ċ V)¤—Ä}’'ıeP Ò>ŬĞÄ n€›ğž&ħÇ?4é=ì÷ÇÈDküIş/!N2ù„,#ĝĥ2ˆ('˘9ĈÔ3ó@‹Ŝ?ÓäÈÍbC‘ÇĜáEµ“ŝ0J[ÜÚ·Lè='lÁŜ  ¸ ö òö…eí1ÂĤğDEĞ˘‘ötEHìċXÀ*ı6€( „‹t¤AsXiĥC´9bÁĞ{S"ÂZœĉŸÁ`ÀÂ]Ä aàTĠcŞT1`-•;³†ú 4Ĉû8ŸÄ5 ݆Yk1£r@Úmí\E ŸĞ³‡\8àÎ{'RLżcġĝ@yFDö½˙€€üĠéñ ²xM:ۅ¨y}œñf AëJ?÷ç!ï/̙÷nüünàŒİù‚Ô†”1´Ğ(A}{Ê 6‘}ÌJ ïû{ ›ŝso£|ŝqـ ·żê¨!D ŞXû†dgÇ8„üıôÑÚÂĊœ>¸s“‡‹ƒ­•……•­ƒ‹‡ÏĤOǀU ĥk_3Ç; OzĜ×ZE@Òp ½ġ½„DŬÙqplĵ÷g›úë!O´:–= ĤlW'ˆşŜï@ÇR ÉĤwĵŒ1ĝƒ÷3Ì/ŜB~rûŻh–ĵaŒ_×$şdµxÙ ÓWċôӯɊœİ*ŸŜGìğ)ċàd/–[!n_7N\Ġ |ˆŬsċFbŻaĵz¸i‘ĝ'Î>Pv?£™É•²¨P2íİ‚mĦ¸ àÂ÷À!àÙĵ“Bëáhċ{"|Ÿ)y͢;°uJÀHë ÇË@Áòאċ¤{£²Á¨£1ìĥ·anm’Â[|ĝ n&{)° TlhVÁ·ŽS0zžÌ“²îĞӂm  í9 ÛÀBuĊl@loZB½ÊËT•J„m ä?iˆÄħ+›ş‚‡o=ğ\,w9bŸOB´îύCJÚ#m?"C ŭ #¸t^ ?§m xXm6ñ&3BT ân˙l]$ñmplƒjo_ĜÄ-l›ÓcŝŬêŻÜqéu^ËÂ6pf°ıäkÒŬè;üŬĴMġ4UdÁƒoeT4ġL­ÜŭwŽ›ôµ¤yß3F[ò^_ÚħR˙ßm`LzóÂ60.`a¸ŭ1X£Fr}°¤Ë?Û@7q‚jHĈ$ŻéŞ Ĝnñmà)¸‡înFP ĝĉI‘9ҎĉÂ6êĝÜ}-|–m,‡_‡U‡mà™™îM‡Jvœ<^ÉÁ60QJ5“w•ş¤i|à7²Ġ a‰>§ıÀ6PĞÏZ*o–3SŽĝ6PıAĥ”½àŝ„oßb ċ“=0˙ğ <Ë0vDƒeGï‘Ê ½Ïa  ĊÌKmâ(%¤6b[Ċ.ŽnŻĞġç%$Îك5NÖa6•×ÓD‡ÛàŒyËżjüŞfùÄȃܧ:v÷Ĵ€XhŞĴÚw‡[WÍGíŞ™giF9è Eħk8 }e9ï•uO/ŭ†‚ÀŜnÖS=˘g!V€ÂA@RŠìB‹6‹Öf0fŜıÒ×ĤLsŠ÷Ğ!J;²ĉ8ĠÑ f5?Y0Çëx Jä_´/µ‡ÇjM ³Eĝġüf÷ËıácôçAS°9Ÿ ´WÜoÈ˙ĝèò‘`ogk#MEi1!΅!B#ŝ‰ÌG“VÔ4²vö>rùÑG Ë%‚šÌĝ£> 
"èȆèà¤c÷d_ġÓGD-’ZYĵžÔ}‹ĝ‰ŞÏ;xs'ÍAn]͙ËÚĦ„¨í/ĉL§Ĵ¤ı›adĴ‰l.Bğ"•H ĊÀqáċXĦ'Qï)Ğ{Ż€Â zïŞÖ+^ùJœİMLñšcD;ËĴĥ#GÌWYƒ>üğšl*DBWôt;RïC"dÍDıĤêŸñUüoÁs=Dˆƒ·s;óÎ:Ȃ7Î~.‘p–ç/ZU' ĝ·ĜVl£aL¨yaŠ ‚ó^„uPb11ż‚3§ëfÙĥï`š·0M@^L™˜Mżġ”—xe³Âòı·¤ŭ°uĵ%ôçìMĵt=“€'T·ñJm…\ĈNä뇊Ò7N$ÉÇZOĥğ‚5Ë ñëž §˜ecÉÚ´#£‡ùU²ÛĥP´ž°ş÷ Iždĵ0A–~`^P%:}`N$,FÔ#Û°†êˆndÚ˙ÀJsĵ˙|í#(#l `áT’ĉ&ĵÖó9b™tġÖ]ÊêfĦÓġ)Q>Ĉ$‚ Ú²-ÑŻ ;ĤÀ=7—|/jż‹•>ŒûĦò=šĈ00‡Áe°RĝaQ oċâż'ê^r^Ä9S…Ż£·,S$$Œ}˘Rê§QVwÖuE”ZñżĤ§›4ITÀ…',H ‘öy=ˆŭ½ïH£:<èG"uġ X[¤:²8a‚ùÁ‰¨zaŭa)bò‚1xZRh7ë‰eKûĦ ˙áÑñ#4íd,ی>Ġí‡xu7εĵĦ]s!Er)İü`@v2r´+ÇŬ¨O` p½‹÷ô(›ĜÏŬpÖÂûIoç–+4WŠ{íĠ´™ÉÔ $aÖԂKüŜvhŬıJE>ññ ÄA‚9™C.ĵ˙%!:Âħ3–`V§TĥK°ĵN,=Jŝ2Ĵ·hâ3ÜRŽž°ĵ„rĈmeËú)V @aʍ? şĦÇy‰‚Ëz:—Äò&ĠĦsZLàşşƒ~QO@³ô.2kœ gN ¨%òÀşĥŸ÷,ÇħHĉt…‹ …6`… ;ˆU햤Ĵú@Ÿûà7€3ìüŬJˆúŝ|X²5$ 9‚–…Y$†ż~-z47ÚUHVu?›ÑÉÄĤë“ÏxÀâNT×eύÏĝ  ïÍ)wOï\go˘&-ĵ°ñ#Âú_HT¤a²²  “€˘ †ĤPXZÍÄ~ŬÎÓwS š4Ż>ßĜ˘‹˙™^g’ë§1fgĈYwU2QÁ5:wXÑĝ먘SLPĴ…H†G!ż_QڝϞù·÷sôĞ(’ğаĈ€ +ÄB…ÄÂğ8ɈcVâN°€ŭĝ pêòĠNÏü $8×0/ èeĦ âê_;Bàa…ŬP§ġ&àX—tö,Lä‡vhżÛ$s*ġ—ÉڎçyħžÀIp/ċóÄ"½á/ä£h X]ıà.,@Ş3 ( Ĉvœ#ĝĉ÷e?¤Â͝0dFe#. :"ܞ†ĵVXm–ìÁk™öè{0GğÊ·•ò˘ ™îâ%½úzŬ•rĥ ^6Ĥ"8¨W ; &q"ğYYrÚ%³Û`PĴ'ÛKQŬŸÜ+üĤ…ÜkÂZéĜ;U‘+ÌĤµÈÊ Ŝ uÚÁÁ™ËR"LJçn*ómC³WtïOÏ<0F?™Ħ'-#*ŞR”6œ áoÏ'à!Û¸×ÙÙûîÄ@8yE|ÁfËnĠ ıÇSÚ1ېrf­|[<?ÈnžàÀw¸ëOZBäεKġ˙чÍ0h‚jê™-H  ›Á‘@NòŸn.×_şvgdBڟ.¸p&š³ö4‚ğŠˆŝÚ3) ³@#I=}T(Ŭ.›ĊFEÀyT%0ħ: ût ½v׏]ĝìáo2ÚQÀĵ:˘@\–DŸy²1~03}_—°2­Û̧|snĝ¸ˆÔċ™Áƒ4ġĵŠ•ÈÚ&ĉĠwXş–5nĦ)˙îO]QĜŻ‹ë*`Ûl‰ĥ\k7r”›$è4\‡wÁ#ÔV²[Oż·DܝùÍ .i“wôûQíe,PÁuJnĈÖ+ żDÒڅàÊ *3H3ġÊ´JĉŞGżÈWV0aËEó׈é}Šî\(• j.™{ O?á˘^ &5ĵm$wñ’6³R\à U9ĥV×Èn-Ö°ÇÂ&eó=#ż~h†ĴnĤŸĤég`Ÿġ€€1v˜_í §)€dÄiŬNHô u’ç/´)LL`o´í„" ƒƒ³ ÍíŬ4ğ`·"˘y¨˜5ŸğW‹ îñ°Î0§Ğց얗!ĈP Ü˙şa£7ŒòÂ?)3 ğv߉1TŸóââïzò" 'PÄċÔôÙ:‚,tĤ-ÛĥoßĥeÓˆ:Ú.ÒW“€­!OD^oİ÷ž‹/rꇠ]8ÛÜY3è‰è{E}lĤcs Ż÷C9PÀ8äe –`ħ¤™GäN£=Ä Z{sçYĊ‡4ĊŬìéwn4AxÁaŠ'ÚßÄš_ž$uàĊ*R··r’ HMœ'jü‡Ç€ ˘÷˧Ĥ7݆^Hż²q¤Çƒp`ÌDĝ1ö@ÀşeĜ 2Áñ|ĞÇ*µàà"ÂÚLzÈÛEL€÷ó× Ĥ·No‘°Ü˙ŸÎ-Ñ„Š'×]ħ/xm~‘%ÀÀKÊĝ.Òġ\ħ•é+ÔfÀD'(akŝ%lĞXsŝż”0à˙RÂÖs…Ċó½˙PÂ^ü—ñS¢˙C [û?”0×)a^˙C kĜŝ%̑¨R­Ż@ žÊ)aË˙K #›/¤sï€Ş(aċ˙‹ö§„µ§Gû,PÂü˙„Íà”°#† ˙JXĜá“g/%§„Ċ\<{òpĜ˙ƒĤ`èp§„ÍüK ó_ „ĝD§·”°G˙‹V@ sğÓÈ7!˙—ĥüSÂv%ì+hÑ4ˆŽ˙PÂĥ7ü%Ìë_J˜ë˙PÂÖŝ‡ŭSÂ"ŝK 
{ñJĜ½…˜ŝñÜçĴ˙/%,Xà_JX~3œ˙K [³@ ˀ˙ ­ù3òİM8Ë| çˆ@]>ùËĈhĉ!œÌgA;Çv•m‚š +……‘àʚr˝tŝB_Oy œ‚CÜ7Bv=ĥB/XŠKo ù,'[C2{%­ôK”“}ŽfRÁ…HXV„ÑĤ"%zrt&Vĥ£•ûy bŝŠ9rY•èœĈĤ˜Üâĉm£8ç–ìWA4‹ÙôœŬjˆ´ob6šĥKp‡ 6]|ĠC‘ȧµŝz€ğzÇl0%¤œ_û`ÎDËϗÑ;ŬÌUĊp]€€¤ŠġJŸ­ûN\¸v˙éëäé™_ż}ûš™ŝ1ùġÓû×.œĜ·ĠgµŠ$l |bŞĉn;£_ŝlçk¨âŭù€ĊÒ$‚¨ñ†˜ï²1Zp}½QÑjñ44Á-IgWÚ(֗è+¨íÎĦ³‹5•ŭ%ÜñŽħÍyÜĦ[&ˆŝµavš3Qġòó•9²ä3·uM!vfô¤¨Ô•ف½Tí7hÑ2ü°t‚j˜…>—•KÄ>(IżÂ%5²9gÉ.=½.”Xô…mgî! s<ĤĴ§vžÌsšÊ‚4) ‹"$° ÌĠ$ëÊñĴıœÎÙ°.óëŽp–˜³Ž ĥ}B>7Ó ›•áFPŸEx9 ½ uטxˆ¤ö$èI#ĵ?|Ôĥ˜Ĉw‹ûÉï眤Êz ´ÚĠ ğ4tšğLċ Ĥ<á, E5Ĉ­’Ù­È~ftĜîšŭaž ÛĠ΃üî7÷–˜R"úÓ(€WĵÏj "ë½ĉ´lċSı93~Y‘oĴ<ˆçĊ—‹Bz’=ö†(ğvİ/]h4çGŭXçŠyd)“]˜0e˙'-à‹xĥEJ”w‰ú }ÍĦĵ[[Ì%ˆ ˆN݁%ŝH]füħ{]ĵċ)5Sû5›÷Ÿ‰Oü™[TV]×ĜÒÚÖÖÚÒXW]V”›ù!1>öÌŝÍkìMĠôôÉtíŽĊg֍@´`¤*%:ÚĦD ó-·ò`Üû5ÊEžHĠÛò Œ-Oü•SĊf–FšSîtbŭœi4——°ÑŜ-K´y8Ĉ†ĴĞ(,ÏTŽŭÙÀ§xy|ĉĤ ßÖÎk=rPë"˘cñMTğĊŭ ÊĊùZ;ò ĉ3Á;ıÔÊgîGĥΖh)ü€LĜx˙òœ*3íjÖÑhè2TolÓ2è)“rš{~ĜàO[h~Ì×ñĈè…x)ô§"'4ÜÑmÒ/ YġWŜŒĜĜĦ›^RBŽÓwâ… ÖóàRè9‡LËȕœƒŞ‘‹T%”N† ÄVÎĤ ĴĦ§ žÎ•²êi2P(˜ñ‡Ŭ=•íK°ÇB‹*aè;Ĝ·xñì %.-ċ&ÈJ\g6ûlsħ,k‚ûôëÄŝZğ™*wnˆùĈ1İŜ;˘Êİ6Ĵĉ ÁòN?Zs\ ĵùĴù{Ô?Pr &oQaÀقl(3Öq˘Öûßĥ0Àäó(ÔŞ{fŝçSŞG9c âġ}ka£Ğ}š„šİ£ÏŽcWâ“TÓÒĠ74:15=3;;3=51:Ô×ĠR ¤ĝ+Çvĝ8šŞá‘2ì‘Öîğŝħb€qFĞSÎû›Áġ%cúìDŒ–·û­á?Ü6"s›)8 şµ-É×{'ƒ¨íù1ÏÊçħÖñ´˙Ž%Aá` ÖvJ…h÷ԈFˆġĉ99êĉZô›=²ä+úǝ`…ċÚ|›™×%d¸K‰Ħc`[óú ´ĝu}ƒ0Ĵ\$ôKXÒ>°’ï¸}g  šzĴ¤r§W ĤÑפÏ+‚úV” TùÎA–ëO‹Œ÷PUVqO× †óœôçbñOûքâfÔÎQĜγµċí’ìBħŬ¨›úĜ äĊb ïÇ Ì8ž TĜĞm:òCxñ¸— è”y ä@Ĉ1›žÖE’ì}PZËQRͅY ïHÇ òiVħ‰@,7ϘvžY˜°ñ/Ĝ×E¤ÍÌÛ ‚‡çn*Qƒ[pw–ċ;Ö@”yÍ^7Öí^Í̧Â5à;s6ĠK˜bw·B–+ÈĉQe,VYô˘ĉö·=(½üĈEĝnŠaÈ\CÒ!GE AÔÈ'ê}|ŝ€÷|wyç*#<BQ2ZîzêÚ³ı:ĈĤéL†B?˙áq9l&}zl ³áOîÇg×N…ú,7RÁK²FĞv^~Q¸ŞŜGù‰(ŠŽ‡’àÄ9]ż n?ŠknÀY¤çívM˘À’hüïeNX!ÖîğvaŻÔYĉ÷Ĝm§ÎÏĵ‚k;Ş›÷c Y!j€ġÎ÷µS•nÎ T¸ÍìĜLZôûbL900Çĉyšq7VÀ¤˜ušĵ˘cLĴ\U¨`îcgH.jíħûÂ$'µ5bÁäñ„í#ĝcÄTğ·P(€wtĉïA˙³Lg>Ö1u7t·Ìü_ġKîÂ?÷í 5fJ)Üѐ …ÖèÍY}ÎIâÑmXˆPy½ĝz^$ñŬ´FO‘ˆ7;Œt„ğĵ@S“â!~ğAÚ4Ÿ-cÔ0ĵŠïvŸĥĴc“xŠĞ G€—RŞiècIwĜG á+ÌúĠȚFÖ-ñK³ŭaTµ‡Ĵ]œžŒQħb¨ÒP™(çÙöíı 𕳭XûeˆVfÑY´Rë“ħ·•ħ1ċóĵżïBiy×èœ!XΧGyh€ÎC×ódR´Cç{'ßê°î—Öħ]q˙ŒÂšŽ‰˙ɃÀGż³P†ˆsnb £Ĥ0 !klu4 ĥžOúŬ WÀHEÒIO‡Ò4<˘Ò!‚0”í*O ‡û˛/Y!†(m|;€ &­—"h(`ѳ Îjrık= ú›Ë=èïm"üŝP‘Ĥn(Ċ§ŸŠ1“Óp ÚĠĊz¨F 
럽$.s‹Ġ¸Y]Ïĵ"Ĵñ{§$ùMS…‚M˘¤f.ûÑs°cí>v‡oĠpƒ‘Löü&Ò XDžóâIÁpáŬ%D€Ŭ1ží-RÔ£a9ŭŽÉ[/^_.‚mm(ĉ;ÉqŸ½ -pH…‚=:Hi*†Š [ĝ£ñ—?ċħ‡„/â!˜Ż\Oñ ËAc02ĦDÊZé/ABN||ˆ,è†ċ/ϙqçıóĜ}ŠçDÜ7ÖAxM-7}=6*f"ÛĠÑ#Èĥċ ?‚˜sş>ġÈÄPE?/ÚC%Ö7F4m§ÉF 2ßX!ĤffŸY"ڗûxE[Ċ(+“ĤÙıÛ¤‰f—[°ÁçPĥ29U _°ó6ü$Ŭ] œ.ˆ^!‰[î{×ĈB‡óĈŸĤĉrĝy),nç{‹_Em²Ó€‚/YXÉtċĤ#ħÏ3@0şPĊŬħÌù9¨ÏôìÜ<"ŝ)sv¤ÏclZiŞħ0²¨†ŬĤ¨WĊp°‡JŸvQ£˜ àzŜ0Êj{·ÏR‘\]餏ğtIü6çáPzÊŠlÏħ–ËfDémıì餕ħ­EĵËڈċ³Ù™GĤˆĠĉ`”,m{î‹p†˘4¸tKÑd}JĜÄŞ~:ÄèAµUnKŽ ×ّOħŠ„b/DÊarġMN·r“r½ğ8oĝŬgrėçA3, /ˆá wyIwTh-ÙİôA ÖbFœ/"Ŝñ…pLك׀hŝ–BŝŽĵŭ+r˜mi#˘†Ÿ¤Ĉbóù›„˙7³VÚÙ´ô4,=ÀJhĤÖÔ˘iÙ ×Ún¤T:ĉ œ¸ƒ"AÌĵiĜ| M–Ô)d!zô ù![Fğ×!F‚ñíĈ qıÜÊUˆkï.y[;ûİ6yC úm9böŠ9xNĵ6—×  À)óÓ/—‘dvA;ô‘=Ex]Ê·â¤!QlÍó>lèŭ&%DÜċZ“Ûöĝ\d ż;e`oİ~ĥËZ’@VXĥçAA côŭ~¸DŻKhÚxí>˙àcA]÷ZûaÌNŽöŝí€ œ;ŝöŽNÎ2²BlúXw]ÁÇçw{Ùhâò`aĠ%‘Ż~÷10V_Áƒ=ËÈIë]ÏŞÁSSvÇäĠÚAÏÛ¸ÌŞk.âˆÒĤ÷CXßó5bD“Üİ”uÂûG Ŭ%CZörz>e`;yıkÉ ç™ŻÌċߚ dí§ìömdŬ7ĵ WdU%÷œĝ |w<2°Y×=şñêñ aêH&£—ÈîMĉbàà PâœÇJ•ŒÚ[ġÔ: 4[šÔÌÀíé‹ûÓh-{@Ûjörƒı\cĝáĉĵùbF ég |m,هEŝEW“ÉáxRi!1a ÒëTĈž úċ½Ñ0ŠjámĜZ~§˘Û ĝsÈXğĞRÖn"[Äu6SĜe꧔U_½rɤ'ŒbHŜĠFâïÑË翃^„ô*+@/˘F1à°}htÙ<KĠùÑâ’§GNŠŠŸĈ#^EXeMò0ètühb;ĦD “s“k½h ´*4Ž–s'?€‚CçPŝ<§êÒR E~Â&sOۉ"ËO‚ĥŬùù¤‹™ aħċFv×<€bïhАGŞ´>ĵ_KÊİ‚-/žĝá0ĤGûğZê*J órääüÈÍ+,­¨kéê^ˆq`o\•“t ĈúÒT€MŞĜÂX`ħó]Ù7ĥXHÈJ.'?w²AUqrı"jw:wú œH—Ş8óù‡t@pòa’[~Tş-5hï5Ĝn@éu§Í/m;,I ŞÄŠĵPàiqѓ#Ó%Ċ£qB„^*–cNŜ3 ˜@ÇFuQ1À‚­Şè^ƒ)—Ñ÷âFĠŜ çóœ,Q֭ﳒú9ċ"œ9ë*’=a'[Ùm<ôSp/yÀpRì̧íÁĥ WW„ĦŜòŭ'cPK –’â‡%}ĉħŜDÜw†inË9I+/£ċ­VH#^à¸*t€zĉ()eÚF­JÚqö#5ˆó¸èäğ¸OOóUÜĤ§ĊDús'ÀâÓ¤Œì˜­ZŻ-Ÿ”dß`IrM×ĵÄĴwEÖµ0b%¤Ż3:ĥ ŜòêüIzÏ‘!"vĝ/ç£bpjî…˘3‘@ašç'ú|š|…ôƒ‰ŬŠ"°âlÚ?mW#ˆ:ÇU19-ÔH|z[ŸÖÏ£c×ŭġ€Üàŭıy…Çġğó›–à‹|~9c—mgâ?Cßŭ8˘ż£ĵ0çKêğWϟ’ZÀ³*ÎY” ĥŭ¤î­ Xó˙ÜÀ|ÒĉéŸ<Ħíĝ‹;£ƒX½˜›şo€Ĝ}äü=,&€”çz$˙:Ŝ[ò–Ĉui‰XFË:ĵžyIP;şĦrIĜYOZ½¨jv ˆé4Ĉïd~“6iéħ˙  Ĥ§@‹ËÛC|È ˘~œu”jSħ™NW÷`ŝ‚NWÎbڀòjŜQZY9í$Ç֜y÷Ĉ%öŠaê#.x8ô7Üğó—ƒĞ$aĈȚq“fäó‰ óŭÓĦb3ġ™o;ïl›Ž!× ˆrBÁÇÑûd˙ı²F C+İñĜM>çŜî”+ĵÏòÊĵëĤpùÒLóı·Ċ€ŭĠïÄàn²_–#Ğ~c™6ˆó/P'’´ïÏN\S'úÊí‰R":&ÏO?_J”Û_‰önCjsÖ<#w—2˘’9Íkĵî$DÔÜLÉ'mÁ×´ò|î(:ߐöhŠÊÊ£ŻĞ&xĵ‰ş úT…H$ÂÜB/<èßÄñMù_?ĵzt'îòùÈS'Ž?q*òüċ¸;^}ĝš_ч„Ĥì Ÿ]u[Ȅ İ‚ˆôcŝ'W½>şH6a‰ óèhîù•`²= Ê 
ÁÔšD!§ëĵéÌ%DyW.c>k³HOúÊŭrÄϧ瓉JQ=ÜŻĞˆê×&fïk“@›ùËħÉÄ~ŻB–CTʈĵ{pâ0ŸáW E‹Ŭĉĉ›Òà&j*p—Ħ,˙™w…²˘ğיï&O]9Ô`$ûcΟ|6²§á,½èĥ(äo;ßç)•Ž?˘> üKœ17œxC¸É°6šI€ğğœ?ĥ7Ç~kä?‡ÇÁ]\m‘‡[ÂÏ:PG‚X.Žĝ­Kf=¤Â‹Z¤½àUâÍ´fo÷ȃHÏY~´ÔùuüÉÌ@òCŜ¸žR]Gkôe¢2„^˘+ù½ÉoY3·—àŜ5ĥñè˜>Hĥ.ĉĈKKßTğâG€WZ|ĦŬŒû*ÔàVî{s ĤóZD„Cë°Ò Aámhg”:bv³-? „œm@‡žA?ÜòNá…e‹ŭr·ġI€DC[€Óŭĵğ*Í;⠁ 4Ÿë;p³€˘ıgXÌëÜúĵî͙ín(Íù˜}òPĜŽ­›üŭüü6mŬvèdt\|âǜ҆îQ<<ș¨Ï}ĉiˆ;£4v^ÇÉCœÂĝpĜ ¨şŸ˙dŠ–ÄŞÖċNĜ;WcÉ}'İ[Ŭ›ĴŻ2ú5£Ô§áá=$2“ù×ͧÒüXÏIa Ċġ*èĠ´œ~Ż^Ò-E _/ÓġZHFÖ dL~ „ı;‘KÓèhĈ:&Ĝò•p\Í ŞûM=Ê[+×ùÄÓÛ‹ş”—ͽ&"ÈCä瘭ìŸ ı’;ħ,ÀÔ=@Ï ĦĴ/@0[ öèdI½bH°{*4lĤy1lŞÓ BÁ` 0Ȁâ1Äaû5tTXúÂÄÔUEjP ö "2W†Y)ˉŞçşyĦz|‚;çZAŞóó´6҈(îָǞˆò&„L˙:³T‘qı˜ÀŜŞGÛL!ç{)³cct˙Œß·Jâü ĉköĊ+h™Çc~½ß“ŸÜˆ>ĥw{€·ğ‹ĜblmÁäâî°}ïħèO’ż—6öNàÂù‘–‚wħû֘+C7\gĠĝŸŬ l#ó’/o\’·8Ñ(²yîL\ ϲOTȃşÏİ—§°†Ż@êVDUĵ:5qAZĝèlGĊ/#† ċı ­OC,ĜĞs*Œ¸˘y& @ĦƒŜĥ(փSà°xŒ¤°B‘3è5f‰Ù”ÈY ü‘µû)␠ƒĜ1âëıeÊ]E‚Û@˙³Sn-ï(ġwPê6ŝµE „ÑAJӗ¤.ĦÓóşîĜħ„o­P TzÜ£/6žz‰g ;~LèìuÂyCfjÍÍêF=ĠÊ&´4ëúà!ÄĜLĵ G’UC͋E^Ħq|ŽC^„ùrsŝĞÜ\}ÁXÀŜCĠúáÎU ğLïĦè%A†ĦàÎ=‚Rg™@̓éĜè5‚ġ‹ú[G’Ìž2´˙–Qz{.“ŝu‹,˘^iË3ĉŞĊ)ˆu%iĝ B^µ²Ú~Ê|î N‡_×LĦœĦҗÇÖA$”*gĉı/.ı¨m "œÙáĥŠ)"û{:ÛYÁX[SC]]CS6VvΞŝÁ#§ü¨hĈoÌħĥ˘ä¸}žfrTˆ…­=ö²tˆƒNĠĵ>ì¤ĥzçSÀİg· 1à#iAN}à”•b~²ĞáZˆì–Żtfîvi˘Ċ­~´l Éñ-}ĉ…5AçÚ(>ġ\”È<#%¸§bÁ$hI%éQBşé—Ċ@^ċ íp¨Ó‚h!VP?—{•ßĵ|>œà5ÔéȇYÜ<´ ŽŬ—‰›pëĞÓÔjĝk˘\Ŭc¤ŜÜĴf6”'ĵŽ lo˜ŜŭîPÄS½/§ŒÓï!=Ħú`Liĉ;Z¨+ É/!>’ÚĊ³3gäŻÍ'_a˜[0âˆi@…‡1ƒE“ɤX(_Ĉ„•JK=è‚. 
SÈu:G|éPšV͐£àk'„Ìôs²yŞż_}VċéKiĊwX2ŝ8ƒáĉĥŝİcüŻyeЇ|,k ²âV ݨGó#)Ŝ`Sƒ´„÷n˙U=Ä Ĥ­:¤ŒœkÂ:Ѩv7ÚħÔÍJˆÔڇm8!ĈQ ‘tކÀlE|dBEÌ6ß΃ÀT}jt€…,bŒŬÂb“‹; pS½ġ…iÏŻGîßâíb·ĜPKUQVJB\LTTL\BJVQUËpħ‹÷–ŭ‘ן§ÖtypÍu'džı$†OÖ" :µ݁ĵۛÍD _1 ·€hgIDÌgÄ´=\+…(mNCÛnĜQiN°ĤsˆòĦ*´'ĈÑğÚÏŭá ™yS/ŒÌ?‚<\1ök²$ Ëw@V•ñ^k›ê߆Îáà”Œ½S”~ ^ÏĝSµnj3²ù ˆĵó!Ây-è8T£V9´T)ˆĠì•&;%Í03×kŭéZsڀ ċŸ2ŽĥŜ‚# nXĉ80ıBġ)d­/šYĵŜô޲D wNmĦcŒœ[;íUĝ£ïó¨cXñÌñʜw÷˘·[amĴ,+!"ȏkÉd\È/("!ĞĴel½bŬÖè{ïr*ğ 1{˘£è]L¨ ž ċWħßy+§‹Ñ[RO‘Œ(ëx"½—Çj~ħU‡LÑnÙxf˜.oµ˘)ŝRƒÓ5èÈS ŜŻÏdÎ$9‘÷Ö@ HŠÏûÚz"PMè7dq ·y Eó ğuÉä3ö˖Ĝ58%ĝ…i_çšÑ.r „î˘dU³9§ˆ^#͖¯óčsżäġAÙ&ú^žsCH!È\'˜=fĦÖ^h×û°Éu“mŭ-° .cÄċá×È\ á=w‰Z˙ù‚ŒŜĤTċ@@H2”çd@%ŭ‰ÂŜrŬßHQ\wùŽŝ8lfÜ\ЧXԓùœ´Ìħ8äÈ A3pŭÜ&<…ÊÈí­aô\Ô ˜yŒ´˘ujÁµmP%Ü;rxˆO랂ñħÉsb§FfoŞPĞħ_ Ñ‹c$Ù :qCœ,/šHPwôÑR’ÄĤ,8Ko•%è)f1 !&hŠĊğí!ƒˆ.;= '€„ ~P8„<(ċqGŝĵ8âĤ+B$ k:lğş  >c´­4óE܉ëWZiÈCö‡`xpá’Ħd>È ÉkYŻ\żóD܋ÌÒĥQ8°'Ú ^_Ĝĉ )L$Šèşyñg„Ë-yz ~ƒ 8ŒfŸ^&ŠÈxÜc³ŞĜ B"ĴÁ*>˘CŬ ï/Y›$HKró‚Dh^YœĦ8Ä&‰1 ˘Ä_Xu EċĉìÈ)1‰s“cDžîħš|‡RŜ#9‰;ÜĞÌ´ıvMµ cˆ>‡UuóÈ(a><@“ħ̏ıġ<s ܞËè>ÒsĤ§hq–ù8ŽvÚÁ_!ï΍"}ë–óĈ9àżHħt'^¨$„sŞ(·Gf„h/üïµÔèyCGŜĦĈ_ä‹Ĵ%SO‘G³‹ wŽç'RÚĦ²dú%D1Á/ĥÇîy€ı‰üÎu2ßX{‘]Œ| ı4ŜŠcûĜ&$§EġBşħcĉĠ0ûĴÏ·x⤐Â]&è’ĠâŭGDĊ"zYoёXMĴËĤŸÚ”7-dÂ4`›6ü ÒUĝ`49@‘÷Ġ‹MċDX@×/Ħ–|=½njgÓä=לv1˘ x° 8ó,·Ìĝyĝó£KáAîv&Àƒ\È˙-|î ? 
Wž’´‰{PĝGŸ‹NŽĴħ–Üggl@?1“€‹iÍs€$O?ëĦ·€§ż ŒÚ?([DäLa½Żüċı€dp\ŜX¸Ë³aĜfè aMǕ 6O§aÙIŒÁߋ߰z#ÄDô3âĠ@ÍĵĞ trbx?Ÿŝg,ӐzlĤ²´½cÁ8+:Ù4ÖîHıÀK“ÓÈgìBö²ÉÔ ;ó'Â6ğ3™Çĝm2!1 ö|˜^˘ÒQ*â’É÷†‹g!O§ –°.’5 à9ÎGSĦô²W4‚‰àı°+ÔÚl“ùHŒ ½AžOh;€aâKżŞ#ë&àí•;KD½9wóè EŻȘ…GÎÜÚ[v0.Ż$Í˙ÓĞ‚ùT4ú^ÔÁğ‘Àħö¤Œ +0U/#îï "8VpâeĊŻ dœŠ–y,>£µµ[3“qŞ$ϟhËA°)üâġ\DŬâ…H­ïôbÍL‰âà‹h½é_°œiôoâF5`Äü܏2êŸnƒP˜¨ĊއnĦGÂU~„"mêu$ŝ[ŭO…ĥĤĈŸ ósĥQ”üG >żüÓĝŝA0@PBQÇÂÙ/ì\|ja ž eŽÔ‹?âe*MAĝUö$àJ’ñ?wX%ÊxÛÓzÚ˙ù bÔ6&ŝE§sÂáċx³ŒâDÓ ÍXïk˘tHwèÎ"DóBï81ĥ ?=IŞq“3·Ô(µèg ‚GšjL‚ŜqÙxN…#!¨o|qY5XƒĴ*ÇI+ÚÇ‘Ŭ –xF̵JOîÇ|×ĥ³ßÒÖÌÁĦl6CžÏ¤=èyäÇ[´¤SÙŸ7YŽŞŝf;hOġKÊz°\ÁŜ—Œg ­c=†m%mÜLĝh€X}™Àlƒ4 Ï-j&XŸĝ~΢Â1VÚt,ĴO„8pòQ ê-Óñ“/ ‚f˜ĵ3ó!̀ĥQÓ§–ÊUwéêv4¨v6jiÖ÷ZJfÏù+2Žä3YĦ+ƒë,֏5/ĦĈñҕäŜcÏ$U>B&Lĉ š Q÷‰$ó,ì+చÙO4(Û iBpŭ…UòÉEġ³?:Gs€›í Š|î}+˘ÌŽ|öôoQÒâK ĵ‰O›Y?xL| ‡Z€ö֗mNûÛ0 DĜl[B)Üúóîî\ŞÀ‡”l7O*ŝ; wññĥâÔ{gvĴµ3ú d>!L—–WRĠҁ- U%yiq!ž †Ô¨„²ĦŬÚgîĥŜìßâ¤ó›l•hŸÂҝwóúáPš°ÍLħ{ÛÎá´½ÜŞ Ċ€o “E7šà5\ZLġŝ0ÍÎß!C´ş?Ì͇ĥ‹w6wX¨ÙŭQr|•Ĝ/W‚ Dc·Q4ž°ÁmŝË2'혀E.úDaU$Ÿaïċ”ÒyqÔ%Ícëq)‹ŒnĊ¤)(‘ŝsْ–½ġšZ†j şş]ĠrK§ÒA–ˆŸpuèó ½‘t8MBÏ`ÒZµV~ÉÄw!Ïù4ŸùЧI/ŝÛj"“3L…ĴۆÜIÑpTQùĈ½Èg[ µF§Ĉı£dĞßÜÛâ Ïx…K;çŻJJ^ž8ÀŻxsn踘èÑ^ÖëEȒöÀy%ÒŞ ö0# âúŻ+C´ Áú(C‚ôĤôinu´…f{Ħ‚Ċގƒ'€Œûuˆâ ~=ı\ }7 †Áĉ“oçRy ž–‡Ä~Ĵgï˙ÇŜ[˙EµŻ}ÜkM03tww#H *%"`˘(Š…ŠŬb b€`ˆb¨ ¨€ ҒÒŬŬ1=³Ös{Ÿû>Ïŭĵžż`o~8Ż×ÙûD֚o\×ġyżgz~e<ş´|̅Sŝ¸£Ċd”µ ÍmŬ=½|–.ġñòtwv´17ÔR–ù³A(0>oy…Gżz` a TÜÀ`Ò oyĜ˘ŞĜìŠKT²uXo*-@5 …X=;‚ΰ³6J‘˜dZN‘Ü^Ž7Ħŭ‰WoɆô³ž–— Š˘?1¨mî _‘PŽ9‚Fú7">­“{PÏĥ10ĥ,$‡cïd Jg6š#IÜ£ŬZú Ÿ…€+P™ 2Pü„RĤ*ÖÖ)şġ×G*AğñM’ġ…ÀŭpӛŒF?·ˆäÙÛ D1q„bä3òĦ :ƒ^ĝFĊ$İOq ]ĵˆró“/ŬŒ'ĤŒ[éöˆŻġ1ĝ“"AT|ëϟz ŬË{Is6TúÎ܁â|UÖüÖ¸•ƒ]ž„£ Ŝ‚ŝzñ  µá [›¤ÒCnµ²0ϰA}àoöôT~cˆôħ^Ĉ"‚FX//s%Ubkżï†Şv¨‚?üčJs‹í}¸äîŽäÌò›c–É#ҋ‹g°Á/! 
eaÓMÑ•ÓçĞkÍA$cá{úEa€™z˙ÔFw 5I° Œ`Ĉ6Î^Ğ7í?vúüĞ×E\½tŝôħŭA›V{9Ûkü1‹ĦI5 ÷§î§)è]…/NûZȀ2È|íĠÏ=Uiô&SaDfaȗAlĤ8|‘4"ż,Ĥ™?›sò‹ ïu£QŬž ó+İĦ7úĝE[%¨+3y½a„EĵŽcÒ"AüT@ĉ>¨Mž·ñİĉ>T’„’· }v8äë `f%xv oÜMċݜCÈĉw%êawÚKŜ^ôҟÏâ-zGÂçò ŬWŠôèZ§c›EKëŭ°”˘:ñíĝĊ$$a@q#îe3MàñżDsK!ĵ÷֝şCÈj‚B`÷'¨ ğĞġ|#‚İ‚½PXJ§l U8”â¸ÉñœÍć0Šß%ùÍäİè Ĥ—@g`ök:Ù6Uc‘ĉlCħÛülZë·3yœŞŸŒċ9Ħ+~ó H›yŻÌQŻ|ĵt5I-|„™`#˜ g&9ÑÀĈÓZˆ1HƒÚ#çÖĵç×^ħ˘Îğ£÷e—çC0ÀïQĝƒ, ĉxèM#ln‰G3{Tġ;§× Ĵï#ġߟ…íôĥÖŒy„Ä´Ì­ :xêBĝğÑħâÇFß½~áÔÁ €•‹Ì´Äá €1r-kïaÏ×n–;Z›~}ǁLÓh"ĝéo9B6Ì"(ìñ ü 0˙r„Ì£ Ù^İċż]#EœÙÊ cDët£ HtNb ĤÂm˜#áj¤ĠxjŝŠ×¸‰dÀû½uʒġİÇ'{P·ßĴ0šI6˙ĥ˜a6DííkĤĥ!ÓżÌ@ÉqIpċÒSɛñ#Ŭ…òß!ŝCâfN{ù:ù]JÊğä×/ŸĊŜĵxr_àOGsmħ?1"y}§5‡nĵ-還!½Ğàé‰eĈ‚Ô‘íĥ{ùƒ|^ï×0o5"L$ŜŻd`=oƒŒHBG DĠ³D‚`x´‚ ;UŭSĠX˙{‚üî2ĴûŠbù‚óPƒ;3P÷LĴz=Yë£÷€°Ê]fÇV˘Y*ž5—¸c”AÖùüûR)YRMÁcč YÇC­N0ü€âÚ×ĝ.P› l{2s ´Q—<×i¸ŝ^ñ8İí°UFbYÑnSĝûj8°Gê=>·e‘ıŠnĝ˘òZĉN^ëv>q÷qB҇ô/™Yßżge~I˙”ĝnÄı;×y9™kɋBµ€,ĤbhËıÇßê` àŒTżßd§HFČV…A/·î —@5–yÒV˜ ĝnx ċjĠm3ܲúˆòî|îd˘D²¸£ÑVˆ^x?÷óÁ5€d‡Ĝ&sۃEä/O€3Jêâä!ŞÖ ~İ+`éĦsäKeÙ8,MSú ŝF^=ƒ…ìÒ>àMĵ Š^ëĉ>á‚=öĝċ3ß$ŽĉËÌË_BO"ŭîJɲe7Ö=$–ng8ö ı7;ǚyĝ½QìĦ9ó­áòĥU8 9"Gü2ҍ)íÀ‰ë~ÄNÎç\"f€v”ñÓ6ş}ßi[ ´pŸë'üuÜQĦĴwŽjU•ÒÏQħĴ­ÀˆğCZ>ŝËPŝ÷ş~˘ĈZäˆCArpSÄ$zzwQ•ĵÉİò@VÔ³ï(H\˜<,Ĵ|‹>pL\úô 1|[ËP×b­'•‰ g)^ħ5ŸŒOŝÒĞĞ ĜÈëĠ2¨öîϓX{ BÜKfÔ}˘AâÖ{âkéĝtMâwu íÎn›@WIrÔŝ•vÚÒm °Ĵ†™£—˙co<ŒONûšWT\ZVVZ\”—ŭ5-9ŝፋÇvù{9ši’a(EZÛnċŝ¨ä’.8Ì´eGï_jŠşû‘Äšiœ^ż PGY>[÷RE߸vlòónmTfġëĴáŞAÚ_7X#FñJaÌ$.$*ŸlĊ ×Qe·òżÀ²÷˜1xZZüĜŭ–²áÁİ  wĜġ+*ÎMIUĝÔèA?0×Dä&È7@ú@Äşfb=zŒûIŜ×ĝrpF…ĥ²>ŠıŒŝT²êİRÓ[Ĥà8ŝUĜ{Êv[hßûtm`˘ïg‘Zoñgád,ò(qKµ§nÊŬ/uÙ4~ƒ\™>VڀŻTyŽq5o Éo–]Ċ?Mü8b<Ü`]I`xJġ[h6´èé·4h×ôXËfÏĴ"Gƒ2`#=O]#ħÙĊĝi(•€?3)àœ²Ğ`…Rĉäò£eĝ„M]³—$n1;ĥ‘uŸrÚvR•ŻMN^× ­*ÄĞ·‰ŠUĠ ]šÎžx6Ÿ(ğ-;ñÊ[DhÁŭ.ĵĜkD³ níÍE’ˆÒÊè:6Żġċ3°bO}ì€ĵĉO˜Ö’#ÔŬ÷lbi?x#DzâÎmv3UX)RŞĈžëvżt;îĠÇŻ9Ee•żkëáĞöweYQÎ׏Żân_:k§ƒħŞT ˆ"JĤn›ÏĊe5Œ9²ż4ñĴ/€è‰r0wö’¨O)–fĥe+]½R ‘\t³–Ë(8a§À‡Ŭx×ŭB"ŜŻ&¸yÛd‰óŸM°Ó—‚:ŝ‚Aâ˘ÛŞñÂU$ë““×”İ;Û8OuÉÛ:˜·$.Ívm"8e˘ùıs(ĦĴ ;Ħóœħ‡x‚”áOĈ.d'#OC=lŬax4yĠLĥĴuOĥAS‹^KƒĤE‘”'#vë`ù˜c<ò‘xšżJĥ9FĵĵĠ_#ÏFÔV┆£Ù•äJۑµÚ³ç…›S‘¤.İŬ|wƒéğè§NhìŭU%ĥ´aÙŬšŽ€›9‹í„YC'Ċê6=Î*UËŜJ5ÓĥfcµÒQZ˙Ìΐ3ú5éçdjÀüb$ĠĥŠ~šf+8bZÉĝsòŜĦħ4'Ü:€âubèCĦòÔÍUx‘Eîh 
V¸ADd=diĞzè6ğPHÄ+~kŠšGĦĜ_ä0K.•U{sB;Ÿġ;f‰¨ô*`AħÖT%+Úo‰#göUĵżyp•ĥlêd1E}èġğtçiRú÷ÂòêúĤÖöŽÎΎöÖĤúêòÂïéIOï\:c}EÁ–!$m·êàÍ÷}LœäÉÈ-öŠdTÜtm„@:˜}uİ:‘¤³.ĉ7‹ßž°v%‡ %LNċe{ e^T6ï%"´0vûuHHħw^/"²Ħk9*Gñ+ÂĞ6SċC‡è1:^ü[@'×qŸhŽ í%›Á“µÄ"8ŽÄC30Tw˜^eK„I[LÎqÔò—‘\;9ĈO yŒ–Ş7·™ŞUöZŞVuëµU+:ÁôîNÈëĈÏ:jvgƒ4l‹XĠ/Ñ] èü„Ŝ6pçï–êJBR›…ÏÏjŻEĵMĜG$ş_""àµd^!ċ6HlÇ7H7R÷â[DKš=`ĥ ‚ï/œ5lİĠ\Ğ:w¨¤#P îŻÒÔĞí›'ñ³ÎIs‡–B!ƒ‚Öwî9’KÓÄdy×ĜvÔ£ĝĈŸA†D;5Ùż›l˜„•y!_’¨Y§ë°¤ĜŜf~ĤQíB/ï0µw–a—!vïĵnM”ò{?ÉĞ µ ŠşŬiâOgµ"›€ûW¤2b°ŝ^ÉÔgߟñĞMÓm_tvû,Ûòâĥy˜ŭYh2šÎ+_ĵŭ49#§¸²£ğ·`pp ż·ğ£²8'#ùé틇W:[hÊü‘*™yl ‹Ïkòôl{vô>7Mĝö:ŜgŜCĠy˘äŜz AÙ'n£ƒMÈBG³§ùMwÜD‰Ħ•ĵÉ÷~RDëëx9H†]îÄÊvŭŻ÷‚Ñ+“ßĵWLòp'Á ]Z‚q@ĵʰ$CòîŝÉS4k}¸2½ÁŬ>ÖµÙ1ÑäB:ÇŭJK‡çJ$Áŝż›óAb^_­žfz@ä([48WµĥYËr8KĜbĜŠÍ%˘[½ÔÂFé ĝv‰†\ÒĤ÷¸Hcò²[âÛÄqĥáÉĊrŞÀ%d1B)ŭ½ Š™d Kŭ–ZÉğe’?{ şËäĉOd‰{L}“p-R2om³”Ë„ĈdöDx~g·ĞMħ §ħô£ö∤ŭÁ¤f´è#d,×]zW Ef_ùğ¨½KçŞ ÈïT-K×ĠA'#JÏ)­iî읤ù‰ÑĦÎĉšÒœôW#N­vµÔ’ÄĊÔĉ.ŬġöîhġğKë,e9ğĦñÈjN:h/‰ˆÛMÀĤ /.Ÿá›~|ġ:DcW]°OÑçM˙}[H9Tâ­àBr|IŸŒ1ClYCaJBĠĜׅˆ'Ĵl)jû˙1…yİ›2òxó ;Fúe _Ŝ ]3UV"Ëĥoäpo|§ U ëzídO{ `ĴNY,‘;dİÙZ)çÎx^ħ ˘ĊÍ ŜàŒù8¤ĜÓB…5"`›Ÿ‰"ŭ,!ŬœP ÀĜ!ûŞA(û—/ ù/_Àèkô:ÛU­+‡ş/ÀIà p‡ü!ĝüÙŻ(à X8ôë/_@ ~¸b´vŽxĵÀ0 ”Çà Ĝ? €£}àfGI)=äUy!n?ħ÷|oÁœ _’fÔ˙úĉB:´ĝ<ċ˙ǐĥIIà _èù¤=ŝöÌù Hĝro€;\à ¸žñÇQènòÇ ïM/Ŝ˙ĝ½ĝ/`ŒħûfĤĈ˙ĝŠżŒż{ñà&oˆŝñ˜¸†%tüñd\˙ ´¸) DtŭñÌùÛà€Ġà ĝ”6Aóê/_@1oôɁ/ëlèÜ˙ġDi’|˙ĝ,á0G˘÷ĜO7ÄĞŠ÷PI*Š }£;À°]VpooĝâĊçԎ ^8÷_ÀŻĦ…à ĵbûƒ/’ğ_€Ó_ÀjN—š+û:úzô/_@ó_DW˙í °@ —ŜFÌ3èÓa•5x¨C 7ĥĤSO‘ ` }!ïÇ÷‘SÇ­µÚ~ÉıL¤ú.÷„Œ¨Ċ?U k\„ŸcëĤ€düíÊN×/ `TÏ'îŸìE–61ÂDĠ_K\ïJ~˘is38É+Jñx4ï NIĴħğĤˆ)Ĝs$„<^Mrrĥ+ ĤçkĝcÉ~²¨ÎžoÓüĉhDaùŭZ6tâ·Á6ĵ$,ŭ=_Żĝ‚0@Ŭ%ĝAvyxÖ£']i!têĉÎA§"'-Ĵlì_Ä4Ébsà‹ÍbÒ§ÁÑÙXYĝ5ùqäİ _gsu88˘BÒşNŝ!²@ŽÑ;²ğÀ-SÔ÷ÊW8ög†-ˆÑV˜J`×Ŝ_€ÈĝD7ó§żíÑAeŭ’Çĝ5çMQ…í9œÉWB9à‡żÚŬ1V’jp\İúŻ<ĵt³ı͛I‰üJoĵ„˙B]4ŒÑ´ñí˜ÜOœ_Í8AZP? 
êpğ+Dߏ{jáö\Ĝe ÖPġçĝb DžÀ”Ŝ3Ž?ġ´‹ÜŻ6-ëñTò>|?ù P§SĤĥŒ‡àkT†Ÿ˘Âg˜FŜĝrC¤aî@‰kĵê“ßˆŻ!ú`ĈÂj6y2cm‰ĝÊ×asŭîŸpĊxCŜ°=üÀ M ,ŸÎQ0oêq{?…n™-Òµe(qIOß2ô ½ÊN(”]fG>>ÛìƒĴiŸ9MÓMÂòÑUpàĠÚÙÁ~ĴKZW…eşĦĤqôÑ(m‚[*g0´1cœŻĞEhKSfa¨R†`Ŝ„ ĊCäÖp.W{2x*~qÍ\vŬ£ġp X™7‚ħÛRÏùèPaÏ#O {G€šOwݲĠBhr:6žö_¸›–S^ßŜa`<~.¸ 0 .Ü×^_ž“–p÷Âŝ ž6::I\vĠáğŸm%Voá“#ž:ÂUÇç\jɋôĦtÖ?Şcs›üT ³x³–GÏ=lAĉĝ!Ĵ)܊ C­³)Ki"ĞżrĈb@†9ÈIu#hGÒLQ·LĴjI÷1ğc§6\VĦŽyX’.íôLûħyö8ٌ*dWE?ˆ.ëëYsşi şE³[Sü7bŽ=Mĉ 9ÓË) À }ÑÀŜò¸Ş˙ìÖ7ŝJف‡Kĵ‘ġÌ$zÖÊbĉ ıß5ĝFùWÈûvàÄY¸ƒ6 Q߈-°ĉî“ê~ĵë’Ŝ.NDOÛ3o#/'ÍÍ'’għMÂ?úLrĊ—ÁY4Aóa?EßdĴÛÛm$ßs#AÌ p‡ž$,€^ Vİġ ·¨×u膞IÀ³l@ÇŽQ£íëeÜ×ÚTÇOŸX&°†.Ğ—|áġ^ÒBíŸN²R—QEWbÒÓÖHìn´wÏS”h~ | 0z 4–ñí|fĠ=_ "Ywíâ ¨Î'ŸP‚貐—´;k "%*Ĝ{ŽŠèap\x,<6ékQuKÏĝ4 òçKaLġ´T}MŠ ?¸ ¨r˘puQ™•Rƒ€ ² –!uÉy˜(³V—LÔ½WĊä·Çon ,ƒÑâ”9QÔ¸†m7ìHkÒèÌOĞEİËRY“OíQ­K½ĵ/Kˆj—‡X –Èüt~Ŭ&!ûŒŜ}4µ6m²€Q˜ìـzÔeŬ²{…jUÊ:Ŭ ċ„“à ĠÈa!‡ıï%mÚÛ­e¨+Žżšs‡ #›ËÄsLŒúoÂÎ’&ÌÍ'_"·™öĈ“‘DÁÈ˙véwÈënİ}\ëĜFuDy›$ۏ¤µGt‰ÖĜsä…À1ó3`ħaüu`Ö3œ%²1û ëtB€ˆnfş3GĉÌïħĈöatÍp›+é,ğL8Š—c½ *gtĞ@n–žCÜÜÎD̑Ù›*ÔíMĵ÷öˆm I)‘—çwžS#ÌO˜aĵó_˙=ŭn…è_ˆ˜DL9›UzQ g |ˆ˜²!bb*S›xȲûĤĞÏ' AŒ@ÄìD Ü$T͜}wñìCvY}Ç߈ä"†˙7"Ĥ£,û³§wù:›İ‚1DŒÇ@Ä!#UÉçW"†¤ì|(†Ĥ*bŝBÄÜ(D̛@Èĉb)‹].@ĸ>ĝ #şâŬ4ûÛzq!ÏwŒ™„ùµsüïËÉJ0—d‹Ĝżç5m§ŞÜœ8ˆfûfâœÏx†™@u‚nY/JĜĴ}–äÚ6ĵ=ÌŝĦ@nÁŻú“Ì"&ıúÁDÌ èÒîDÌj‘Ĵá9şàŽ^xτİı˙AȽ@ži-p[ß/$xî۔… ĵ Û†ž@dLl?ß]oâ1òjDۅs…~Xßnöñàâ-ëħ/4n zڏg@{ 𠍈âóûêÁaĈ ‚ngE|ĉ>dŭXÓòEn‘èm~–‘H8§Ü‰°shxÉòžjJÚŜĊ¸ vrxúš"mw7É™—Âí‘òÍÚ@Żìò†> ĥenr&_{ S\ Ŝv{tÙ.U‚–ëĴ •h”Ò‡ÍüŒX˘ˆÒL·>†[`Ġó½N FÔşËäċa³ ·¸ÊRG K÷u{Ïß}™žWÙÔ=4pÄüĊ†Á˙ĵl`F u7Uĉżĵ{~ï:wK Á!€"kèşċbBAç,ĈƒÈñċuJ”‚ÓŜçUpĴ~ĵĠ”†*.‰ĝ9ƒġİ6gAjVy ş– n˘ġ…"ìŭz’ TpaÏÒ߸€ğ Ëó’éĉĤÌCĴ“¸mğiŠ×Ĥ‡OŠ)\gtm'™Ĥâß,I{†‡vœÊ9á"FYüÛ˘Fı܋äMcë‘}Ì|ĊÏbİŻoĝhÈŬĊށ<ö ´ç£1\ڗ1k­Žb‡£Ïfíô‡ßp\´G^!'ôÜùûĊ údñ ĴyA ˆ\0˜Ħ›•FbÑÌ‘<ƒİ$yHs÷"1mPgŭ1Ċ]ŜH˜3J§n%ÁqG‡$Â)ü&Ì$żuĴ1RnÒäġŠğ‘ġŽ0Ĕm ~ûŞ“m%D`ì$Zg)Fä]½³²gF'ŻÈ‹ìëäĵ‚µ0•×q\–²ĥk>ĤHZô€JÎ$™À<°ĈyÒ¨îqxËuG(°…×€5”…d ˆˆİHf‚·kÇ3èLVÄív€[ ì܀ˆ´úI¸´ċ>;°@((UFÛÚ3ààè×_Š~·öŽQDÔßÂ(ÀD (Q#½­ż‹ĵŽt0ÀÓZ[†Š]DoAÀıgıXàOÖ§EÌ-œî8ÀRÒkŸígäS ĤlL˘Xs`M8”,Ż·àqîTš'xòeHÎz·ˆ¤xĴ+\K‘=ŜÁK…ï§sŸˆü•ÉÑ3²³½ğÈF)xĥ5q'4ˆ%0Mİ˙ğ/n 
vŽġ‘ŬŒ"=ù4À•OѨfYô-LĉßÄOANç<£àÄÛHM‡é=0GàF!éótӈı‹4‡’‘˜)Ŝa‘†L4vÂÁ.–Ü`9Df?°•ÑhÂ÷ZácĵĈÓ÷Ñwêžp˜ûÔŻ5ŸñıÉY*ŭĞYuŜäGÊfĜ˙OAú"DB²ŞÜ7"nCĠÊ?è[“ĵ²†% 7Ŭ2YkO çeêJĈ`şâ7yEĥ¤}cŭÛv9X²žŝ™+2’Ħar˘ğÙ ĉˆó'~`3Ö˙Ä˓`N½X@”ŬQÈ{ħˆJ€÷›"텨N‘uĵٜs„ÈfûӇÉ܋n²ˆˆċĝzĜ’ËbwÚÊB­Ŝvs$ܲÇì¸ÓŝN:Rڗœ­×ĉ#Wc“3‹kÛûÇĤ˙H˘ŝ~ŝ֐HMġ·×g&Ç^=²ÙËVWp $Ké8ùŸŽËH(áışÍY۝ħep訏ße)‚Èş]̝ćÒ÷›‘…ĉϙċĠE:Q…ì#áÜúl1şèĊŻp‡,qÁ‹)ĉ‡Ċ$ùĝÏġCirFÌĜŬEċÂ&ĈC%eÌôï2HĈrìÛúÇö‘l‹x7Ċu3°IŬL^8Ċvr hyK e?N˘[è?” އÜDŜpwŝÑ·ƒOe(`Boó7S>NÎSmŝ%½”syȘŻĠ˙ žêïûÓĈ xDŽkż˘GU08üï—A¤rÌáëUÇï '¸óMfî‚aJu è£?÷i.`F#·9Ŝ2UM !xa7‘Ó0téϟyMÀ(vÒWÉoBC@dT6ğ œŞħ^Ġ‘ŒĊÒµĊoó mHĈ!óh‡\}Ħ½ӗ¤.LŒ_;Ü~a†şf[IÓJ†ƒrä%Y“ÏœˆrAEĵÑgn`_|1Œ5€ĥàz=ĉÇQs²ùÁŒ|"ç<EçîyÙÀÄÇKbĥÛÈĈÏnëġÏÍӉmŝŝèÔÚyÚ ŝ"@sßŜgëħˆÇï²Jë;Ĉ À_Ÿ˙?ğ,̙ñŽúÒĴw#Žmġħ‡x$µç­=ġè{3Ĵ(Ó͟ŻoµĦŒÍö˜’qœÙrÏ\°ÖıœÏ™ÀG2š ‘͏ŝ˜á×__@Ù}6üœ—nÏFyEArD§g“ĴKÈr’šôĦV~†+jö‚ŬsXLĉÂĝÄ)éKÓ{…ôášd)Úñ$›BŜmqít,VRç+˙Ġĥf*Ù>[fÊ t=WEżjM@éşöçÓx ĈNÍĞ{P{4UÉxsn#ÑÌš}ŸA½DLpwgLĉsO÷ÇU׋Ì8éCl3Ü_mü!ç7í$×ÉtöúĦOĠ‹†ôWc!ór‡%[ĠĊ…÷€ıÖĜrN îĉ&ҍTêİĉÍn—MŠŒqÙÌ6\˙t/hK=ÂÒ´%î  ²9Ñğ…à½Ñ£ì˜ “’81v^FìHëı)êú™ßrPJxcİ =IöJeMŸ5‹CŸGñĦÎ҈˜upb#+ŽŜ&À÷(ĜŜĝÒ"X²›²bOú9h'L8, <~-.ċ{Y}çà˙ß 0ĜY_ö=%îÚñÀ^’„–ƒßÉĜĴ&Á–ÒòċF ½‚D´-şx c6&[‹!ÒÎĦ?Ĉñ‘χ Aav${†ßpc!l^/è S\ŸŽ~#Êñé+Ġ‹,Ȗn–:ĜÂ˙슚>gġ“9?6qQZ*lj`Eï –ï@ĜÒ;µóŜ í4쑔öAoí7Hٷ͔ˤ€›ilžŞ^ċÈ"Z"w7Kòi8” Ì „ĵ‡˘]c•Ĵçò€ıP£÷ é ßKµï8CL¸'iżs÷ĈĠüq3öĦ_€˙^ŝ™/Àż[À?{ ĝ÷ĝ?ŝ{ \ŝÁ×À A˙Bżàx)ĝßf?ĵôo;ĝŜŝw ä>òïHƒŝ ûw(ô>úïXĝ?|,üß`È?<òo4ì û7ú‡ŝ˙‡Ç˙Dü˙"bŝመ!Q˙pHÔż˜¸8&î_Pä?ù/*vsÀş2*ö_Xô?ŭ/.ŝŽ‹˙WñFüĞŒù‡+cŝ•FŭQ˙jŝáÚ¸Ċ‘˙pqäżêĜ¸:ö_yô?\ŭŻ>ŝż2şs™>!ì Ĝ1Mo,X´ĉ'Ċ—+ĥ˙ À·Šv¨ÍŸ}I8$&0}ÄŒ:€:ÔZħxĜIòëìjòCì,Äŝ$kZ3ş¤1É2 ·;€èYŠOë:x²}…À€sœbœŽ§›‘‚zg.IÉ^<&&wi|òš ec5žCÔbż]H¸Ċ/Ü$NñùÀ X.Bq6Œŭ>gJZ•8„w=^*¨ĵêÁĤrÎ:Š!èülċ£­bˆ°áê+m œÙġ#zßb} JSĥZĥ/òMŝ 0Ÿ9ŜS_ò-9îfĜ‰½ŝĞĵ=\8Ísp˜ç´ÀĠ{•àŜa7’ż•Ô÷Œƒ7Z Îıo™•2 %Hè/Ŝŭ£‹‰3Ú2Ĵ6FÄ,ĥ>uôX.`Š1dz9SXÏĞ5Dzé.|(q•ÁôÜolĝ™;Edù:ƒE|S!żë‚aá[öP¸Ñ'ŻŜHQı69~INìĜàôUYİK3½A$3ĝ%SŽ ,K+Ú'ƒ·>Œ.íX…!£L2c–Ö˜Ê~b!għ‡äĠ³_%†‹­Aê0úC܇™@<ˆŸ ĵœŻÖQ(²?@ùѸĝ˙?kDƒ1oÍħX ‰œ`ÏqÁ6ŞN]AvòĴ#j4F@ˆ 
á8X0˘À+ż;@-l’[ÊÈWS6:=ù˘~ĵÛà!&>cûQS+iÉĴġ`+;ŠœÄžP>:*,äpŽ-ŝkŭüÖúlÙ|8$,*:ŝ}VI]×È,ı3ŭµÙ/#‚—Zİ£1mç72šĤÁM\R”Ĵá}ñ[?ŸŬżŬ„Bôkĉ1ËŻ,%èï˜áĠ\ħ!‹x>ÄêÎ#ŞÊħ[s­vĴdИÈĈĵ&HL<¸™ŸħħJâ5o£¨?`v é&+à×SĊ½/'Í+²#œjóAG;ĵѽ³Ġ6´[Ĝƒbúväû³ĵQġˆ'ċ v9ʏ%­g%ÓV2R¨~ìgÄ`0{Üĉù‰ĉ÷èĜL½}ä ıĤBêl<˜c£œ#&f(Q ­y;|•.¤êż O‡”ÖákûŜ!1ÓĤvÌ[èĞq+PN\J\i›ş=`èùׁ sıXÂk$9‰= ­ŭ`Z7¸ˆßrïér#‡Ú^-CĜ˘ˆs‰j‘Çğ#İŝ˙l"td|`7ÉôžżYüϰGÜsâċD½3×µ‹?óşCU .ɌİgŽ€Wŝ…ġŜĥ!Èn {ö.5Du{Ĉżŝ†Ğ(AgGÊ>ñ=ÄA ‘[t1{cÔĊÛK£dġEÇ^VŽóù@tğşĊICE…•çx]zúċWç8‹c왑†ŠĵÏïŬşzñÜéS'Ož:}îâĠ¨ğŜ}ÎĞhè™ac8Ÿ5ŜùëËÓKA^s”…QTTiËU`ÏÁw|yl‘:•ĥŽŻc`#ÙÉ!b!ß'”:Q×ġüİŒíŞˆÚlßo’%ĜÜîĊ~úÚñÙ#م ÚÍûĵ˜ u}|ĉqy1žŽĜgà?# óñĤ¤ŬG„L>ŻĠ%ïò,¨—8EÂ7ĝéj a'ğuġ¸ Ŭ£)‹ëLÏ%=€—à šaׁŜŠ\FÌÇI{ġĥRàÄ]ዕÉĝ+ôÓÎt:y×§¸_§4ôŭRŞ8ŬU¸bÄ:*ُ~Ğ£eY9pC¨ċeÔcü%jŭİà óTnÏġÔ1âÛ){ġ–J9—™÷”ÜX ĊŜeù²³”}w“ıÜÖN ÄÊ–ÎnCvÌTZŠÜĊ>¨*ĵÂâA‚*r…Uë‰ĴlbFJËŬ`ĥm&żĉ×Ĵ%>cw—<ÒÉ}ï„?˜œ}n‹èGôór6ˆQ—Âʙê+F^”Àĉkĥda·ûmĜè‡@M@²FU²¸Mŭ5‰B†[žÔ2°Ñüka]1ò ûĜ8ƒq‡+Ŝ\ ˜§.‚˘4E3­g£?5 ÌpàɲgÇúÚjË ²>ĵIx÷(6öQܳ„7)Ÿ² ÊkÛúĈfٞpfšŠ>DŸŬêaĤHCQġyßT sħ™ĈaF"°ÛĴ½‘?Š1jŸl1"jú?nâ²*£aĞĝakğï&Lĥ½Ö ¤Àd1ßTĜ˖RĊ6äú#ôÛ糓Œ§÷ÜÎ#’⇺ÙÏ ‰kkĝŻI›Û˜7ä¤#™M+ÏZÖîĈ+(&âŻT?`wE,+gv ÛfK ċR·“ġEÎĵİÛ^ê3ü„·‹pXħħ܍”÷3.r•-êöSo½–ßì ΰÔ~µ%ücÔ²rj×ÁŠu–V÷ ï‘<Ê2Bd†˘‰%°$(û+ĠGà-QZ… WT‰mĊ÷PóÚU\™ÁħNäÇ ™^Ç/§ñLqϙ 1É²ĥ½µjĊK…˘ñšW™J%+Ú½s`z€^iK×גÊĴvA}G²ÊÂS …‚û§¤d.O ‡HHš½ŻC\Qˆ˙Úì´,Ϗ*ı³o=§…˜_ïÁޏk!ÚÇÊyïüdPŭy nċGa˘Án NdŸq’@¤†dôñ9íC*÷n‚W~LÑ:^ġ\7G´Îµâċ;%İ~yX ñ6ŝ Wuîϝ‘’žş,#6Ŭ,d˜‚gY‘ވşT3C)–°ˆRl+éî@‹N’2­_EŒÀ£…–NĞÔöÚÊŝ˜ô˘ñÏwRĝĠĦg6ĝCd#2]UÚó¨{­bUÂÁĜ*À@#q£ê+qeĜò JˆÑC2İêòşSüÇĈž{T¤î1‚9׊q }7¤½uy>cĞןIŬŽAX>ùs4štŒÛë´ ›[MUKFTâ°ĵw’V CŜ¤+p Ô/`%ĝtú!›GşV"ú‡·ĦÎż87ò~-^ĥĥržè‘ÖUc_"sX”I˳ùí!ÊD··ŒİóVÌzĦ•o'y!&@`~֋&¨"R×+Yĵ–gġH nğ[6…ÏT=Ŭ[3Iyŝž‡ù½lœÙû3áÜúyb€=êĜŻĜ}ñáûüšQ:Ĵ8p#=m-ÍMMÍ-m=@frĝ@‹áGğjòß?ĵ¸{…½@& bó֟KĝÙËÄÙ½ù÷Ì­ŒŬ§U3ĝTÙ]Ö‘ô6>káħ*Ż{H!ŞIƒxï3àZ›„T&߄ìÑż0aó_L1Ŝş•CÚùÙËIÊX s…_°êu$½'œÖ­$ƒW‚#Àâ_ĵ‡ŠR79żœÑmŭ•]#›żÁnÂQV>œݐĵ‡Ĵ$ßñƒ2 }£%ŞĤ­Í†ZuíĈ:M šsò%|Xqèl;5³_Ïvĉ9r“µP{èz‹a5—AüQ'r”ko9"ŜùùVEB÷M´°YŬe¸ŸÚh,úİMj;Zħ;üáÎÚiÄÓĜ:ñ²feú ÂIüá9 ô“ÎÒıc ²Ĥ½iLŬEüyéĝ/pZDĥΔ›J<ÇcĊL‹Ĝ§IN5ô$‡rN„¨N žäñÁ`²Q2Vâ,ÈĈsŬۏü†@ŞBèë•-bñhzĉİ èzù…[$ɋ§8Ù[ċs6áÏĵĈĉ§KĜ첋ö4˘Á÷ĝT~˜› "f³ïM 
ʽáo"*;yV¨hFOQBh€“6@âHbŞ‹ŽF>K/Ĵ…-žĊƒw& cvfjĤffŭ×?eÁ!Ħĥ0ŭYäрEŞ`š Ih;„&-˜3Xòìˆ@L)jâ#wc·ĵÙg#†È¸…ċOáïwiöËĜì’ÓĉD1ŻgxÓĠ9ı­ÙœİÄEdÉ-…ü^Ĝ<™~dĜb „*Pĝm÷\<{âQ‚%‘ƒǁݝ†§èˆFpÊHÇé5N¤Óì"SħXüı„iùÌVä xÚ ~/%?À/ @4ïé,İ…cıÒΓ s`='ÂO^]”›ËÄ×a§‰iÚÎàOïV\o—jû„ĈŽŞùáËtghMï„1 ÷@73ÖZm6á֌‘37D¨¨Nb+]´ĵVj ˙á͔vwĦĝ*N4" ‘ò·ƒ£<ÂMQŬşH\#‹êÈgÓżn‘GÍŻĥâÍçAtµûıUœĥ6k=&GYW‚—û İDLLE*Swu°c5)Á}3W¤äo³ë—!KjYábÚïñ7Ş2ħT ÙxĵŒN6ç4aé@Û|0G‚œe¨ÎT>“@ĵ ÌŜCühâ6†nç 8!h4g•xa·ĥÍÔÂ9ŝİÚrÑíĝV‰ş"Ħ³ÑÌ-Bv-í ËÌ n“D9P@Ŝ%ӗ€¤ĥIîĈĵ´FŸ#ÏÇ˙C ·šz W т=‹áï"@ ߋE!Ç´p~ i=ó“€n)“Îهìeeİjd³ Ğ‡ÛÜHgĜEĉ‘ĵ\3êEĥô ›}Â?[7µħ€~x`ö–*e[ïƒbó†ŬŞLZößŞNpnʰ-~cͤĴû‹í& …Ĵ,lž0A'5ÂËoŝE .Zx]âaPI˜ú†&U há½@ ßıhá@üW5]ètúúÓ÷ÙuíÓ´à?´p/ž>5Úß^WšŭŝéġÓA MUÁÊÓĊ;Ŝ+ …W&…ú-œĴâr8ħháċÑÑÂo‚Ċûu Ax^˜àçÂŬ˘˙˘…‹­H™a}ĉ’à;ÔCğĝߗ‘”CûÙol‡ĵĤmĠ[³‡ÎjÛD´üŒ2#ëßGÖEŞY./RĜĵˆ}†äÖ6ĵ=ÂÎÖPÍbíEöqÒe,´OÌġ¤€~‰Âö-ÜWäû°…^O¨?\á_NYŭ‡>uTË Û-ٖŠ$ôÉì<÷@eÄŜ Û¤2~›T\LŠœ5rċù]$tŠ·X½?pç#ßT*ĥlġO˜²żÚ´ç ĉK.šI^FOY2•-kÛŬ`Ĵœ?½šxż/´¨żužp ŝXÔò×ìndġ@ïJdÛXç d}ßhÁħ„£ }ƒĠ¸uÈĈżÚĦŜĜçyȂ/ĜïMĊ ƒĴWvu0ü–N·àĠ•Q›ğ}XġI]D-8LĴ{4yżWĝ@òf@ó;ÏŸĈ3Žƒ7RÂn˙ë&&6^ö(ÈHÒsÖ^|+àĊ3û+ŜßĜ·ÜZ]`Œ JkZ¸ĝî8˜–]RŬÔÑ;82>)MOMŽ öv4U—d§%F‡ŸĜáëbĦ)p “ÄĠ­—ïğñ˘œA#Uo/ÀÂċ‚•cÌĤ×ûíàw8ž1ˆOçŸw‰Áĉdĝñ^ùÉ#š{Àu›Ĵ†èžĴĈúîÚ ÊĞñ–ÓnƒäÁvŻXƒ)›~c_ ó>cŜ¨ŬW<Û]ŬÈş!­+q$ö­GVtŽmCVöĴFvÏŝ²}ŒÇÏkí_$tżN\=ŻlÜm+›=µD$•L8mf‘dŝàíĥ_²Ì'èl½Xi“ĵï!­_}1ï”PÑo‘C|W£ÙHÁÓ=² s²G6İM…Sjż˘/úċĥbŜÚħHJŸò ì­¸^jvŒüy@ÏzJ  ÙOJšu•-ë6Ömİ×2é¨ÓÖoì°P̟\J‰IàĞ•u‹gĥ ûYyş²Ipm™SÊ:ErúÍ8EĥùÉğ-ݏŭ°&nï›:+Şò€Ŭ²‘dĝ’Û´EHŭÖôĜUĦuxùFšÔzĵ|›uĠ7ÎhĴ=Aa÷OŜè *ĠŭQŜ~×Y˜luüÌÎàŽ^×Äċ4>Ŭ B%Ï“_îíÍV„d­üŜVƒ9v¸&#ú„˙ÁbâPM‹Ë6í?˙.#ğ°ĴŞĤN€pĴŻİ*+ÌÎxyv˙Ĥe ,4êPĜ8 ĝŸˆÎ¨wìpċÛ0+YxżĴ6ß(ƒz>‡z(Áò³ùi#‡Û·ìÑÎ`ĥ.9gEvێ÷=r§R=^Œò~îV Ĝǎr­˘Jl+Çë÷IÑ6–ë„TŽMßRÚÒÄ}iHÚĜÂ~ "zvŞo;ÑúŻ*y›÷ӆ|ŠñۉtŠU:ĠI²şyĴŭȖ™b]e€³ƒÊ1޲t2_Ñ˘£Q_ğDĞE׸ğLÖu6‰´_ yš²ÖĝL>†­“Ş/ĤÀV(÷ ħÚŜĜVıŝè×ZJĝ”Ú&$œÚĝù"r’m5{T¸&t‰mo:‹<Ÿ4µšyŠÜàx*4TJû°cP<˜ĝ’ıLôë¤Ğdö„ğĜ'°U<ĉ‡+À+Q§îvÑçX$Ù£§Ûƒtž›İ ‡€9Ġwxĵ˘Â3ìĞ ċÔtçzt~!öJ²·&\V"dd6ÀÍĵóĞÖU ‚{*gèşb~4‹›$É.ÏÇxĊûĠíCEzÖnMDnu|/>’ Ŝ8ŞĠáŒa|Şèš AHw͍|yv‹Xg&<Ïġż˜T1ÖïñĤ/ÂvzYiHŝñA˨Û-òŬ²ïÔëžĵL~Ÿ–ñù˗Ïiï“_>ypŭÒİ}[|ÙĞËü1MKjXyí {ñ£i,äƒIŭç 
ÄÔfë"uƒĤ4˙Ĉ]!‚ŠÏµ˘)|8°Ĵqİ#xoüj9DswStt,û‹ycÏ]Ȓ›@…yÛ1ş>ÄIu'h\f%X!ó>òšwÀ1hz$DB6|Ĥ/EïV8]ß9}Šbò{Ĥ żS•K€#€v&÷<É£ğǃ‰=uhïv}üÇ`mù$ĉ>‘-é:ùUtó%1EcĜ>ҕ žœÈÓ+ÓÉçHì´İ=û)ŻFĝ(wžû¤HËä#Œ„#iHz=ġ²ààŻ<ü Ié‘÷Ç·H6ĉ ÇVÉ5ŭŜ†&ŽZôċŠá>Vü)ô&L¸ÏßûÎyä ˙9Ġsô—žJ}+rœûIQ'u”!틈'Yes)aìR;ÒÑéÖÈò&(ñâxż<×",Ġ ]Q!ĈÍÏÀêi²§şıİ.íîçBR;KħîkĤˆĈħ*lÔĦ‹Ÿôí÷\EHĉ' èĵúğ^²ˆĴgì½ŭéÇ‹ÀG@gĥĤ…ù‹£$9еpqïe`ü钔ÛpĦ7 ĵT Em3{·e냏„œżyŭĈë‘—χ \·ÌÍŜL[Q‚ 4ƒ&P>¸RÒ5ÍÇà8‘şÖJŽ„Šû†µ2ÁN(B‘r<žŜg‘(OĝĵîÖóè'ÍI"÷Úñŝ'‹…A:€UÓ@LŻuc;„–~ĉ„k\RıŬ§diuXĈ|5bj•Šı"žżxq Wd4-GV´N%ٕ²(sËX'‰‹À…dċé(~âGĥÒsTô~zRŸó‚ğáşŸŸŒŬüóxp׈ĉöXŒĤ’ۄ6ɭŽ ċ5JnÁŭċ{RgPòKÀej}:<ŝFXÔŭqOCúeĦ²bòE–%:!­WĊ›w–ĝ ¤S³OĞħĤ^8TNµ`?7K7`9K‰Z7§&#U(ÍÜDc˘7N“vdtäĊ#žnâĈ%ÈĉĦ>?dp·z€Qf.ö(l×4äEşe %£żt5JÇ<@ĥ‰ËßΨ@,b9ƒĉú=…b~ĵĞè“Y}"žċyĞôĤ3,mXÉĊeB—醞¸ż:lŝÈ´úPG€6Ĵ5y1¤ĵß$Óŝ•x•=_o$ çz*µ‹­ûL,Ë[޸ÇÈ°ë—˘HžôüÑy›Ŝ]­Ê!W‘DŜ1d7ûĞ˘^1\SƒE†R/8 ƒĵpšEïş˜nŝFC<œĠ°YTÁE=8Ŭ/ñ“C³·Ô„6Ġ`™‹PƒSÓ͐ıqÓôÄùċ£uxs¨˘wĤë‹^ $ìĤXOqhĥ|Äşâ×Ğ£"ĥÇż 3ewÖh“ Šn'Sš™ĝLŬÛ³Ëo€´éÒ#3aġĉNv”ĤĈœÛıÜÑHE>܁L“QT×143×ÖÎŜŜÎvîSCuE1™€À2!İbä¸|çı˜ÔҎIĝ™,5•<˙ċgßÖÍàÌĉ”“nвöš;e3ĝ—ĥ"¨úúĝ.lò+4ĞÄ=cğ@rċ!,´ şĞ;fĵî¨2a~"}:n.böpzêş(ĞÙ$¤vkvè¤8tGRµžó+!ËXáâo4]ħëĵ Z8DŝBʰˆ …–b=ĊŻìŬÈ1^˘ˆëP–nMŻ|Áè|éĵĊ_]†F=ĊRŜĴX0=­+nQò䆣I#zóÙW‰_Ûe6ák”‡^ İ­ QÌġiwµâî“î{~n„‘“ŭ:ákğĴ?K¤ĵFr=vŒôiÈrü#y/~cŻÍsP(ë³Tİamî¤-‘·ÄÁñ"óğÛç‰<ÁoQœÛ—£GY?ÍDïbŸ´¤b°Ż†Â—YĠ팰f<żÌq+ÂÓ­Qïb<ËħNĉv’ Ş ÖPd€§_ı[†ĵĝ}ö7UdùğiNŜnUDk‹Ux̄(êUeUD¸J"òK"Kf‘ì0OeIû|z; ŸŞM>ğÂX‚@0Ĝsóce?˜ùoïÙırıĤœ…q‰",*!-+'Ż  /'+-!*L!à_)bršĉ VîĜîLı…?™×Ŝ=_$ôÁûy‰4ŻÉ\8W¨Xö•)8Œe‰`ÇĦ'ñ½ä–†CŸHǰġ’5ċ"ğpÙöŻ„ët3'(ĝ6~F_ôIïZıâû ôÀcB^9bÖÀ”R-ß‘Lk‹™8šÏgĜUŜ İ_Ê g_÷C·é& È/¸Ä{‰:gQŻħ_zÊÙ̝¨üú…{ŽèÑ9ä‡l›hġDwOtĴ@üş&‘çdâÉzÔ£c#Ghêħœ–ÍdŬ8v×~Q™3Ìçĉˆ;ÎÀ% ‚ë;ÖxÌ\¨œ·àu§´ƒs°­"šó7‡½,hƒÏ0oĥż.7ù~螵ĥFêrâAG˜$ˆD‰H$üŭ_É4q9u#[µ{Bï'çÖġÏò`ŭh+xĥyĤĴ*fĦok§0v××++u)¨ìüğùì†':DŠĊÁŒQĵ?i£*"çûrï|àB8?„àœ˘uŞoîĈܘqÖ;W‚ĈÎ;GÄü9sàŒŒèŝ.vœ.ys 'Vvddì(U/ϜC>4Ùċ‡Ĵè˜ĜzĥNlCü†:=ˆç¸_TĠżCÓ}'3[YïטÜżŽCGö1û‚>Ï t Ċ÷_Ï.Tnü%µv„İ9ĉpŒ™ħ°fFµ€Î`6‚\‘Gx< }ô<#ä—ïİĝ+4Ǣi²àĜPH9÷‡šJ‰µüó`"_ Òĝ[Áy&Eh3ï˘ĜPGĤ-†C€Nùĝ2ò-üĊĞۍ(h~]ĦĜU2Ž“œŞXçİĤYĜsÉëìşÈ˘r ìéĦßTŜÛĊ~bHXZ ¨Š)žíáĤ/"ޞëÀ`„ş<9ŭĈ–€·SܢŭšˆêŽŻ³ÜŞ0[АUHŝ,żí™żfı/ž@sb°B3ôğ– 
Çóİş÷aëĴäÉUÙÚ÷XtFu?s§ûêòŜ?şztÛ*w;S9IQŞ žŻoš‹óéŭĠÑÇ|­•İYŜj]Ĝûş)¸ld^ó3¤!6Á‰Íŝ쳤4üŸµñgóCĴ„(ĥaUÜÙŻ;TÍŭEÜİ·°x™fĤ/§ÂDÖqN•¸(ÛsVQPŭ,XJ0|ÂîÚ+Ĵ|“Ŝ³‡ċ‹uìë’*Ïħ,SêyV•é8£ÒŽr…÷MS!‹ ğuwıPà·ÈËĈËuà°˜ö´‘w°hÈ6 Ì8+ünTY0žçŻ•¨ĴÛü!Jaœ7ÈvĤĦÇ5Wàŝ*c÷ˆ%ù„g=È÷râŭpˆı˜1ÏS*~’/²í,@ónP˕ž°0ŭ§É½ä0ÑÔ5Ş[ HğLfJ¸MäÊÚÁ!@³|ԃú ;‡nš-§ŬtŬX<Ê5~††l<ŝZI1˙ -%èntMŸQÊĞòAœ²ñì…È‚LĵlQ÷ÁìĜ5‘ÉĈZÊ“—~áŬ4EôÚñړZˆŜéßû’ÙŝZ"}ğ*ċu݁ÇĴˆôA¤žÍĈéĠ-D˙¨Ĵn6>ӘħÙA…А¤í¸ò2·i„ ӍĊŸï…ÙħÖÛÙÎ6}e9YiiY9e8XĜ9{ŻŬq$ì^âçâĈxüs¤)÷ċ•‹Œ¤IUĊasDZ ÎîΊò7AD-WÓñáĴ³ È &Żáž—Şħ=}‚ßp͞,"p]ŭ>­‡hĴĊÛôӛCÜ/KÉòG[ħl€ş66û@—¸Ş Ï\€,„_‡âSĊ{Ş.rvş+@;‹×ŝ€'(*½Ĉe52ĝ×(óĈÖA-0SEĞ`vz{Fġ-ׄ#€lDĉ¤‹tÁ •zcĜR`Ë>Lú0jj1ñà¸j aŒ…û"ùgċ<ÓÌL#÷‰ċߑëJÛĝN6ÜCâP!|6Ĵşß!јK>Í[˘Òŭ|Û VÚĴìÊxŠžĊ÷’èRƒÖ*•ŬŞŭv²ÙÓË(Où'}ÜR– CKH×°—’ĤċP\7ҘphĥfñL³²gâ ÙâŝŜPhßàäYq…ëôÂJ×ĤĈ.)ĥĠc™cÌWóPŬ>Ŝu˘ÂppžNò˘ÑĵÇy' QÙuoĦÓŝÀSĠ |;€gž€“—’OäÏI|Şôŝc˜Ò2[•ÙĊÂé­ßnıh8ˆˆşŬê#wRŠÛĈX΃.ïïüôÄèˆ3wĝúx8;9ĜÚX[ÛĜ:89{ĝĝì€İ…·ëdQ“ĵñDÁß i4¢ë~úÂuÑyݘc7t™Xŭ6šÂħİkJÂúè×ÄÏNî2|³ œèYx5Ï&.¨™=DXÜ>²jċĤ’/ħk¤%C –R¸ûü§”eÓÙ²vŭŞŬ•*փRô$Òü,ú”áŞÜ\*ĥ;NŝÖ­²„wšœÛ(ħ_İ wŭÔ6ñC\'ŝ6ñëˆ? q÷J÷& i­âûĝîú“1ÈÛ5ĝuĞş³o"Î*ÍÒŜd<'Á{pÛxÎÙH~Ê "\‡VD07It~oó\É· 3Z;ñÛR,,nRÄ:Etn÷µK (PͲ°x5ñpfójt> ˜×7r_×V nÂ6Iœî“rÔġxÍ)êšl^”1,mx=œ µŽ”ñ&’}&§JĜÌ˘SsȔı!ıÓXOÒ#2Q{]t%+ĵıրo€˙µŻíLœÙû谏‰ !Ëşn:ó0­´}L0ĉǞlŻù™ù>ááÍ+çNÚğ{çömÛĥïܽ÷‰sWn>LxŸù³Ĥ}pfBĝÌħöÒ´‡g6ıʐ’Œ‰ÏáGıŬ­Ûż^ó‡çO3X{³p §WFŻÓ&’v$ġ`Óı!s)ä9§Š˜ì’S&)ßä ^ÙXÎÔm°Gġó²×PöÔàëİr'ğ9I6‚*HġZ˘Á nz˘9ŒJÍGW73ĊĠâħ,3PîÖ¸€½Á™xŠUd"ŝ³ü=ħä\o%ç6÷ÎMâC;î:!ˆ÷”ĵ‘óîç÷àüw÷ŜÒÍ*ΌGÈMĥğj÷7|žĠŜ"1“úîü}â­iHBŻô^. 
ú#ĤìCR}ÏĴßÔóL 'îqZe ċ$ÏCc=!œ³Hµ3‡‹Éû ÍörY÷Ù·B[xÑ`½ó‘ü™éózL2™ÈI°ĈWLoAvÑË-D`’xyDĜÄ"ĜĠ‹Ġ-ŒpI…;Ĵ–˘i2żj5ˆ³9ûĊ¤ŽwsŜ: fħÓ3Oĉ"†Qĵìµ˘´iŒÙ÷+D…\`W­I˘;ñ‘w›ĠİĊ7³ıġŭÔa(pÛóz&6œıZŜż+éͳÏob¨ż*LsÁLĜ’íçcÓJZ†£~|ÖôHOËï’ÜŻİɉÏbc˘£cbž'&§~Í-ùŬÒ32-˜Îµ”¤ĊžßäÏD˜°Şh˘ÀG<ۜ~ĊÏžżŝêȜaŒY˙|ŒŞû=Ĵç²ß\,…¨o~7‚w>ô%Y_mÄğ‰x?ËH[A]›Íˆ2Dĉ>™™Ž5CŜrşK‰íïä€B}u?ٔÂş£ ÎhY,ŞfGˆÁ¨$'))żˆZ”Ów![ĤAŝžw df*˜6ôΓHgú“ñƒÑÛàĉmz;ë.[Ŝi3ñ6î jN§ê"N8áŭ †ï$¤’vœëdÁë“:Ä6ED;ÑïUB3ş>ĝfıŜdäé˜ö"^9·EÎ;IÊÔ³™~EÁw N8¨Ô5iÎé/–s͗u,W5iˢo „ƒÏŜ½żĊ^ä1~ŸfW?ħ Ù2ÑìJ;BcRòk{Ĉġ‡‘ éħÁžĥĈÚŞŠ²’â⒲ŠŞÚĈĥžÁħé?A<ĈxOm~JLèè#²´ĦÇiu|l².ċüJCxŝك|vËë`+Áu´”Îïˆ_݆JzŜkĈ&?mSCTĥc°rv*ĦĤ—[ñĤs:ˆéġ~~ÎZañíżêí˘ÁMĜW7Ô,žÓµOD!bzĝ„¨Òmf[Á:o@=5 CÁËZ§ç˙Ĥ#ş6OlA6MÔÛÑîEì[úŬ)1x8a=KÎŞ£ÙDµ|A6ÔQ¸ŽfSŠDŞN<„jÚFo0ƒtó•kÉ%‡i=E’{ċ6>ş3BUßÑÄNQIŻ£„Okúâ+5'î5‰ïĈ–Ğ€d:ËQo8…pžï+UYݰˆ‡€½ÄDĤ·ĝ÷1'Ù˘áy29‹EŜ²·ĦWñÛ¤U“:*ßYÁÈNzħĦt"~_dNó8Ñaĉ q~뒰~ŝ^vjŞoÉì^à‚¸ĉß"ó>a5)ʗ†™/,‘ıÏf§b-#Xr6@5y†ġuƒ4Ñ&Şï}ä!B²:_rî3Ö’Éôa|<ûÜ)hÇNĉ|˙÷ˆĠÂM×ûäˊ.6ۑû4d­½†ÇŬXô›ŻĊµƒ˙™ù3òw(̃L vÔ}ĥßß}Ž:è ÂJŜÁ7>V qpfOŝ£ŭnDÔxmdv?ŸÛ•zbˆKm¤tRŜ9Ó<(m ïŒġ'˜žàM½_#I´żÛ‹UUG-ï ñ²ŭhâĴŬ-!²í7ž·L08*+v´Ÿ~CIôĝÈxˆˆZŻÂYŬ:{†jü{"§ü¨ü3-†´ĊÚħşıb/°pâʉr]µÂİ•äX¨ûħ;p;§ïb7ìwĤ=?ŝâVwcÁDILÉÁgÓĦ°{ñ³K~7wġLLÍÒLĝbg§&Fúğš—dŒżvh“ƒĦŒƒÁ<ˆħû֋ñùí "jú|#Aüônĉ@?¸+=ÄEÔĠ{’ğĝŒŠ¨Ċ҈˘ïÓNl<5P Qܔ:Ċ+?Ħ‡¨î…ŽöӅ$™í?ù=áúˆĠ£)ú ÄüñìĝuMÒŞ<ϝ“ÌŻñ#˜§`ĊnȒJ^Ĵ’dğĈÙ7ş]Ô8s€àÑ:Ù8Ŝ8z O˜S3ĥfBR–ŒVhkU ;‹§3VSÙë…^3WŠ|Ÿ§\Û â8-èàï¤ŝèRwaß@_Á€ÏebV‹älİĉx4!û7ċòĴŽ/Ĝ T™ˆŞÎBŸ*l—kŒ>A’T—bG(…2kà>dh5‘B:„"&Ò=¤‹ûÌ´›t [ġtëşĴäsĤW=„‘ÍŒU­BH-3 dñhQ³"öY²C%óŒUò3,ώ°{ĉ‚¸â]VÛV’ŝsnËvŠjÔÔD¤yġOĵr+,5xeey{<Α(·³ôá àuŽéÁ;£ŬEH!àg˙ċ&¨Ĵ~XÏáĥÄo3Ħ˘ n§Óğ8üáÂû6`÷–ĥXúşşÁœÑşoNot1V´÷İÒêĤŽŜë÷œşzïiRZf^qEum}CccC}muEq^fZÒÓ{WOíYïíhŞ.! +ğl<ŭè[Ŭ(Œ –ż]c! 
vr›Àû…|NWúi7”j²-…ËݸZ‘p‹ú vû ’ˆ{t'Ŝ Úc‡ì”#:Ĉ³3–S¤‚*ñXĥVâ?W“Ġ"'Ĥ˘T)Û[¸ÏġI[ÛXwĊ/Ìto!Ĝċaϔïñ Ĵ„Î0+ÈgÙEf˘Ñx˘ŒA!4ĥÓ µTs›aàĦŠéyĞ:]½ĈVCĤF-³biz")eÂÊp(Žkd )G°ŞIȓQċĝ&…Á§hVµPä„J ò˜Ħ¸Aı˜›1=œT[ĝßKê7XNşCİ0ş^ìgğúĵédÒ^ßĉo%'²VSß2ŭ„^ÀDh8~°™ 2ûÚa/ò ü‰¨MŭĜ:d/½|Ž-,CK*û¤+Ċ-u$ìŬG†àÓ7k‚ç‘&%¨ƒŭD_P Éó!èܞœydŽX?™ynjÂuġaÔòzŜpÑ•ßôi†[~Ĉœ(âzĞò§miÍ d×Çm2¤À’ÚÁĈĈJÏW&#"şû£³Ĉo(ä§GŸÚèfĤ"è“„Ġ ­]–ßq0$,òNÌgñ/_Ĉ?{s'2,äàŽġK]Ĵ Ġ¤…I‚~°Š™ÛĈSÑé‚v˜È³˘÷{èŠ dċùÁKÇ0vGj<ŠáĤ¸z6´7ëhĥ§AßpËU„h~Ĥœ;ói“ĵ.'RZ˜ĝTuâqOmaDHÉ. ìMY<şÙžŠ´˜3[—Xk Ĥ| Ġċ~ƒ9ö.KVĴ Ĝşcמàà=ğvl Xğb‰‹ŭh†0=¤m½d뙘´ŠžYxúÊŜ„Ĝ) !ÂڞÇЧpfKÊ xŝúŝfò{Ŝîü<{?á=ñkäẒ1ÍûuÚĈY3YôÔU˘TŻ·tz²;Iŝ@ÜsÄDĞb?²Ê•ÑÙ{Z$˙~Š%ş²ŠŸ¨/´g`2DXû5–5‡|dŞcâ×;ĵYĠ;°Ù9]cKğ‹żŸS3îK¸„=£ıôי(ċM-£ÄCAĉ6Dµc9~Â_Ĉl4Z˖rn!÷Y.j]߅ŽbòíYÄKlsz1÷7̄Z8cŠ£wÈĠ´aĤĴA‡(nTm]²9›t Ġ„0ŜRı† ‰Ġĵä>g…x~ż™nÛo(8ŭ”w*QšÓÙ`¨^:şĈNLXĥŞV#÷eĤ⽔ÖùĈż!büƒGBóŒ9‹\Üp‹ħD=Ħ=½ô(Ñ#ŭŒûZ¤u•xŽAçÖÄìSKÄOChJ™­ ûĜ Lŭë äàùŻ‚Ħ”´CV4‚ö–×½ĝXz6*µìô£]¨T·ÇƒXd[Œ.·UˆÉ̉sÄòéìÄ-‚W^ıޤuŸÑDT!ŠŜğGH/+vTK.ŠÀŭ;M‰;üĈ"7ĝßt¤Ħż.nZ6ğ bhİfDĜiĊYċŝĜ$Erz8€Ġœ3FċÖ˜à×]s¤’çœÌ›ċ·Ĉ­QEEíN| ·ÁŠÊ8?ݜ„J}öíŽ*„˘dò$ğiŒƒógûkĵ²}ĊB m  šŝ(HĤ„EDĊÄDE„i2Q$h Ú WlıŭúGm˙,çŒ5e? 
ñ³Q‚oİâ¸ŭN6t&+Ÿ;È T P†Ä†>Ÿ°EU×ĵògóNÎ!SŻĠñ'>Ĵ—CÏĠ`HR›~pGÚ˘gÛħÂġÂğëB_Á@Ö%À2è[ü›Y7eeos~/FÖvN"ٗħ/Q-²Ï‘4@%`Ût…DԂ}FjÍd?³ĥ£aĝmâFVŠˆç4g&ż‰y3’ÈÛħä:oh~·–ŭÌ 4‚ğXëé,ÏCs8y>˘ß,ߟ€d4.ÌjŻÁ‘áğ¤_ßÑçŭ²;1w£Ù(âñ]ĝj…Îݤs<ĊÖáíĝ҇ñı:]?%}X/ˆ{áûŸ…ĥp(ëñ‘WÏd+˜Ö÷ğÒâ’ÏP“½üı¤Q!ûѵij7ş¸iú0Éî'B,ŻÔYYωV912yQF"dhöĥÙżÏ]JÔĵ6Â|ċ€êErazRlüÎïnóÛVEDï04Ù ›…ç_ŭĊĉÔŜ]Ş€ˆ;ŝ½ Ş‡›L„ ĞmÑ?‡ùü‘²Ç}àŠŠi/Ür1>·yŒ mÑÖ²Ï0îäçá`Ĥ£,#íxä‚Ĥ° / Ż ˆË(ë˜9xĝA€àsYë(´ĜcÍıñ·,Ôƒ„¸ĦÏñe#|ŝÏèmVˆ°ÉĤ‡U úvÚIQXz·–ŝuu0Ñô¸à'=Ĵ‡(nŭĈd|Ùoñcx›!í¸êw0RuxĊıĤI\š‹Wú“5nÏ…HÈ\œ9!˘Íİ_‰¸”òbdîñ~ڑO7-FwO5ıOħ $Ÿ„훆|Hx͵żŜT!{f5ùÄ3.˙y gáş—ĝ‚ċ#ù³KgîĝÒ|ğpAĞ˘ïék§Âj|—xbÔĴ‘;ĥSĥ˙9úŭéî0’UAş?‡W3ĉ…˙éŜEßiıpĦŻŬÀpĜG‚n rm£† è<‘%á:s½5z \L}Š…B7°PKċç\V)üϚhx­ُÚb‘œ_.膙sşKÇïĝgdŝ7ĵÜn3‘ ü‘ y²to™!z ¨=è…ş·‹¨çOĤo×D–üĠ \,‹H.8›9ŒÑÇmûÓ \ù§Ĝ”~íïn á˘í—r˙t!ïYò%ñŝ£;ÖùÀvo¤£Ħ˘(˙W7P^QECÇ>ëv½t?ñK $I˙ts.o_dĝw7Zzӟn`äş?ŬÀmqżéĜpĉÙ’ˆìâżşK¤PÍíé“üú;ȇ ş!zˆöİZhè!f· èC–‡ĝËwèFŽÏ@Ä·˙6qŝwD–ŝéž›éڀşüâDŠi„t(t_Èj~ĉGPĦÄr„óMEĞşĦĜSĜÊ5ġjza2gÜU"kÂY ÔhĴU†n iÜ> şŻk­Ħ·èŬ˙é^`šıÂ`ä>İ" 9'X<PŝA¸3­ïĈ?B+ŻŜ‹­•iüI Â÷P÷êĜLƒI“è#ŽŻgH†ċ8ÊäŽğ‹}doáWàĝ]Iż|r-z†—Ĥ ŭƒ}’àĠ=¸ o÷F·ôC|šèĦÂşoñ,kÂĉF”œDèÄĜY)İ3³w4IkÊ—"ċ^nşQù„(ƒdÈYŸ×JçÇôWçBĜêI/ŜŸ°FQ\ŭ¤O/ì*-è‚’|ü[)„˘ís6açô==Ì zABr&۞gŝ4÷1Îô`kenÚ˘ȣ{ĥ÷]ĥÄÍĊÙÙĊÍcÉ2ßġ[÷ Œy™–[Ù:8 B!ĝùž+8’Ù°ÏœGÚOz7i£Óû²˘ŭl”Y‹í(§áğéOß!ü€ĈÈS°ûŸC !¤1.T̀1ȍn–íLGoÑ­,gcgӖĤI„sĜzтnĞÑ/´ġÚMĦ°äœBÂ0Ĝâ„VL¨èWŽúÀDH u~{˙âŜguEè_ˆċ"EŒ³°‡²JÏħlKRèAŠŝA49ÏZ€Ì˙Šŭò#iܘœo€:ä \ÑBfg_ıdƒ H’\›6)< (nü0޵>€V€úF@„Lç_p–‚:ìéÏÀihI>²@ž@TZ¸ïIÉ0›j̸äĤ'AD(òĈî[ÎÄ|,n´xıŒ‰ŝÖßĊÙéÉ/Ŭğqùâùó/Gܸ÷èErzvñïÖŝ 4xôá–â1gĥ¸ËS˘„ž[͌Ĉ)Œ7\òdßB%"A~Á‘dhAô> Ġh)ç ùž|µQšZħñí…œ™´µ’$ÇH‡ɒ\^ÍÎ&8˘ZW8Qƒûӓ74H~ż°Żó‘Yx³ ĝFŸrpt8˜d™=W’}ˆe‹DòrŒÄ ³Ĥ¨ŝ™w¸¤ż}>5ĉA|F+ġU ĤVĊÁ x§`KŭÓĴ]Oû2jÓ] ş;GH7µœ~†ÄÌZZÑoĦ靲›!>4*Ħi h#RE?a$HĠ_­ GÀ¤¨ •ÖHlÂ÷Q ”´ñıÌ΂I•’hĥÄ˘İŻânyòsğŒT 'WïÂL0K,$_aׄ\چV£Á³ĠöBçÙ%ÖBgµîèĉŝħŭd‹ŻĝG20bHK]œ=+%yj€£GXš‡Wo–:„úÓÄKħŽ şˆiD^˘‹h*áN ÎTşûıż#ˆôv¤ô“ıĦá“t:‘Öƒ²Üı%Àm² ˆüÒF‡v`ù›°Nš‚p°Œŝ|żƒ×^|-oħ–şŞ²Ÿı9?~ääü,ĞŞkéû:ÀPkù××úÍ˙“,ÓtÚöĤšôĥ/‘–R@˘XrŠĵž´N’° …ĉNâŭ);ô" "~sé?öë N­SܒCšˆnH-ŜaŠè^èÀJĊiŝ…XÓ)á-ĠxŜR‚^ cà”¤ÔÙщ‹RÒW€C6ùˆµ ïëߌş×2ÎY—°Ï ÙWϣЇÚ\„aŻ$-€ĊsÁwÉ+& UŒşçÊçğ‰Z$‘=ê 
Tñ­Ħ,áŝeä1c!r)ûM5 Y• ç#ŞĞq?UúYDx4 … İġ”ĞÓÚĞp_ñhôk³DĥRı÷#ɚŻ;ôÊ_-ùĞAÑ˘F§ñ} Z*–5>_ĥ`ÄIúǤ§p2gzżCZ9QĤ§œ Ħµí³%FR/ñ˘ĉ%0Ͱnö0Ñéû²ˆŜGüƒġÄdÉä=.‰uFÒħÚÍ‹CÌxè<zl…Döóó6JP<“ĤٙesݵÀíbQ’ċı2Ğĝœ •dœ:„Oä„:K#âv‡Ŝĥqĝّ~@n é,9žPÙPzgŝ³3ŝš "IhXyn9q#ŝsqC€šAL0ĈB7zAS3tĉŸf&àGô4Žżqb‹§•D‹‰bšŝgžċwÒ!Zžp|‰ôü"³ĝœĥ·‡ìÄiçœ |(5Ĝ˜Dµ9WÌ┝³$‰.†1ĉ–ks 23ÙÓIž‰yüŝHÄêñ”àş;7ž9tQ‰²ıKw@œ!(탲Ĝ{RP˙ä ŞÁü£žÈeö/'âáÙş…°(1}€ż”2*™ŬÌLe½²‰•¤;ĝet'YĜsò‡´ÓHìüñ,ħ€ڇŸ†(Ÿğb/ÉĠüP‡!ŬùĴHôcŻòJ,H˘ù+=á‹ŻÒžJİOE™swlƒÊĜH ç.0žı‡ĤôĞyòÎ3ú´ĉ3c›ˆ™4ĞΛú(´… = ŽœgñH4ˆûZĜm¸Ú@9‡=Ċû ˆÈÖİZ;8~ӕ|ˆ}ÒżĊ+²%ÁKH°ËĊ’ „ˆËÒRçÇÇ/ʊ"#ŜuŝÄo;,MÛPŒ7’'{~`M>Ÿ•àBŜèswŞâçCX5{!êü¨:ŜlÎ1 2ÙìÀ§a|"÷‚Ğ,"jµ;Ħž—>Üa+Ñ=Û-QšTﲇĝ;jCKR^zvŜ[ކ?z›YR×>06ŭ§û÷wHt§ÇÚëJ2ß> ?şĊÛNO'#Kj;ú‡<ÎT<µĊ‡2ĥ;–ŽŒú„ŬV˘ˆĴë…Ü |ĝÓ32ÙâXÎ,Ż.j>UÈŝZ#6ô|ħ0Ġŭù(ŻjÁóŸO²>x’ċ5âĊh҇ÛĝŸœQóx¤ˆÊ^?/%}YÀ‡0HĈr큰\’l‹x·Äu>a%użÁvj+ÔXJ e?NĦ›é9ÊĠnÂŻıAh䟇;@r‹żEèÔ<Ġfsqn"1ÌùZ}ÄsO.ĴŬƒûɵĉĈ6ˆWÔËysn#7ıĞĊrÌtÛĞ”ìGrœÇs¤‡ÊԌğl3X‚ Z‚¨}k˙b M’Ġ˃K GÛĜfdu/äB|ZfŽ YdaŻ4EÎM÷lƒ~0V´™˙/D-“¸mûÄ$µó2Ü ZW¸ŸĵȲ{ĞÖP]ÄèR ŜnA HŸá–”¸gLÌŜï7&Qĉ‚dĜdŝċĊ (ÍdóŠ)¨Î$€fˆçá¸ünΛhúwv£‹‘ĵ ("Żk½È/èdxtÂÇÌŠÚĉöîÁĦĝìëno­(Ìü˜~2Èo‘µĵˆ (oä²ñlÜ÷Ĥ ÎèΏ;ì Ċ`Š†Ç‰$¨9MU<ÜlBC_Ο„lĜĦ9’ñ~È,´Ċ@zÁäT)w&=@–`ŜŠ·\2BtC[ñ޽²dŻO܁ĞZ÷ ^û!Iħ}mÜ$KÔ³˙>YT„A7x[Ïô9ÍWX–…ñ™H†ôF6µyÀE0O2—äĊŭ­ö˘ ‚8&+CÚĥĞÑX­lÈQ:gÜY*wÄ^İŞ]×l Wl5÷&r›-W_!;,”Ó*ç‡ï­)$ŸgÛX³OÓj3Ñgŭ0>grÒŠ¤´ŠžàÌu€·w>)Œ=ÏdêòlÒÌrú9r³DZĈ‹‹B‘™„ˆÏSn’Y“‹ÄÒO‹@ñŜˆÍëê˜'ò "ğw÷xCıßud^à/ċUŜÂ\£ü,Ӕrr7ŽpÚÓ7!'~jx:J™ş½‰Ÿê„XĈ3‡#4 n97áÂ4ÈËß"Er†v`ÉA Dë@!›ŝ}"ğêy>šĥېoÀ§!|ş8rİ*Ĵ³:*oòš™‘ŝ’ ²Z{ŝuù çN4çÄ_òħ֔˘@0Œ&­fdërs‰‹Q÷âߤ|LÏĝü9#ŭcʛĝ¸ûQOo^énk¤&MƒpEJÓÚ'èr|Nóg ”ż>ż²Ħ¨¤…d&$Q‡ó˘Vë ŞK#‹§ñĦOü‰†ğÓFñžçĞd­=ßéìÂZˆĈÁh:“¤ĥäóárk|s˜óэ 1̌·DœRùMÛİÊQÓ§Äċ"fû Fŝ+p„ëòôIŠi&öD&jßŞÈżÄ_Èè|ç†={şŬÉQĝ3‘y]óÄŜ!' 
ïÇH[4™%é6ġĝñ0pe{ÉT×Ë/á\GžO[šM>CM™Ìc‡‘ò*ï0—sB´5Im \Cʉ1CrğĝŽÖœ" ™p4ZÀ=KÎm’öÇ÷SóڔBò€í)WŬ¨2o2•Sá‡ñRE˘ûĝq€İû.gÑÜ$šÈ?‡úOŭ2‘z]§8 WF:—˘;Ĉ!g|/Ñ&{Ħ.z~Ĥg;Éĝ-VĉÚĤcż|‰úħŒĦó ”€ ĵ,€& sUğ…ĵ>0§_ı IĴṳ̂gĴ—!˜_¨âÚFeDy›|èN}x¤B%¸â ¤²ĝ\:tƒĈ+žtU§"T5§Àˆ÷•ƒ,œ7Ġ“Ç7¸[¨KA‡êĵRJÚ&6 <–ûm ŜĦƒûƒƒ7ú-÷X`c˘­$%¨¨RêîŽĝÒWÑĜú~LÙo€Yp tƒjĥ˜ ¸/ŝ÷4ÎlˈX/ĝ Šê¸íşŝħr’}ôŝšĴĝk‡Öğ[iˋ AX “VPÑÔ5463·°0736ÔĠTQŝ+F“×ĥr_èZ|V d 0ĉ@ċÇëğÜtDKËúˆŒ6&>ŭ;~Di&[âjĦ”lÏGÊ >ü~Ğ"ëûroğí$$ìùb„_~H 1ƒlhùnYòâìÑ;Ĉˆmsĉà—àD½GĴî}4•;ÌÎíd£·X#êß1s–fŽżV“ŽÁé‹ŬÒ5ä^CƒŬ² .÷²~¨iäÓ7A7ĝĊg˘PÙ´ĠTíWżµRyŻ…F}“ĤċPĥˆwıtuô*>D<ûÔXp~½ĜnÌ[k"ÍŞ…2 ½ñúL„·Saô.ݤxgBÊIJ]İèŭ3;ĉM4iÔÀžŝıÍö„=@v1ÜNàÇn€PӗšÌò'?áï‡lê=’ïT‘†fÑLz’›¨†S?€B÷ÎÀ öLbtóÀÈ.˘]` 8;Ġ]x^­Á$‘Û²ƒŞMħhÔíŻë´É+b&ˆax7öë€ j{hÇM{²è’§ŭx÷#oITggê(0ùv’ÈĈ;^µó8­Ż÷Ú ³ëoĉĝ£ñǽôĦr+iàħ;*şy8g /˘Žo]6ßB[QRXz>ĠÉBEŝğ*Œ…„%µ-ĉ/Ûz<ê '}„žò”¨Ŭ’P[Ö÷:_1Êç äŜ\/(>Ûî}ŬÊᵿÚaL&îváhêNTÒûQ7Ŝ˙t‰(ÙŝfœaÌQ•ż°îpCÄ‚^$Ó]ĵOn¨A4´A•¨;Z¸‰&żZ^<ôÈúž @q×ÈÀftq#̅/Ĵ›Ù ¨~ÈÛħs´Óı'р™"M˘)_Ò=Ègïç?!û³’İÌ$Ħn z Ĉ‚Ÿ0ËÂàɍ<˘ÛŒ&Ħ7™vf3÷ÑÔ.(,ל¸C,,!ŬUĜÉC? Ÿê¨—èúK I÷‰V÷ĈÒJ‹oĊwъZ=Ħ­x—#UÖe5ò]t9t·p ÒHBĞéŸ%zëM³éĦ6™(>·~táçğ–|ŝ@Ì8{•fžÇğ%h~ԇ% 3€`—§Í!ÀTĝ3=R@=?ĊYÁoÚ-&ħ·Ë[M‘ÙW‹˙Ŝ/OtN˜b}Y+Nv~<„Ġ„š˘2k’FħÖ{îb½Ŭé£ĝà‡Ŭ[×Û(Ȇġ~: IM’Ú␷ ³0¸Ÿ}w—³Ĥ0B”2pßqċU~‹ àÇŞìä˜ËG}9XèCâ€TŠ„(T B^TßÂa‘oà‘Ë1ÉÙU–´Zò_]Ùán ED„5wŬ͆ĜÁlېĊj$È£žŝÔ+H†ĞŒlĵû >š[ ĉ~ŻMZ#ƒš†Ö`CÉâkż°Ĥœ‰òûµûd(Ğó°Ĉ½bğ›ĝ ›~}Iï̅ĉ¤áÙv„€NXô? 
š·xyĉ´ĞÜ!‘ûC²Á‚Ànéž$äuŸ‚?QĤŭ1 Ò$İÄs0Z]+·ö“ËĜVÊÇİ…re=fšġmFZµ]s‹Ĉ=€·8ŻĊìÛz\)wñ8Ñıżá²e˘É™tŠYaGaÔş&pxÑ6{M;66zLXġĞc'Eŭ.}´ŒèŜV^ÚBÔèÁÔ,L…ßĉ|YA[˙3ŝÂUHlUÊ4·è 6˘´jì 7œE{à JÛoA%¨Ż÷ €½ùQĠ“šx/HħSġİ›çİÒ ‚§³ àtlFeÏ(ĦSŭÍeYïân^8şkj÷6VsÌÍÌÌçXÙ8,p÷Y½q×Ñ 7Ŝe•5÷O-”3ĠS™{:`TiŞó6G¤ÖO+6˙^à\)È"úFċĤĝ×½ĠêŞĊŝ´!xŝ{ "Î7 għE Ñ>XĝNY%&äúbœó}½uĊÎ-c˜ Ÿz`„.Lµî•9=HżĞNÙÙÁz *|ltìMû–kKÜ5  @·ZFقyŠäÜgŞ­SżçŠĈáw)=möb݁Ìħ8qEŠsşjµŒÚê5ÍzÊäN}¤lĊ.ım‰\m5L…ž#Ĥ@Ê+Œĝ­]f#îŻ÷Iê‘Ŝ-ËŬGĵÌ3ÂMéVĜˆûŞ FO9²˘äQÛÙ§H$l&żšTœĤߑwÂĜá%8kÀ:Ìœ€ÛçmâĈWY‹Ĥ>Úc¸xö·/„³Ê[yÍ/ükTĞRv¨Ü"îuq­wxš!ġä$P,ÒñŽzR­7 o‚a,}4BèĊïLà˘Ĵ‰Çv¨Ê‘jĵ+ʕߖ Sá۔Ŭ ïµù"À‰LĈGż³A—\̓|Ĉï§A֒EË+$İfT&‡m°‡XATÍzypxBÖïžÉ?ĜİÁöZ@½y}3üâÙSǏ9rôĝݳoF?} ÚöÁİ?LÙɞßY áÁË­Ġ6)Ĵjż!,ır„‡MÖ$…xiQIë §ż!‘’wu‰"*bsìë(>œ ¤H‘ù×DŬˆò6˜ ÏŜ&ZFuáĠGTPğÇĴ‹ĜÁÏò&ŞGŒÒc Ŝ D­SáPŒ8ŝÀÓ-˘3y’j˜†żÓżÎ-š+Ê.µ˘^є §ë…íŭžpxLsék²ŭÊĜíĝ³p¸‰ ˘‡ñK0²ğ“ünÚIéŬ"‘§³ĥ£ÉhËÑhò1òzPĊߨŬàô&á3Lƒ¸”‚cĦ(H|0Ĥş÷UÁñ~@í.û-ßKÍíTsf>B× ²iŻTœ7ö]Âcú‹¸ûDœuw½ĦjÑärò=<Š´bì·ıäk¸İşĥù˘ûfĞì„.pŠç eÖ¸$lt/iÎ7üƒ1yWßÌeié°ÉÑ3’’!ƒ‚žĝ²ĵj3Mú`3V°Ž&ħ­ k?݃˜Ev5'u Tʛ|Y+½9tnuĝ|(ïLàdÎ9èIÍ?™àδ3ž‚R°ĠĈ¨Ż‚R@Ù닎š˘„"k°`íĦȄom³‚R° ßÙ\[YZ”Ÿó#;ûGN~Qiemsg˙ßÁÎìp[Ċ·„ÈCkÈR‚¨ĤcÀĊ×e‚RרV‚R°ç™´N(§ŸœĠèçr&¸JÁóĞıôœzd{?É+…`ÎÉĵ;Ò Ñ9ߎ•m“ ­+ÀšJÓ6Wá˳ƒ!’’gF'¤/Ïôí"ÀżÍ!íL˜[ óĴÜbÎ!ğŞÙ}¨ïPğ+TY^Kš˙[AŠÂOİÖw[ËċN¸‹™öĝ>6Oħ²]ÇbèğzŝäÓY­3—şß,ŝğLx?@‚ú!ö1˘ĉ‹ŻU{@„@Ĵ üĤMä,p  4ô ù%ğ _/ {Àî"µŜŻ€Ú$ZÔî8ó O@;–TÖ¤ğxĈìJÊsX˙Ï`)ž@0OG7LV[Š=ŸIär/ Ùŭ‚™Pë"îM  E5'ï†n"˙—â/Z‚Z&óZv‹ÈœêáĵuDŒïO0^ÎCµ.tO†‚ŸsóƒQÓ µĜHâ I‚ÑÑ|·Jèl}ӃÍGx* Âs‚â˜ĜĜÏ{›çH BŞ.ûb zY‹-KşèŞÑ‚ˆ’‰³ßKħ)9•m“Œż‚@ŠdÒggàk–ÎNä_ħĈä@[eNJì}~Î&J"P ”Öw ĵ’T XVoAì>U!TbÎĉ{?Ç0fC|aTÁ3˘xëy³UGüиŒü£Fɉ#XíST1(Ÿ;ŝ Ĉ‚zÓyA ÷’1qßq|Ëé9%#²ğ…—l‰.)Âż: >żĝ‰ş@PÙO6˙ Ĵh‰›Ü"h¤0Ù ]äĉH>‰YVOn@sӀÍ?ìIyŒ]à9eċl†¸ûd|,Óh~ <ĵŻgĠۋD7ĉëkŻÚ"îĜdכdğ> φ”%†Œ³"moDcĥp.÷€dW’Ü-ˆŻP…= vÊĎq‰Ÿœc6ŝ–pÛBû>`l ³FPj†ú]ôŒîâ=òÍ”µj박xÇ;Šì`äi*ᆖöw/!c–ÏXÏıB§ġ‹‘ġŬ` 0Hcşĥ‘ĞA îfFk‘j1 Ĥ[@NâŞ:qq·ïŠ6j7ÁJ_NY•Ĉ`¤Ż•$ÚB2¤_a5=QÈäV_H˜Ĉ†çm<Öïk4‰pżU4Ԏw§<Sûú>'KĜ0È[ùáĈVŞ˘D¨*8ĝl9r%ĉġç‚ÊĤ.H Ê˙<ž€É êjŞ,ĝü:ĉʑ-> P $ŠŞZ-ŬwC%Œ³JâOúè ’ž§Ŝdĵè” ˆšküfñڞoLĜµj.³„İ wŬı[˘äÚt#m•my:k"ÎĠÒÇÍXLTż I àäcµd­hfw0U#–Ó¸…#rŠĜş×#‹ë§…ĉŝTÜrĉ1’îŝ„îeÍ<Ĉä(mG›•lĉĴŻS`EßĈ 
wŬ͘Ü36ĝNۂ&ĵ7›3ÜeĜ™LĊ ş”éNF’ş$pç.Ä6h ۔'“!%7¤€ŻP}ŠĵTóĉä7Éúb!ÄÔa£9ïIñ#ėŒĊRE}ĉš Íş-Mú:µ°úü˜^IŽdé&zšf>ühğ? _ ’ …ìP²ŭ/Ö9Še˙ŒÒ~=as×L˜„Âmf{ Yï§uA“Qê$ß"˜ ‘ĜġŻÚ%EYö‰=ñԉ(ğ=Ÿ;ñÊK˜²A7Ŝ³H”h~²Á­ıá.‰(݊İp[ĥ›RQyאÔNw=Ĝn+G$H™Ż>÷Ş ^îh}ĉ³›\M…]Ş”šÉĵ%ŝğO\óäuê·ÜŸċ•5uġ ġu5•ċ?sż~rçò‰ŬŝKĉ™¨ ŠFDE×Mgg֏&l ìĠıĠĉR˘œíöE€$ëL q•GİĤÛZıœş˜Uʈ¤û.£¤9QtQLŜŭ`!EĜëĠ7ğ,ÑééûÓ2ŠÔ*ü÷. ˜ -ò%İGM$ˆş£•óLĜÎĵ­ 6Óµ™`ŸÏ˘$ó€ŸgI9ÇúeOe r6/ 2vÇ,_S- ñòÊé°×êè7µè67hš÷I-fĵ$Á’ŜÏ1N%†`²Mù”£|oµÁ7ÈÓQġx€âPäÉ×'”·&*í%léQÂĥJ6ĉ˙M  ü_JĜjn´C…Ü"ċ½˙Ħ„½ĝ_JĜħ˙/%ìâ˙P–ŭ‡ĥäoJĜò˙P &ŭ‡ĉú§(a;ŝ›ĉüż”0˘ù Áèġ=ï?”°²˙˘„Ċ (améa&(a~SÂf”°cŝ.ĤĉÂ˙‹ĥçİKáQÂÂ/…ž:ĵç˙PÂTL]ü (a@šúC óûC 3ñ K‡RdEìQÂÊŝPÂĵaR‰žB0ĵŝJ˜óSÂv%ì˟V€ë_”0ˆĊ˙M [ŝ7%lÉ(aËŝ‡vñ˙K ;öż”°˙C ğכ0p ĉŝ_JX Èߔ°üFÉ­˙E [ú‡™<À<;Ŝ^)Ĝ’zċ6âkzRћ [“‰xäŒwĉÒĥÂbò‚á.÷Ğ]ÇĴï§ÌB˜sŸÌ–vĴ0¨ësOáîGqSeM*'× GG. ż/jZç›RÎešñ7셲Ô-Nµ'âS}¨*rbt"TRúüĜd¸"mG?}bGżm€:ĵ¤O=ħC5ÎUé„b|İïĵiG[š0„·Ŝ‘{Ó£93üÖGʈĝüs?ĈñÉÂkËԈ$5ŻóŸ:ÙĜDĠËc‚Ž à<֜~–×u\öhsAò­›<Ħ# …@ •RPÓ14·²[Ì…àħµ27ÔQSŒ„DĦoäıéÄ­ä‚ĉQ+žjË{vz:}Ĥc/Ğ&0vç§ó^j$˘Ú²k…“ĝĝsóĊeßG­ü™œ£Ĥüï´âC KĊHv7;ñĉKĈˆĈ `^Ñ@ížLÑ_: ·ÇéqfȂt~ÓšbĝäĜyiɉÑ"Şı5>ˆg5疔ò ì›1í2§ÔN҅Ĥ˘÷ñ$9ÀoĴŸĴ4‘MċBösSÄûê 4*³'Ŭa&lĦÌÏ>3ö_rqoÁŜ…u ‰Ÿ0ħeÜDS{ÖâċzáAwIíċÙÍ@À‚mT›ş&Tŭ}4˘ĵ_Ğ4y2Ħż€sĝ˘šÎĴ;H,cĦjkݤ`ÒĝÍĴ^HĜ5Ú·]XĈ{¨$y…Ŝı•dòĞXŠZ½ç¨´´ZDïlĴċ°,eU6żçŠbq{€_´StŬxÓK‚äŞ×#XËWQ˘ÑÌIĴûċF-Ġr˙‡^>ĞîÙ6sDÜjےQ>w 0vŻ+”H²ĉËßϨÎs¤ıè°}ë÷óááÓġ[çµXʜŒyûŝ˙oFAî;ç÷ñm²GDÌ6=Ża ş“v[‘´Ö½îÄĈżï1"‹şßn†–K’,.4à =‘ßR è‹6Gô.t ²–Óe÷7aYd­ÀÇS‡C Ñġ+Ċ>ÌĦ„ĥO_TzÄ/vECz†·‘\+gŽSíÊX‡É [€ħƒġÛPĉƒà4)`ĴT_) JÙWñËh/^hét²¨ÏÔĈ Nì‚N~ɢfUWÖcä6ÛM¨żçı.úcħÈÛ>%x‘•‡ž ?*iW&ÔÖaH]AŠ<;màm Ŝ/5|ħ}Œ?UâpF~³â"X߁fHqĞĥċ`€İ!€Ĥċ½¤­˜ŝ&ċ]GL!(x#nY3ĵ= “@x<3Êá]"&Ñš/úŒÈɎuÄ$0y.)ŸI`  )‚†í³“À_€ ÛY…WA„Ŝġċ'}ĠġI?1 $IŻ|7“À…˙™nŸC£è‹­eƒĥ!r1 ôŒĝ×$n8LQ²¤Çĉ ñÄ$8£Ä$ÂÁMžŽĉúJ²’˙ç$PRVICßÜÑ3`ÓÁ Ä$°c”ĝ·`a³‡$…I`ĝŬM#<‰Ià‚HPU°kc×éRhsĥ˙g¸&CïVJ“ˆI`˙WŞÄštÎÄKW(;À_m'@Â~ÍN·7RŽ‘(¨ùgHš›LL×uLF2ôżà4%˘‰Ià%^Ž‘ĜxÀ$(şb¸ĈRü2¸Ä$°ÛAêÛô ÚK^(ÜC¨ñ€ Ì´Ô†I L£ı‹›óáĝñŞ?Œ}˜ŻLá1żó6˜>Kp"“ê+cÊĦ˜‹÷€Xs2 ħqlĞxí/Z„ÀWı#“z['úğYĊm:A;)oX^’ı6JŬ*PJ—Μ„Pìà~‘6Żô\ÂŜHMï °VŜP ­q#ïa6û!k:Ç÷SMżáŸİ;ú&NIÈ_êŬ'˘pi|ô‚˘Ph5–éEÒ:Ä~ë„jŸïäĴcĝ™ü°˜Áz5"(?f 
ŽwCxëCITcpÂĈ~DĜ‹˘Š>—óÇpfɃġ&Ó³ĉJz;gµfŜÙîĦK8£Tm–ïğùĦ iĝ |Ö0¨R]‹:°mŭŞ%^î.ó¨Ày.î^KV­ßv êÚ£„T ³ĝÀn*ĝpsßrb $ĤëħŭNf+ g·§_Yb“ġJ˜ĝXŝeETÔ>¤A½ï6h ’>[ñĦw 81:V.yċĊ`,ŝ09óĠŸ!ĥ6G}^uzËşŞMòÊÄŞC…/ŒŽ_RÙ×;uU^âÔDߪñgü›)u˙xçÄŻ™ı‡ìVġPoşĥcşÈXê v‰â;Xk.ŭ…ğ‰À ’9™) ¨‹îR%›\I/ÖèGĤŬTš‹P3;”}´_µâ[1(~Ä"ÉÍb¸V.X¨òĜÄ^Ş °˘ò­ ]olĞdwò%ĥƒ1ĴLÛé~Ħġ̏>#“lÑċ@‹ßƒ]„ àiä8”’·ñ^3ĵFе4˙ŒŻ _Ğ‹ÚÖ.#ċfëÈĵĈct îˆë$qвùö¤M0ċ֌#ráxŜBÂQBW0D|&:ÀOĉ†bŭt|&ћ&’‰s§‰úż‡˘­!lÀğTQZùĴĝWŻÉnŔ÷TĦdíîçöpéÖöĝ̸ UJ×iġÁ›ïrjzˆñĴQ ċù™_ßż|z?(ħ—./6úŝӗïżfĉ—7t‚3ŠNôÔäĵğypµ“|™9>{ühĈ9=ı÷ìeIÏ_`ԑ´ßÌU‹!˘ċÙJ%Dvi,X``•ĉñ÷ŝ˘4÷¸^Nˆ$Í;qfüİ5bpŜç€C‰™ş Lİ!Œ ó "§) ÓÒM$û|Á#YĊ8#¸nÂS êÛM?€p‡ħûù{£äVÌ[wâı páÊĦ{„Z•†|h=Âħpâï­Ï–à\›™khRŸĤ;÷*ç­ úżÑĥ„"nĈ[Š8d4ÖjštüĠ˜ÓÒ`¤^28_ô=o'r˜˙VÜĤħϋ İE)£Â™½¨Oï  Ċ·ù!!ŭŭpüËı*Ħ#(ö@|+ĝ/uiamœ'ÚÄ Ù ħNàô8mèt+RA",֑¤¸³ë{àH‘<Żè°!*½×½ñk”ïè*Żŝiyô‚”½ôÉfkÑİÎßŭ”Ho³:€îşÊV F9tY}§€]bÔv‘_ŒÏ™žèéhij¨Ğ­­khjé虘ĉÌòCX£Ŭµ_c.ì pÒ'"jĥĞ€;ÛʐŜ‚§ğçĞÂÀÑzóÈ M–=ÒNQÓz§*Ú[Q^ÄWËQE<HPôaĊ;IŽħ [DQ< b¤Ċëá$X#N‰ Ĥj?ᴅÑt_ò+|bAŒŠÄUÎ_ÈÑÂÏ ñk^üŠŜtïLĦ<P½úmÄßò#;yïEç–¨5´ÌÑĝÛa˘YÛHÒĵgâ@ﲍö­ßÀ tWıîš}Ièµ›ı@ÍĴŬÇw²àmù€¤UQáӓZ+qƒés´Ò|Ê%ËqÎP²!²“Q ·ÔcqSNjÍ%Ò>ìg¤ŭP&ĵ÷JsžŭĜJÀd޲AùÈÊU€XÚփ׀o$ñШüA /TĈ)VRdQĜÉñÂ*·YŬ{Eäώ0£µ(Ëòñ ˆ-ŝOˆ˜H`Ž ÷Ĥ˘ xµÎ›ÙüÂäñš_l'>”¸It§ó'ħž¤=V"ˆÜü)]|~wĈĊ@oÒòÜ[ON_éÇ+ÛĵL„P”&iµ(x˙ĊÇ~×u ŽK=Ğ 0 ñ5û àm1>ĜQWü‹ûƒYiJÑPTHÁÄkەÄ`ݧ vż§HI W\ÌèĉóğRŽÏ—CDĴö$ġ`“ù§A ½)qïŒġƒĦċ‘"óK àMnvîN1ğÙ+È  ùCâ…7TàùË(ZÑ̑³ò"{ğY·U„ ŒŸS~ÄĞX„,mdb¤â/ ôNYñ-ŝDÂ(Ÿ &Žáz[Àħ^,)7PΙ  <˜}nħéŝĴÉ$ŸôŒí#]ÒĴĉ4Êż%r ŒiÌĤœá€ò%ż”vnÚÀ_İ5yZYdÎŬ#ĠGxŽqm­&œW-ĥ _-۔K߇‹—ÔÉûpo#7ĝ+EBĴÓRİl¨`W¨"8ö—B3ħÓN*Zj‘‚x1ğĤ>oÊEìƒĴn÷yAóH(²˘kpàÎĤŽÒL3-€àu‡QL€…à‰8eâùŜèÜ^ënq‰½-‚4O’ĉ…^^š/Uv{9Ŝr4+gñ–ËĴùÊä•3!‰-‚ÔŭxĈ^*ÍlOò>‘ÑK2^˙”!Ĵú÷GiÒaEûžävLC&°1ëÙÉwÀm9+•[Ž\ĵ÷21#ݤŞ~íûák  U%y‰/ï]<²e‡•ŽA@€{ÈÉgY œîÈ}²o‘Ž0B×\tô}= „!× ĦŠ^ó'ä=f4ŞÉŜŒqh.,#™+á1żB”Éâr Ŝxô7§[òí²Tß4^ïM’gš eŻ„ĝîV^Â\Ô;ÏtB<šaB 딢VžaJ;:x½ ƒ]+‘ĉäcÜ]ÙĜEŠw_“Xĵ ™ÉRvÏ-íw„šŽ+À‚í”+[t€é-ş’ıÍġ‘Ż+Ĉ÷Ñs›dWÛÄŞónm êH°ĈĊġHíáš/Bž‘ òIB1OİĞäœjÑ]€èMBŻr\µûR(G°u"ùíšöŸ¨áĜä†`3ċ%g5ŭ-äAžñw@&ü:)˜•İ`\9ìGı‚ljYVÒ܊q]+ùûfú£RÒĈ‘½48âdÚ˘+kyħÚ´Í-Ü8#¨óŭáN4:;ħxÊ$ÊêÇñêCj¨Ù•6ĵñĵ*Àò( аëµj>Ğ 
Ê^˜¤d†úçĦ*ÈğIlfccÏwğİ%î;îĤ7Œóñ™ŝżi#7xšĞI†”Š‹ÏŞğŽœştç/^|ñ<ĉÑŬ—NÙµq•‹Š ¤I¨™{nˆ|œö·ç7¤ßŬħ€(žĞşí~^:†ħ›¸Á 8ô9ĝ"šâ‚µHÂöQ,~ġ5WaŠET9 M‚dQ³óxÛ3TíP5ŜxL(½0ŸZ sc§GáĉëŸÚL’Q·e3M;–WğµÍ„c2mïHßF’K)¨ĥ a$uÓ×Vċ_ в‹ŻPü†+2YÁÀé;îH\ û-}5ç%e³àr §~·×lÏY‡Ħ¤ôiğr˘I½€ŭĜ%ZC:e艅* <&ċž! ìIĤ4Iî ñÈĤİ5œžNÙé÷§Qwá{(!k\W£b7üCÜgê3úШçĜ/…ı--²é3Ñp ´\BŽâ§¨¨Ĥ@ÙĥœuìZ5AµÊߗQ|*(tF×4Ï\—“:Çì? ˘tmr0RZ|_> íŜrúÏk<’8C·MgAĊ;IÖ·:ñú³f¨L`⸠ê‚€XO“ùĤ— ôñ+z ¤‡>Ŝ‹Ìw²³4ÑSW”!"c€ Wĵ|ÇıgéÀ™\tOÉ‘B$Ĥâŝ¨`€Ïïûym•÷8™1´è+S”ñş ë˘Ó³µğP%O ”AÍÎ֝·ĴIŠ;Šg#AĤ·‡8I$óŭœ·vpòávì—ŽœĵĤ$r ŸyNJîúLóÔıPTQĉ>?ϊ1]ċJ>ÈeAŠŞJ ?Šĵd°Ŝî'3é²--s~yŠ~ög|žò˙1l§RS ŝ”=ĝ.jZżŬÔS8Í[›2Ÿ@ënĝáÍ) ‰UĜÂhH… X4Ş2sl 2âŬùĵ3äÔnuwàDĈL;lB|ï6¨ƒ)Ż!œÂ\$–6éÇxÇŬHşĵ²Öw£Š‘eàŽ~+5§xj²v¤ *­ìZğ܉ĵc´k-ê^ĈğŻ u…Ġşžl–ˆù 6_°J DÜfB7”²ä'Vż[ZheŽ Ĵ6byw€Ÿ·I–l·Ż;kc€÷#XM7(ßÈbbm/‚4HÂV `ĤêéaDÒfËS0ùzr„ğB.”*?wÙĦGé5ƒlc Ô˙úx?j;<Ûġ•EŝI„‘(T˜ L‚¨*A4ie}ĝ¤Ĝu˙Żú†³kÒZ6—€Ğğ†?ÈíၝèéIDĜdÓŞ`%LÒzц1³@}Ùíf6òfgëîğödÙMyüğ–ˆöÙAÎJ!éŬġĜÏ%h‡2o˘ûbƒúa‰fäġ­Ĵ+R ÷yeîèÚÑd§rvÍĤ›îM#k‘mSĊs¤Ŝ‚=zÙH…‘ÂwVù‘6rß1ü&ÓÄ1S üšòàÛĵâ€uqžŽR¤ğzw*ù oî4˙T ›Q}  İJA…OÌÌY€…Iw$"1#ڞ‚#– ñ4pŬِëĵċâúĤ`%ôžŝ@â¤ċ$-™ü!kŬŜ4›ŝ)³Êħ5è~vžô+Ç^ôuµž££Ş@dÂà‡/x@"LAUg޵Ğo`xÄĠĜÏżjş‰ÑÌ`]ÖóSë]µpg˙êġßĝó•ÈdUï3ßû1Ö߇Ğ5!´7uĝ´I €2ëċ/Aĥ8_÷Üw¤H­ËâŽÂµFùP$Ażñê-˘{Z_šc2VıŠlÏŻ @mÒño–äŬ ZOUï ~‰\dêIĈàŻ¤ġrÙûÑ5c•fRĝÈ&xÌmj·–ŭ1ıD(³–ú’D˙0í nOSŭÎñċĵëÈĥ;>A$ï#Oí‘$ħC: [0gĉ„pc"’R…DNë bÁgè•9ä[LùüBĊċ"ÛáÜX÷[8 ßIÏìѳîäN0Ò=IʄğTĝ ²áɓÈ^Oş€uÓÙÊşEW>ĈOQÒüG@ŻöĠÈâ:ö+ŸzÖiwx– )¸uĉ†ĵĝ‰‘Q˜€GôOßÓĤĴĝƒ˙YEàÂùİ^#xi¸,Ċ=FA­ĵߍ7^ĥĦˆzAÖ²ïÍjED~ùÓfÁôŸK dIçcßú0vŬëàŽĤiù˙Pĸ}ż_]j&xYOÇĤWÎf¸“ƒ­•żRŜ<~úÎÍÁ+—úz-ôpw÷Xèċğteĉ‡O_ü&ċWeë Ñ!„DXezìé Œeáə-=úâwcÖ|8î£E{ôÎ×ulĴïÛ1gIDfÁ?ӂĉ§ËċĊĠ 6ë|â%Jħı܈w߇vk Ŭ)²áx%ŠW*Ÿ€…Ż‚żô Šö½éŝĜŠŒŽœ—ż1ÓL²ÉÂß鈜fĠ`ıg0”¸n@V´{Á1‡ĤR ˙¤ï‹t•³§×Áài=;>‹³Áà‘#ċ>‘ ŸG`wÜ lWk½žLúNzçßljôƒûÛë+~˙L˙ú1áġ‹çϞ=ñ:á×ôŸż+êÛûgğÁÎ8ċ?Ü9ìn,“1-—‹Ÿ*‡Ħv\óñ¤ż5Y{§p”`SĴĠ£ÂD:ìñ— § ³5y kŒveoĥàwaŒŬ†É÷^4‰Ĵ7ÔM³3‡ğ¤-ü´e“Yrs›:ìĊßdjtžĥR*˙É·ğk1ñ§ĥĦŸáüùŬ4<ÏVƒ MHïŝÓ]YÇ}ŞIï`?ÒĤŭĊÒç£ĤÏYC •Bß%]Äúñ('mı°ÒÏĞ ’"[ƒ‰½/nħÉĝü|Ğı‹£jkž5ñ9ĠWƒĵYkùġĵaâbéCŒiô|Ċö°Zù%z¤:Ħ L–׳óY·ïÜŭĝ”ܲşöŜáñİ"Aď˙™îmŻ+ËM‰żnß:;=ybx ™Ò=Ñ_*Êî)Œ;ä£GŒš|"‰KçpŜġċZ Ż^ŭ°šozĥF 
w ë×!c’Äâ¸>pŜ[“‚ Ûôyݰò4ÎèckD´hÉ Ħ6ÄznŠÎOÇŝQ´ħ;ÂéšOıu+Q‡Ÿĝ=@RɧîO68gè6ÄïÒân_Ò ~Ş’vŜôF˜ĵ·ïhš+—5ıŒö‚ż…twv:·‰ú†ċ#™ßmhÒ˙]h#v”üixŽ%àî׀‡Èû>Ȅú-òħMb?! Z£…‘èŠC³Ë¨—'ġĵħÍÒ ”¸ÇœÈLúܧ Iˆ•:huĉˆ ."ÑÀHw•-ì³V.éSÑèB‘? +ßáżsd?Couċ0£ħ j_ħ;b†?ĜëwâW8Ġ>à>P;1ĴXĊĞ“ˆ<܆…[2żí°= k‹T#¤a3_–0D–Á; },ÉìL56òa<˘ħùë(Ŝñb5PŬÎċC)/-~E-ĥ<Мħjßġ‚·€Ĉü;&°ğ$)z˙Êy³M_"dlïırӓWîĊù”’‘›_PXXŸ›‘òéMì½+'÷mZéio<"ĊóVîN*!ôswÂçkÀ7×ò:úv ŸĴ|ĥĊBž?iPQÌ?çìÊĠ/:ÑŻ›5ù5F°ê3f$Ùuéú/a,ù2ChÔ"Û°œşÜá6~²ĵíÙ(yl`òŞ"bÇNˆ)?àB À§šsE\@ÖöPìöUMádĠ¸<œqĠèÊ Á–ġ‹|‘m;—u™òĞB(jÜÑ!Ê£7İU`.oĝ"RE÷ó]LĤnÒÚäVۅ‹Ş%ħ”äA#ó‘/Ôíp xÂY.ú}dž|QŻ•rqŸ½lö„Ż+ŝ^è…ĵqêh›'üĤğ´ ,C÷Ïü1‰ĈÒt`ĝŬHĝ,ğj!²Ĥ Qq‚’Eˆ{>žb‹úâYîˆċ[^û)‘°jĵ`µ4ƒ*vÈR=ùi1CĜïŭ7ĵš;sf8·q½ZÉe—]Y …ÈA—‰gŸ÷Q!S€Ùñµ…3k>F-7—¸ŜÂmדʈQ{¨1˙Óŭ“[—ğškɋÏĈAHCT\ l1ò “‘eféqdş¸ĵ–ıëò­'ïÊobc ²¤ëÛê‰CÛÌdyÔÇ&Înù < YĊç|ö04”/{É!R ”ħı•W]!´t¤€3“³SœÇy܉÷~ŒĊŸ€éI•ŬQĠ iĦĠxu˜ˆÔvŜ[KÄ= /ôAmS|wdQ‰ N(Qmk…Uì³ÂFßa¨“†E‹˜ŭ™Ù.hqĦGO…çµu8‰ĵ†fÈ^ŝ+!߉lYûbeĞŜ"ùy#ßE—sžÀ5`;ġˈıÑ`2ċ0(Y]$ĵ_-זFş1eâÂß/ژĈİ=ĵı p¤5Í*£^×\ŽŻRÈÇnĊ•ĝVħżÄż"VZ-³ ÔS13 ŞêTÇR„Öž’Â!ĵCˆĵŽó ÖQċşê…À/Žž×ÒëM9#HVQŭ K‹"ö Şm÷‚0,64Ä.Ì4@\ °w†ÔMP…BL ïµ)ş( Ż aÈndĝR4áĊžóŝ8=¸DXÈ Œ1Ċ‡€ıòu@N ŠéXçp5ÒMĥ&´x­öϓAé:KÏĤ€.dŞ1ċÊz{e:Œ‚ÌüvßJ*îdòù3ĝ;ċĊá>Ά "J³_Pn˜†Ħ…³O`ĝñ/R~7SˆÏì,NşµÛÏ Ĉ@teûġWRÀF1Ӛrvİ•™·˙C+×’°Ġ„Ë7ĜďĤ†`'†ÙîP18cĵ„„—$NOô†¤c1ŜtD‘â›!h;,ËİijĦĤŻy-P‘‚˘Ô&Şá;ĴÀYÑ8sAL#h¸EĥÔì" ĞUUIœĦx÷ĥÌ"9`˘P]·–ħŸ8ëȏ`7wGNzÊ[#”2ĉ¨RW°˙ùËdŞKĊBˆßĉżb[ñ•ŠŬA ¤² _9~Z–…Ĉ·"ç˜êA˜‡û˜p]:úpLÛÛ-RùGh?ĥL9ĝ2›…µk8N!„=S{‰Ĝxüî·U(r•H›^EıBĜßä+G–‘Î>ÊêçÁÀbio·?şm˘Ñĵqí+ċ-“B†_4s¨@O]€ÄÄàqÙ3#×UikˁNQ;ßÏMœ‚ġvĴ0T’îóqš”Uš{ììÍH’ŝ/á‚ŭÌ_Q zĠ Œ¨(g DvÁYĝœŒÙl!‡ċg“›@ëәóxŻ·Ħ$Rœğxûċĝœş~x5ìñžúâÌOÏoŸĜ½9dĦ Ü\œĉÍsrq[ÑàÍğ#Îß~ŝ)³¸gÊAüéŝşœĝËÛÏUBȒ†Ŝ{çt‚¨)ùìrPSˆ[lŽgÔpöÙ²ˆ„sP˘:_İ 2ŝÏ`\ñÒ_*íĠĜ@,–aMôĦK†bíQê¤ù‰Üŝój 䗯İ^Ÿ9#+~dpş2Ĥ :ož†1Š˜lYŽĴhkœgÄ6Ôżğw) Ĝòôe? 
ΐ–TËc‡Ħ§°û”UÓiC ĥŭżáCl ;vĥÉo&5Úċq=·Y~ĥ_èOÈnÌ[{ì!š^'|Œmĉİ3Ïŭw üŻ9ÀŸrá}˙÷9Àß˙ÌĥÀŻ˙˙·9ÀVŬbÄŻ–sKVú Ğ}Ġà_sâô?sˆLŝk@ĝ"f€÷˙?€ŝí˙mp›˜ë˙Ï`ƒóì@ŜĜ}Ŭ?s€‰˙ÎÊ Ĥù˜êŸ9ÀĞ„_Ò–˙wyb°Î€„ÁÀy˙Ìô…‰9Àí˙û`Ë˙c°š˜xÓ˙=pxŭÎĜ˙šP7µ³HËŜâÔú!‹ëXgŝżÎ[ŝ3kÜ˙m°O¸üÏżĉÂ˙3HŞ:?ݳ_İ6òMk– W(u}%]ĉ¸èôBÀ*‰’%wÖsH„ï żhĈl‡]¤~Lĝ0Ŝr·Â%ZJ£úŠœŬÈĈÉâ9’/ĦËc—ê݃ÇRÎEQŬDxcӍġ…S?b… ×lü§;b÷EP*¤pŞŸŭÚ™3ɌµFô/÷~A„Ĉ3a‚ó#D–dyİz!ŜbdóÈ"ğ蔝À8 ‚7ž{z "fğç}3G0ó:á‹ÒZtE½Xù/N§ÎЏš…çúC×âR j:‡'‰Ĝ1`ƒ2(QÀˆaTƒf˙)gr¸³Ĥ %îÚĦġžjâ‰ך·öä‹|@Îpû‹^\× 0F\˙9 à4żßc+†ÈÌ?;Nԕ ÉBv§ŠĜœ˘Hs²˜74C/Y’dC~p&ĝR½éÓB@y<Ò-é¤KlGÑWH4ÛU³ë=ŞÇíĠá0ž"À~ ŽŞ—´Ġ3ŸE=†Š5t+=…žbÈ+F+̀wĉÜ8ŜFrċN '`Ŝ5ìË â—z‚!{?‡TËO0C}óñ²ö­ñÉGs{èM=œ‹^íä…JQĦVËË WF !]7ž¸VÑKZŸ.•U÷­ ĥ íÍVS,8™ ‰ŜĴkkLÄèî´%:£‰ D5˙|µ~ĦœŠŭŬĜÉ?ôàÂċ×4ƒ!h||lĝcÄä8X„škŠsÀ*wá`¨ż“1ôTHÎpŝú¨ç?› FTSFô'€Ñ‹›Ĵı–Ġ yÔ“ @`şġM›€]q „ĉòKŸĥbc)aˆÂZĜ^–3D”Ay*š'èıjˆÌ}í7{dΣÉñ[Úd˙<ß5KàבçĵÇÀİë J‰ËìoĞNr'0kà ntĤ]JœYċè òìݐç`…Fñ‡èç™Ġ´—àq{ÍNwVüÛ¨fdÏŭx8ŭG—Ĥ+;y5jèÈDJo‘†ê—úÈc B žÖ[Šû˜ 7i {×&Ÿgg=.TVH;Éu1¨Ì$°e^ƒ?z‰ÌßjyOàCDàûHħœĈ×I/ħoL‘DöZ8DB4 zJ-.Œ'p pkë]L‚ô‚,0Ŝ)3ĉ­ŞÌ#A6ÏÇşÁ…˙dL ëdŬP=Ĝ7­N İÁÒÜÓĜ鑺¨ëöÈ]3ÄŜ…a§y< (=¤¨oÏaÏä@ċRjIL;6öm÷*ÍtgR/6Urkı™˘é °müŜì›ë­dêmĥüĝĞÂnÀDM´ĉ'\ÙıÔ^G†¸ÙÓ%”ġ,]ŭ‚ĥì?qñÖgŻŜ~LúüċË礏o_={xëâ‰ŭ[‚ü\-ġ”%à*H’Ñħ_şóJB~+” XŬ…ŻŽ/‡bIĈjŭÍì^>`é.ùkRÈËo•La½I;MiÔ9ğżaí1€4×ۓ3Îَh* <†ŽSX!ĵŝFˆÙŬöWT÷ĈÈtĴ)▆Ġ„Ô£§ûŠ*Ŝ`u†Q?á?ÁŞÑ=tŭlÁ#Ġ·`Œ÷Z T-ıÇH‹{ÛÜà„áÒÙm<ˆâEÂ)`-;QćùMÌkò+#€KÚ€ĜOùêż2KÀŭXO“€|3pឤ– çÚĜñö‹·½ƒDí&Ò/ ywlƒâ@ì˙ĉ&Ll 3eĞߟJŬ y€ODÀ~ĝ‡˜ÏtcéôĜAä*ÌmÍ„B òK`ƒĴĴšúŸ<@ġôQŞu>˙ŜlÀi6 Käö‹(]Ÿ<.-òV˙Î,ĝüOàrä Û`ST60é?y€b6 mÒP şZ4y€˘ëŻú'˙0ܙȨ;…B€p=L÷ÀNçÜöċÎĈ˙Èaċ+¨y{çù ˙É,œïly "0ëT1v^ŭl‘z€ê…<@¨ì˜i*Îáó˙ÉĴH½ĵ;jñİ˘ĞP ’——]üŸ<@R ,j ûà.È(@ ëò?y€Ï ŝ°‚<À~q郓וDöyÙÙ<€Ólà?ߚztşú?y€TUċd˜ê/€<@<äBgó­sra+÷ez)#iÚGìǰ=‘p˙y€ŬÔÔ~}ÛİÈĜ˜öëży€ĜĊ ˜;ä¤û^¸üà}ráoòÍ íĊĜFéĥdÂ1jÍş ˘#›İXä×GŞZ~!+ŽtP¤ §aŻ˘'p‚ÁW·9=dċ-ġ_S!è!^š²f÷iqwÏR4œYçNڄdSpϲMöZŒ0 ҉Ġ’^òZĥ1ÎM?6‚íŻë¤ yÑWîm3D˙|;^ħ_ 1HMÇíyàa}ڍwĊúIÁ68iĝ €ŞÁFèċñZßïħ—BFĞŻeğúòĝ?#`û‹ëylıüŝO\yß_ߌĜĵÌÍB³£€Ùj… „€?ú{4ˆ¤•ġ,ܖmޏùú{Eçx§™Ŝ_ŜâĦ'#żˆĝr"sumµ‘²ßóîÍŻ6PòyL‚}°”_lŜŭl·ónw@ ÉQÛ_·Ÿ×GÌnqż."ĞœìâÁîËèñôYĈĥŜK#r`-/Nĉ@à‹|ċĜw ÷C–Žı‡ä^Ç G—öt/&áfi*§ñĦ!SżÔµŝ ġv‚@ĥS‚9 
ôUì7TBì|?HŠc-”Ż’óáŜ@b§lŒ@˙t›eM“Û¤7b‹µ'n’’ïʇóô jôˆòzlĦ1ë<­¨>M&ï#o‡t]@4òrÂÒh0ûÈï&]˙6i™÷ŝ–uɖrÎW°<€ÚŸQ D%1ë*ĤñŽ CM.´óü még9“{Ž'ë‰^âT.@V·NS}ÂĞZŠÚgàıˆcVŠˆFŽŬ„ <\#)ŜÉÜĦğsí¨&ĵ.RÑ>R.zċ+Fħ>_%˜ĝĥM•XxĞšÇݸé#ˆÏ‹ĝ֏ÍÔÄm…˘Ž°áÊKßڈ‹ZÚġMNê‘ÔŸz:îGm? nuSÀŭŝîÑ£ákŭ̳2Ñ×RSV„)̂•Ġ´ôMĴĉ-_~ôÒ£w߁* ĉpĞżöGÜéù AêN›§—Ìĥo—V Cik\Í Ö˙-bž8"ïs³‚еPĠŜömBPuŜš"ĉûjHP~DэĴ›˘´‘ıw‡¸ÉŜĊx@ âĉ‡]UŽ9"ıx†=ş´Š÷DUìÌTëjdA%璨^2ŝ\@{Ϥµ3ĝçi.MCè^š˘îofé,0˘|F˙¨ ÀJ!ĜU*{ÄYöwŻıVÓ_E—ÉwPĉßMM4²œx òŬĦ·ÈŭIGĝ€×ĝ<Ëx!ĥ^y$šZ‘Ž,Á}ŒÙGE›“„nÙPÜ_mò|T߅şÉ= DŬäúÊVÖ)ıM½lÇĊÒ=:ÜÁÌzKCĜ)ҖM]ÎÂÏĦ³°¸żižP4– Н úpÛR¨6Á$貨nĦ Œd‚ !ßíQ˙2 D)‹ ñ/tN,kĵ*Ċ?k>Ĵ@ñŝÊyhŞİĈۀ°¤´5‡3•síYÓĵŠ Ž ²ñ.PµĤ€§U^|š!“ċO6Á @H×ïäÇZ&Ĉ(yµÚ”ß$1 ğċ{½ùYÓKÀù3ÀŠÌÏH|ŭäÎĠs'#íßğgÏŜŭ‡"NžğzçÉëČüJ EÏUšËì­ùùĉڞċvp—RĥYġĤd€‡1k?žôÓ‚1ÀĤ'ċ“ ışXw pK‡Sw“Ž*xÓY;m’6ĊÉÙŞœĞ6ĵúˆ:jŭp„óĠ›˘p¸Ëö§¨ždĊÎA½rÂĊ ÏÁÊüQû3’&éB”ĉ@ÇgJmAžİ-›€E Íkê_ í›çÂÎ]M–Ò)°‹%ì½°;0œğ• ¸`Û)o§Ü”ê*e}÷dÚAo$°ŭÑçÈĞA5Ĵd—"É!vŒŭyEáż{Ü·ÖÈĵd˘İŭ˜Mô^Zu÷"AĴŠÄ•Ù^@PVŭZG7#Ë{àuë·ħTE:4‡7ïĊġN/Ŝt‰Nw¸ĝ—ÇÊG3Ċxûg°5ĉ_”G…Í6?ĞtkYìvG‚äkµöҗÚ1B˜µÎ•~£t-ĞEk÷œ½˙&-ż²İ{h|z– òoiä,/dz|¨ğİ2?íÍŭ³{Ö.²Ò’¸( Ĉ]×EĊċâÀħÚ/—ÖÂ%ƒ,ï¸=ĥ ´UÏ6› £òž Ŝ÷y;k­Žċħx/:éN7š^1ĵâÇx9ĦҔùŻ™Ĵ ÈD/$o]îH;˙³b™ÀmÛ)˘pqbĝ¸¸Ü•İ­TDü‡yëÀP8ÙħˆwUÔ ğ+nĊ?Göf0¤- ô’y‘Í3™ŠĈĠ}˘!–y ? 
Âȓp>„Ŝ ŒÚj´K.DPÌ´ƒn˙ÒYŜ́wÈ£ wh˙Ôe OáèfÉÛ.‡”ì%ċS3ud;“G†`$ô¨/€j Ĥ­nO#ŝwyĞ…ż9ÉġX¨–w[)ğ‹%q6‚;öš FòÉÀˆ ĞĥœHÍTÁ-‘9?ù7Ċtżâñj’7¸•‹˙ZÎE(Á Q9ĠwXBêXßLŒ1êAù2BĞó°ĉ’ëkXú ‰^#5HjĥµĜە²¨ÖĥÔ1Ĵ-6@ ‘ô¸`–äC o³Q ÚŞ7#Ráğî˙hf °İÎ?ïYNÜ!2(Gï5[žŝĊğ/ß~äĉŝ)*úS˜Ÿûۗw/^?}pëoGKA0œ¸.ßsŭŸÎ)LÀlŝq‘ §k,<ĝĤ vÏĠ/­Ċ@\{(ı`5=$€Ĝ6l,u›*ğòíV fé T€œˆ ùÂ:µ+I%˘Ë[-$sj¨qÌLß1)‰}S7•E @eRñ§ÖYTɽ!ݏĠğÉ˙9Gä– UH‘÷„mŞÇ“,o=@ô_D7r’Ä܇ ­şËU-zŠäĈ ŻĉŬ…‚FV-óƒ¨÷¤Àj8ñMê”Ŭ€/Öa^§ç’b{%"MŒ³“Z¸ŸîäJa)ŭ×ÁN‹ŸúÔ=á4h`|…žĴùÙkdܓ/éÍzG[ċàM<£Ï$‰Í,ÓÖ,]BĈoÓ<{[œ„îâ/$Œ g’ĵZ‡7 Ë;‡7£ kfN çZŠ uçÀh„ˆêCNÓz0Gr[ĥ2/ŒLŜĠ%ygbMûd„Vċbm'5Pû˜QnĈj1úÂW£üßğ5Í]ılöŻŭ$‰E÷ÁÍDˆµÖ>kFțpXËKĜ„?ŻPhsòı•Ĥ’0 0YrèIV48Cġ?Żî]ċfİp‚Ŝ85=S'o˙Ѓ‚×­ Z½ÂßÛÉĈTO°Ĉ¤XIU·U{ŻĈ˙Ĵ'ĵcMYO-1)€¤éÊsÉ̀ ­|n#A„7ÀixĥV‹‚ÍŞñŜ" ’Áŝ_lvî.MDc÷oŝèĞ…tħĠÜÑ{Td–ğJHf_–éMÒ½;9rA‘ħêú”ġMœ‡Ş"£;İ&)p^>=S³Ŭ<ÜıÙ0ÜêE:8Sh,ñż+äÔÒëIğGS—Œij— ÎKšŠ„ÂbxëhïXŜ’ù=ĈF½?EÖ Î Ż˜6ƒ yŞ÷}‚۝İ÷x(W& 5y–Ñ„`Áj—i`:¨ĵ_ܗ€<Ġsċž#%|@M²\ĠZŝH,ƒ÷Ġil ƒ— ôM™òzŬŒ^sdô%ƒ2ٝĵ—6ú3½ jY@>˜@ UşÂGzÖ£n%ü‡Š’§Û6M>b%~¨U’ j-UÖÄĜuMòâ,ĴqŸ }E6$çt‘ı·ûar$ÛÛ] žK’ZżSMÑ@ˆ0Ŝ›9uĵ Ö$1,ö|î†;À³À€e:òúò혯Á@(rĉŝî§U ÀFwf°!?ñᙝĞÚü€ DB.œàü# û‡``³pġÎ3ó NŒħŞÒî7—£ ù;ċġñ@P  àl|÷€îÏ{,$ÍàWĜDĉ^càCDàí )Ò\ˆwŬĥ%ɅúoÏEt!혽‚.³ŻËZLÖĵ>6qKƒşĥJd…ú•`MÈÚĤ/J*>䗸Ħë{FÂI.•ÀĜPàò‚–Ħ@dûô#é÷° Z1Zf ôc:ĵ‘WÍÜ7BŝS)˘Ŝ“_…— xvžĠË$ŝ´¨ı²ôJ’Iç¸z£Ï„>ċUĝċÁ§hĉ_Úċ µ` É*&?”ß,p5šOy>ġ:„·à éÁ½„55I„MàjL‚jëÔĴrÀP”¸lûŜJ­²Ħ…Œ8,Ŭ0ŭKS5“³ ­uş&HV—{Ç+*Ċ‰šâ7x%èşNĉq( Ê|g°$ı ™xÑR²ŜİáKê”%ÙXÓ9š?¨µnÌA ηák"'ŞħŝXOašĠzÂËЎHûŜŻçÏ_^Ê ·¨CĝTĊ“Px ˆšŬÈêââÌş/×ÙÁ>˜"cĵhë7ż‡ú ÇúşÂÔW·Ïìß´ÊÇĠÖÌ@[M‰pFIIÖ(%5m3[WŸU›öŸıŭ*µ°. 
€nüġĉÒÖEĈ ˘+Û­ğĝމsğ²n™B?Ü,ôIĊ>ô# Ì5Ò .ÏëïûJ#ê„߸ŝŞMĜ3ĥĞ>a€hŝ‹·7@ĉÜ!š?Mî@–½„˘~ixêĦyižéЏ€CËñ)@=ĝ8³sêZÂğ!™ˆÇ+)ĈŻċԓׄjGƒ}œLUÍ_ÓH,Žħp¨LK§²×^&8qàò“ʰVĞĞ…9PulGLMGߢ—¸š¸­_ö9j~9‡Ì]›ċ‹³?Üט!Úüyä †G¸†/Ŝq‡é§0 Z,×@×ÉwP:8OtÑ{‚í¤ÛĜA@†Üql*q t~†]Ĥĝö7; ŬÂŜÊi‡k M);’jû‡{ET' OÒcgv…’-ÓL{tI–hú×@˜Ùkà’,ĴÖċ^p |dŞ†kàe3TqËOÎtچ_/ÎcvĤÀ50uż•0\ŻÌ^Ÿn²@„tŸ BÁ öxµĈö?×ÀĞo²Ğ{ŝ{ ,ĝž˙ôεóQÇĜ·wEżvçi|Ò÷‚˙^{޳ß\ŭÏ5v \BZóñÄb!DÂbÓÓÙkภ[íyÁpÊN#2cŜĊ_7¤Ms~nQDÍ.5°jŭ^xhÁ²–Ì^áì×À_Ô"+[‚Úgâi–ä.ĉq†ü¸tDŻp˙ĜR#Ù6p ü-÷ğ%äÔÜïKıŒ=v!İ ½@ƒĜmÒvÁ=`·] k8ċŬ¤+\eè)\?Á5YŻÁ 7ÈçfÑĥħ/K Ż;%öòìlıG„Ğr)à0u}7ÊÙÛ0HréÎJP0èŭĊs,Ġkšô šëtġë[͔ G1^ñ {xIÒĉ5CKÈgŸäts8G€i0ˆv€üjx Ʉw[FáŻÂYX„½3˘ĴoäĊW–aßŬQ³¸™sŞ”Ċ™‚öŞ$··Ó“ŻŬ(²aüĦX7šè’·£‚ŠĤ$ ߘN|Cˆ:*áq,m-/7ĤmĊ­ßĝÔß;äHdĊy[ï˙„µ=ğ÷ϛÓ뜈e URËÖoKÔŭ?+ÛfÛ˙¸Lñ=m-ÍMMÍ-m=eóˆ˙ m•??܏ÚâgĞĥX:­;ŭĉO/|ÓΟ÷·ÎS$“ävĵĝ;…˙µ0u^ĥ€ÇŞòaïŒñ• ™ž¨Œ]"Js‹â„ÉRÜ^ONżu#ݞhd.Ĥ¨ž˜‰3CŬżce+ÉqĵĈġ£wXÑBÄğ‚÷@Aĉ6ŻÄ…´etk]ƒÁHà 6Žprtċ> Β— Ġ˜K'ñö ĝŻ‹F •ÍZëġuëš ôšjÔ-rÄŭÁí„ ĴnċŜ ô;°ĈÁà%·Jĝ×֎·W˘ó5ú£”Œ¨c.ÖĵŬ’íÙ“ 7ÈӉ9ö3°D77}OŠÄB„³{ĉôĉŠ/aż¤lHà^ìş ş€ ĞÖ6ÉOĵŭÈVŽĤJ:/‚´¤Ż‹txĤĜRè<ŻĜĴOtmç8ì9á–cKZÓÀy¨.´³kĉ&u} –â‚Ì}É8ŻNöN÷@tŜŞô_!ş"™5ŭu•ĊîF Ŝ%F6(˜áV\vN`ËVÁLċ½ 2UgU4ìĉg>ġÔ !˘~݊ìÇî+Kĵcħ… Á U4tô =coù£L¸òf1 äÁ%9:ÙX‘˙ †…‡Bŭ g˙5‹Ċ;'–|‚Ó_ô*ÂÏ@ĦixŭœÀÂèU:T²FÀ½ÊAëË`àş^àÎD˜’Ċĵbz–v‰U_§YÉ+ ú vvèá§y“Ġϰ_ÎE\R°šġTÍû3];…Ôr֐lហ[óñε¨g 8£, xç…,‹g“ĵ:ú–"xé*š9Ĵ-È~Ŝ'I›ĥV+™ Äò·£·°½ ÜByÉ^"žÛ;Ǩ7[8‹$½53‡µîŒŭœ‰§È›˜m’é€x§änžµ ĴŽ\&Œ!‰˙2†"Œ!˙2†‰âYcH…Œ/cÈ+ĥżÈ70†ücH2CžÌCDçŭÛ²°³kC2˙1†¨ÎCža„1¤}-ꔏ%èÓvôLŝĈy³ĈK˙2†ÍCrC¤¨nÏÁ²Œ!{Á’ICVĵ˜5†é³ĈÂĞ~àm ¸ž;ˆq;ż_ 4›5†Ĵ9ŭvÖ2 Ĉs[}­4 ë˙1ÄnÁòGς1äĠğO_RcHʗOï^1äìѝ!ËĜŭËB“Ò°òŬzŒ!£³Ĉ·§×ÌCÌŻ|ïäbƒı× cˆŠßĠÂYcˆĈ„1äH ´ĥg‚1d/Cö€1äıU*$wÖbô/cÈYcÈĵ˙2Ù³ƒĤŸ€ċ;ĦkÛ cHölÖ˘ú1$Œ!‹ş:ŝÛ2O4aÖòŒ!É` ùĈo"ŝìW³ĈG_™ŠŒ!í˙—1ā0†T ú—1$‘0†\FFĥÀ°m2ŬñÄ# _Ħ zĦ;ÓVfϐğ3Κ]YBaĝRüÔ|…Ê&M‹ŝ<)ĝ/‰y13$]‡ŝ¨×wÚK~ĉ†#‡ĝŸ¤ÍŝŽBp35>aÑÂs˙À À”}ZÈô£(s—WĥYZ˽§$9 £Ùsc£ç䅛ùIŽÈÜ8Öu]è°Ĉ[!Ú§Z°Ĵ†š^jÁ[Z“%–$Ĵᆠƒb‘7ÍoxàŻ€Hş ĈhÎx h.9—/ËHI܅Z „Ĥhġ*ż}RíÎ:˜éîZ`Ħ%Kä‚QА˜Œ’Ĥ‰…­£³›;|ı9;ÚZ˜èk*Ɉ Íŝ?ˆÈjY,Úä:è– &Ûó_EÚ(Ò†ÖÂ}q%#VK?·D^~Ÿ 9@ŞË=&‰(ĝ?hàOçE˜S.7ĥL‚l}ŝĝ—LQ5_·œÒFĴħƒ¤{}˜7qLâ7‡ ˟;' 
cħÑH1{ÜÚȂ2Ŝ]Ċì‡İivİ=œŝÌŽĈ>)hfr ĞF˙šIâBÂıŸ%í;ëU˙ ıJf0aC?á!•×oĦÙTİ0*žtÊêÒtžı‹<›0³š*0ċx(ñˆï–ÙFÀ·*Œ SQ"À ùÚ Áħ²çe@{è8Ï]gĝ r{fžîÀW„çtéZ§ ­Ü5Ġ!€…ï€UÀ>‡_‚HÊG1§îF İDŜ!$d²ÈHú-vCÈĦf" èí]E$›Á|P U'Ŭx†Ĵ(g%aìĠ³GXöÁ'-ÉyˆYÌÔĜ]c"13àFVĜ]Ž·_6EUw˙á&ĝ‹SĴ/T…}“:"³ä1XšóN9‰ĦJ~7Š'ñ៳OÙ0‘ןiû~s£#Ĝ£i s—¸ŸZÙ_ÎX{YĈë›Çĥ€HC”³màYmżÂ]„ĵH…ĥğù:£Ĵ} <ĴÊÔû–ÎU ;ÚqÍïm;­}wĜCĜ„‹/üĈ'‹oĝ)ĦbN§òÀiŭx‰ ˘ ¸öĠĴ)âŝ £ü?ğUQÓËíxùn²[Â4‘x2;6c†ÌƒN,âOÂ6XYxO G%ςO‡l‘ԅ*]-x6ˆLŭŞŜŜ$l˘ĈAèöVÚ¨h29ÄK”²hìvûĴK€ßˀ€ ?B·›‚ġBİ–ş]9!p„òu@wŜÌmäͰŽ;ï8ôŝGyöVœ‘†Ż@‰šBîÈìâY9 ĥÊÀŠà]·|Vĥ-|žëĤŬ˙´ËĦĉ À‹‚ ôŻÎò%&Zu͆Ú5íĉJ…# A\ĥ Vñ˘­]óéwñ'"ÖU!Hèx½+ċ8ğÄNUîèĉ-dğ\ìµÈèaµìÖ0şĈ]€eKƒ<‹˙Ċ5~81c‰Ì‰âĤù ‰ŻÍÈò|ŞĝŠ$&7·˘š2!¨½ħk° ĝ²ËŒNÒXyŻ’™ıW—CLWÊfÓ}âš>Qóùâz"Êİçşîēo]„9|ġEßß?½µk g{k 3SS3 k{gŸÁ[÷G]úŝ{QŭĴ/€ÇìŞĝöäÄ:W="^Şâ°ŝâgâ†Ñ—w“ ĝt–_ͅ04z„Ħġ˵#oĜŒşWÁ)Àĥ„}œâZ?Š„ŒWY‹<ÁïÒçwµ:€£é² ¤} G •ÌÛk´ ›ë´L:KäÇżÒ7@‹çĴŞŝ‚1&Šôı_ۍ{žœŜ&ğ’`B·ÌV³o—ÌÀ}D—uLÂ!_Á$dgšÊÂJ ‹û+u~[Äù†?@‹$½žrQĞVšĊ“UÜç”Mü' |M_ ŭpËÖVkİ/ܝUûĤ¤C”×BĈĞí`Ş)ó>ÂdŸbéF ¨iX‚ĝVsoËK]šîÜBĠ{ÁĞ_GĠy4Ó{TJlg³ ĊĠğ52“nĠ³XA¨$mÑ[&›¨;ÀV½5ډ.ä ½ûñÔp]’¨Ë…"– ċeˆ…˘ĝ | ûsoİÂġ‹|OFŝ&]ŜèŞwşĴĦ[áèw?ĞğˆĴ/Qŝèh¨ü“û=9ñŬ›W/ââ^ĵzó.1ù{îŸÊ†ŽÙê ä‡ğŞ‹>äf˜(²¸ŽëĈËIĦq8^ġ>ÒO—ˆ™ğ÷g Ÿ*¨CĦè…ĵl°Š.¸ˆ’tSǁcà,DwŠn…$ƒQf3ß.˘I†`gÁŒ›03rKuO4ï“:Ú;óH‡şž÷Bşsú’”ümnµ/XŬ(†Q:öTQÏe`Ş~MÈ~Ż zİ£ô ²—;ı_¤Ĵ[[-Ħ”ŝ0Oĝ›(ρċž2>OİşNÙeê5Ŝ׉ŝi_ΈoJŝà¨,™Ğ•ĝÁżB LôKfĵ4ç vHĥ'"q°/mTU!ä1\ ĵßEKï74NoÄ. 
WH Ñ3ĦOĴċB Ü äÛ°ƒXXZíĊ^açɋû›é×°DEh†E‹ÌÉy°a:Ğ ÷HPàH éœ<ŭ'NCÙ䝠j5Y˙éLÏQiÑìğY´şÉ^…ŭ5xíaUÔ2şĞ8ĴhíûÍc&BX= ~şM|exP \·0Ŭ5w‹'İŞ—ğ`d½ŝjj#Ġ}ğ·ÏßR…@KjZy­?tġÙè~÷Os B,€ Èa³9DGŸï‡ù—gW­÷²Ò$”“"*–ŝûî}ĞcÌĈÔĞë­a¤àĵûeĠ>Q|w.ÜCWA9×x…"û  Ŝ£aê° Nfò~ïÓB´W`=і¨êáZĵfżĊ+ÉdŻïXC¸¨ôў™§úäĠU‚w&ä ´ĉNMv† äbħtCPḉ†ÍW5ĊDìŬıı1ù<öJÌĵqží$~›ĵ› ´œġIš€rżŠ\À6ÒS‡Í ûÓiğ-Âġ{żPa(£`Û‡$ĥKîMo·4¸"eú3LjÍ`bĞĝnëœİ뤌féuĝFp ˆ„5èW›Ş+=ƒ… ŽÚİÖ4h™u—(Ú@enG­fÙÀ|Ñ·PÚËû,múwd)’—Ĥ˘šÊż@sĴّWóĝN’[%ûœ0Ì6Óö íëĵäW.C­>cċdƒ˜™Ŝc2›k°œTE°iäĴbˆ‡ä z›"‡˙b=÷‰ÓĴĉtnŻVA븍YóğNŒUv{Dµ–_Ë%ĝŬİç–Â5MĊ9ìÖ·ÈqNuŝ~weğŸ•şA;Ž_yĝ*1=·¨˘ĤĦ½³ :Û[j*ŠrÓ_=ĵr|Gƒħ ˘J¨[ùmżòî7ħ šhĝv+ŒH›‰.?—J°ÉsŻA7”Ĵħâv ë|·YŸ,ds¨ĠWĦÇ|Ħ!Ž.Ž÷z°ż‡5Óë=‚üqĈް˘(R—ĉ`5›…eŽġÎĐÊħÏVè²JŝKúŜĦá}4Ż05>Çt#íoöùVµ#í?UU%IZ1ò×Tú3~äü·˘óÊ4ġk;ĉBQËFħ¤ÛLĞĦFĠn4U(;ƒĈ²ÜTÛ~1(LìñuÒͤëSs\ùğĊ[‰ ÀħŭeH1ùQżìvƒ ï h}é.ӍIË­—Z ĵfEo'=d{Ê˙­SvÍz‡À/–ú2şG„şrúğĴES§£ĜàVM”KżƒwŞSŭèzdŬp›/şuĴ *nŬ#;ÈĥżÏĠ ïÒı‘À„y#vİXér’Áәŝ“òBëˢµ ݝĠxù6išïöÄkw*<>gĤ“IsÏV Fƒ•à÷ŭ„5N,dħĜ•DÀË ê•Ò—€âĜŬóĠè tÙtġsċħÈi-ĝpë`ğ™š$Ñú£BèKÇÔÖĠkéšġ›wíŬàÀŝ½ğÂ7Ż_³ÔËĠÖTc@'Ñ%ĠÌ܃ŜúPJ,”*?_ŬäĦ@şÚüŬħĊp èH‰òR…·^@4dBû’÷Y‘Áh ´÷Á*ˆbp⨠úì\’t`2´[ƒ$İîŻ'Ĝ_|iÒÛÊñêRŒµExÙz!ù“ŭ3O HËKħT;Äğˆ€„mì„Ì”ÚsÁ/[òŽ‘n(UĥmE}ۆ×!ëGëàÉúNÚ¸d"˜loÄ;›,dżOݤ>ìbsr?îġ vrRû+ïÉ~C\oĊĉpG}!>·@Çk¨§¸ĉŒ 0Ϥ˜h[‰xUqnÊÈEsV“ĴSñĵÈĵoxÉr²ŝ£éÁsÊԕxĠV ‘àBĴċ¸:j÷d„—,IqTpe¤*üŽÖ—„¨ÇòĤM€ GE"ÒñİÒ{Ï”uÜġ²rçtdŜÚè  gveÛ5‘ÏáÏĊù“=Äß<şÑß٠ʁÄU@ф"˘b%*ÂúçZWD¨š9ûo΅QÂóH˜+½BÙa­Ì>Qùr—£,$Qï•Náƒé˘(À›ÓyÇ,¨B—àÂò9X5ŠĴX° E28‹7òÄU?Ŝ‚‹Hl­Â VR•Ï N?Ò'//ÁżÍCäáİÖ¤Ġ œh9™›œ*/deÛÄ LÑ/0Ì ¸§¨NµĵQ1Wü˜™|jÌdR8[Óĝòڙ$QOÌË÷™~OÛ8ûB‰ätéĜÁ&÷"Ï[ı=“Aj ÀžoXÓ •íyMHƒ˘8 ò½ŒrwX1ó0™9C+ùM=Íħ7›ş‹v¸XüĝÜ9C_@ħ›&"jöċ˸Áo4WÎĤĞZ_£xĜK(êaëĤ ´•ÓıÑU N´K 9’‰ß*&ò‹ŽĜnı;Ô>%ŞËŻ\‚8fâ?çd„Ò²ÎŬÉÑkš„3Şċ°uqo0Ú Ñ=ӂ׃@]D%6ç-Bµ½T#O ×ZğŒ@ôK:Gf `ĴŞĜÍsĊĠWËċÀĞU…Š”á‚M0Éİ' AĵÉŝĈ˘ô7÷ÏÚèçî`aĴĞĦ‘@ĝ‚P Š†ħ…ƒğ_à–CçïżI/j쟄{Ì`=̑6-0”˘ BގĠ@NÇ÷+ЍEħı›cĞXĜ@F$0b@^PĈĉĠŜñ„QXʸ ĉ’-UÄ;nĞŒ€XħĵċŒ.b=ÈK[LU8ÜBX£4ŻNŜĠ!”œŒù?ñLGdI%?VM4j²=u/ç^Óù¨Hċ·xœ (¸.ќ†WĦıéÊÚSë  ˘†b ŭê.ıÜQĝs“Éï³gµĝ½(ğAñehÎÜñxŝıiĵGïN™ÙsNS—Î̘x`ĦŠw)eßhˆÁ>¸"›VqĜÈ´§ 7§­ĉN>Bž1-L`|[+úĞS×b8 
#°}8žê3p˙Ü+ˆùOäİꗏĝQoâέ½>ŠûĤ}öâ†Ùük"Pq},Ğô˲¤lŜG×KÀŠ<ןĝWÄéVšŞq}ŒyßuMâö]†ÊËÉé@êl) ÷è몰ÔyÄë˙Ë™Ÿ‡6„ġLD0ô>á_ġĦS_áLŜ;Y7Ÿ k•ı"wñ7҆ż§· ÛÙ?TtŠĈ—S˘!´‰óQÄs °­ƒrN#?Ä}Yñ”ĝ ès{*ÔV€64ħԝǎ&öŞÀOĵ!ƒtoLs ÉĵLŻIF[~˜ÂàCRní쌙ï¨PI c?ĥDİ#rL°Bş ˙2ò€'ùğÓÀ¸ë,TJ—.î>’Ż8·µÙ\ŝ¨ Î1 —Ž˘¸ĥß3ûPżÎŝ5À‡èYM´\wSl~ žİH\`Ë* ˙éB|ìŭ˜Ĝ}ԇ ËFtñ]Pŭ›ìOdƒµx-ähÍŻv൧ŒP͙,NŜ>=TÚàgĠƒüMďQ|8ó„› ärĥĈA(YñbĞ27^zìuÀ}ıƒUİ÷­rԑè/Ñ s\ĵ~Ïİ[Ï>|Ë-jlëîƒŻÑáÁîĥĈŞâÜožŬ:µgŭbG˘ˆaiÇU‡î§V r=\ôúĜRcq”ŞìşçE"q[!‹$v"sŭ%=ÂjÛùÜ_ĠۗÇaenVBNĠâWÍ! ƒJdOìá›ú¨K"Ż+BV8Ĵ^Ù™˙ƒ8ıüÄÓĴĦĈş ĦòLˆ²›hUŻîBĚŝN?tßÌo˘t ĠЎF ~4?ä͛[ç*ĉ¸‹§şJ›²şŒ :Kúq —ùğ[-½BpŒ’ŜĦ´ÛÏ():Ês2›9K+Ë%=TÛş YhBĴ†MŬħ0™ÎȋA5 ŠŠ”— ‡ëÄ+*%W N’Ŝ0 e(í=ó’ĵ g „x >NĤp3L‚{êÌdS¸ğ™Ÿ*ßx‘dï.¨3^áéêù¤m#ŬÁ¨G7Z΀uDú‡²àŝsbġŽ×şG\|g#ö˘|2S"˘AÙüÁûĥ$ĊhĜûˆœn·á½Ïŭ$Q½éXÛóU*¨¸óİœ1|$ûŒ§Q şñ üñ=Y7CĴac#aèğ˙q4ú€[•s:lħĥ,€y¸²™ƒ‡ßސ-ğŒ:}ĉÌé¨È£vm Yċçá`ĤĞ<Ğ'3dµm‡ŽIŻR,´ ³ï÷5”€=“uÈMèŸŬ ŞA žg²GħœSÎâ¨ÊŞçmĜxú=TÒïy/Ŝvۉ&âtƒ‚pE’íŭA~v¨H $ŜŽ+Sü~`;ĊĊ÷´òŜY! ~âż N?ˆ°|@œe£ıhp÷È6Òüê鰝Üuû.or$Ĉϙpd7EÖĴÇfÁ›Ġy>.AH÷ ¤v‘_ÎxKC†×ù†tR°Z²²B|.\Z. `µÁȇN™0ÌŬB m hBäA‹µ½G>µIn,ԟ¸&u+-ĊöÓóe—ÁĞ˙qÔÜh ]h€‚ïòÉó :ÈċğHĤ˘ì)Ĵ÷>HĜ4wğÓïᏄíëFÀn2Un-t™Ÿe(z“ŸcNœ_Œ,kd]W~Ìûğ™—E‘=~âyŜ¨É vÏq9âX²^D<Ĵ ŻŬŻHvupÊ QşçĞ/j#*aSĵÒ(K8cŸ-fóŞoû”cŝx Œ\[ĤI!)ıy_?MÔ÷/ZÊRPM—ó …LpGµ§Ä\Ĝäic "%L(‰KCDLBRJZZJRBL„„äް”Ёgž 1)Ċmc`fv&œuÑA)²– Átŭû#îJ$Šĉ²kòŸ™¸ßÛĠ÷#awŝ'ĤĴïH˜‚ò˙DÂîüo$,7ŝâ#a—ŝ ğ,Q"6ŭïHĜÂ˙DÂ46~ûo$Ĵê„>DÂŞŝo‘°Ë M×#/ûŸH˜/DÂ4„Ħ=óŻH˜V"ŝZñŸHXŠà*"a˙„˜„•jéüíħ—ù9 s—ú5`‘0•˙‰„™ŭ+Ö˙ŸHXA9=ŠmîöŸH˜X£ĈŻQKsIwÇ4—@;ĵ5tecp᧓ÖĈ@‰;Ž­ÎéÖ7ü‰‡èAxÒzŠ(8‚ìoċâ1`•”ŒúƒÍôݰcsżéïğ&èIèg nÑöy…,ǚĵ­䠐Nĵ ÄqÎ"İXeUíÊèäC#Ôù#gàŠ.jÇd½_H•Ŝ”ÏzêBH„Ħê]D>0Öúxħ4˘ô²›*8ï! 
#ÉŬn˧½UÁiûXM6ßŜái·7Ĵ‚Ŝŭ”_;˘4Ŝß-o‰ñħo_ż|áÜÙ³ç.\~ûal|â7荷ġĊ ¸4Öçş{4–4¸OxŜ4 Ğ ‚Ç۝.íáO-\Awò@J{œ/˜ÂÚ_İ"ҋ·bċŬ0ÀN §ş<âço’Ĥ.|ÏbĈÙ£şW8Q£‡“£WÔ¨A•XŞ#q ÌrFĵKñ:BGú·‘­s°ù˘TqSİ/ñäášßy'H~ŭ͎@bşFñ-2ÏĜbp·î‚cxÔĴÄċ ~}ÙŭïƒĉúŬ9ÂëħÀ‰3ĥž| o3ÖuRZ+ô—hŽŬ%ċ–RŻƒ3Ê A>4 ŸšÖ÷màP úµMzZ=|Žè“¤RŽbkĊŝ4Ğğ…ƒ:ŒċĴN…CrÊóñ-è’ö‘0ta-널q*ŝQqtĴo;Ġ-Vâ‹Ĝ§áEKH&/9$Ċv4`™>dĠS]üïKé@SÂÚ/"ş'êŽ6dİ•ŸĈù§ĉR q]/˜Ì>4—F5Ŝx€É?× DÓĵó2aMI'ĤNìn÷³Z ËlùġêÜĉEfÄ } 3×ĦûNߊ}—’]X^ÓĜÚÑŬÛ_½Ŭ­5ċ…Ù)ïboŜşÔĠLƒà ÀÇlÑĉsŻ~µ@Çd²%ë>ĝè„`¸ĝDR$Â~ß Ô§A$èڟIl7ĤÒĉʞÔCv2÷TüÓJ)²Íĵî„.bxĦö}éw~×)U²O&Ö°CLò@'ç iIžfĝ–`o İÛûĈŽ2ô>âİĈB'Xµ Ѱ‘ö%è–ñ7 „Ĵ4> ΐŭë,%Ŝó Ûı‰ ĴÒÓŞìħ‘Ïš'WĜoVÓ¨i9˜ îèK€Š^¨Ôĝ[$ßÁ(¨“ Ĉ7IÔ˙¤\œ™ëÈ;$ڐ‚ÄwJäÌñEÊ)w‡6 \,8ÂĠÙGl÷4%ĞYv%ĥŸ–Ŭ²€sĴq2eMê°e€#ĈcÒnìz4%áüç´ÌŸ &u=Â/°3¤•€²–x…G ÙWOlÂq×Rdpg>—b™ĊkŠœ˜Eĥa‚R°° Ş\MÑy-i%ڏo€)i^µ (qYSïÑĊV³ôġr¨ÉÉ ŝDr¨*˘°:2aÛ .ߎÄnŒUñ PFRô8–¸ñŠWƒ8Ÿé-}tˆyxU1 ¸¤cŠ$š^àƒ ֝¸ƒàn‡DXOüjD54y‚_qÒ•[Oì2VŠÑ½›b}NÜ*ĵĉŬ`ñħ†Ĥ½ĝ:”Ġ•Ä.hI™ ÁÄê'D4ħlKÊŜñvĝÁuoD–vw{óDµ½P4ŝJ€ì+Ig°Â=u& ?™KiÏùá ï9ˆ^v“1vvêMe2àğY ÒžMۏ­”m΢œĉÎ#(€ÙĠ Áf…Ħğ”r„żYi$šRü‹toTcx%Ħö€9Ǟ} ŭ0¨çÈzßÄSħBĈ›ŭ!HOx@%g-c|.ÉsŝN0ĜŜĤĴdĉİkCg`á_tŞù²N_tÏTµ3ùàTŬB4´ ŒäÛ ÑSLxèÇ ŝLĝ éʗF'ïé“ĤípêâoĵŝëĈˆñ•.¨T*£sŻ´â­×mĦk ìĠöGŜ¨îĥäĵ÷f}ġm}ÛĈç6Ĉo·‹£ií_ŭ|ÁPqÜ/=Q”,eäµFRi÷´çŒĥ•¤=żr0dħ“İ–‚ƒ:;÷<•F£Ói.îŸBeH(h™:-9xċyZIÛ(Lw—&ŬĜáe$EFEġĵÄ ĝŭżn›‚ĞÒz{|#—ßöv+( ġ7èĊG’·é˘ŜÚl vÛ뇿2U†šk×ĝK]ïç}[L%úái Iú÷&G/)Ó76LIñúú3O‰Â.(߁6Š.Ĵ›:HvžÚƒúvö|‘Ğ£páÄ×VÏc¤ÜkóNĝ5\=ó‘ħŒŜÒ„ ѧR 6Î~"ë9•nwĥ úá;4ċ°ÌÖ·`i²h9m–Ĥz~šb‰ßŽ³Dl¸]G¤ÄövrbÁ Y|Úöĉi1•§ü"ètî";ƒv×,wŽn[·ÀUCNB·AÌeßÜ=ÈĴ§a ĝYÜıżêÁÍFZ5­†zÍĠŞ6™"Ğù·Ğü‰âoŜe4aÄÈ~ĉ&šÔ£ĵß,Ġúy6 @öañçˆÄ>#Ú$vó]LY—É?ëÄ·€3Ş9ôÑФŞÊ%W ÎÁdqĦ\E£:|wÑ%3 ´µ`ŒdöËS2İï‚lĜ·ÌdĞĞŭàìGW7şPÏñèK<ǵ%ï ~Îaĥ­B½÷öíğ÷Ÿ¤eĉŝݨk뙚EÉMµ–Ĥż¸´{…ƒĥ$| iE;˘ÓêC?TúòàBu*i½ñß7—İ“èsĥïÂ&°tÁĴI>¸m¨Âşñĉ+–$ı?ıcq³ş€ĉcŞ$÷DÎÀEM²÷Ĵv3CŝÔT´$Â85„vƒQSLù1żÔYĠĈ++‚÷.hݤÔ߉:˘çgêĦù\ÎŞ&rtx$R\îÂÄp”ŒĜNîk Ä †Çİŝ?g4Qğ˜1N´)`r²6* ĈP³˙ĵN Q ù4„÷ĵµ›èĵ“9° 89_6Ŭ<ŽĴĈÄ~zàVuŜ|#v¤a}É÷ı)ˆÀ.Ħ¤oċtŭΈó7½x—”š‘ġ3÷ׯܟYİIï^<şy>bçúîVúJ€ ‹(ııŸ\ÑÚ°Ñڔ›UĦ{çw"ħĴq7˜ #²óOÂ*`<ç䅨"Jë ½PqÂUĜ˜Ċa‚WdEg,ĈĠ<Ó)ĝáOU<Ğ'Äâ5·sŸ˜LÔÄ9ñȑá£"jOùċĦݟ9/Ş“ˆżW—éš.Ù2ŽżìI-˜L˘‡@ àµXA›ş 
Ĝ£ïÎ8é~DoÌĜ˜OŬ"2[`4}Žŝ÷;ß.‰0ĦäfÁ= ŜòyÙŻ´ _'Ûö şdNúoÑËÜEJͅ°W<ƒ>aûHŭî44ì(Up(ĦË/9Ûîż:ÚċóEŜÀÀvÎ7ŭ?“3ùR/ñ§Ùĵ‹Bĉyĵk˘z_,~™]ğ„á=×dìëg^•ˆœĵİF İĈ2 †˜L¸[Ċ2§^;‘TĠâ'u½5 ]qĤ‰,‚ĤMÇ#/q’ÁnpóµżTGEìŽ@d²(:@›JRZ‘w3fÍûHCq”,mşäà£ÌètĈÛŝ|~p"l‰£Ħò?Ú0*C\FI]×ÈÔÂÚÎŜÁÁŜÎÚÂÔHW]IFއ³0eCÇ%a'|ŝÓ6ß`´!óÑÁ%ĤÒdTÜ?ò=˘gš#(‘¨ÚÑE“9b'‚Şh§án’¸×£h2-Ħ9ƒîĤĉ„˘{²Ż=¤Brz=ĊŒµ‚y“ùÀ]U‡ÔnNFHÈ^`öïch>'0ŠKjٗĊüUOô/Ï\è"/Û@â)ŝRÊ f'²aòÂ7´äż™?P­ó·ÛVî×° À;J; ;Kù°Ÿ@ˆo­ha³Ò"îeôí°t=żµÉW)ġżD>·ˆàZ¸A1ˆ‰tĵ@s‹(·GĠVËÔGž Ÿ;äñ5…Bà’ĴĴ„#À0CÛ°Jх™D_Ç÷ÀPğKĞ„=‡‹µ´ŠĈ–Rnà…\ÛğR/ 5ñ/A7tĉ(ĊħŒ}†1'{İ*q• ô"Aĵm['+Z…ħ½óܘ䛋—­+ïä§B+²+Ü A÷û€U!ażwĵBèĜ¨lú6ÉŻ·)a—$Â0¸”yfŬçóAրv§+Z.;p/ıĵgJqŜÚĵÏ1—‡xÂĈGU^J”Ş02iö t†¨”ĵ*ì‰<Â_ŽùœWÛ ‘ÁTOyò½Ë,=­`tŝ3ԃ9— ˆ¤Ŭ„&.żB_µ¤a“9GçRéöç+ù“ß6İ@Ÿİ7ŝÎOXöÚ3_üè ħÖHĦ;+זáı$çœöí •hVç6š^ĵ ÈìĞŞ/ħŒ9Œ3ì2GÊÑh‡^âÓPL\¤.ìjwzˆß ,+ÒÒ*ö&ÔmwĦvnùëèILĊ*èò‚ïû àû—PaÄF†{à!ôpâ^q‚¨/S QğáèÚSo!ġ‰ Ż0Ž?3ŭq†‰$Úéc4sH8¨H^e×/C\óħsÈëıÏô(AU‚ŻŽˆŬG^û!ĈşĵrĞc5HĥİgxJµڈÎá2ÁHÂR¨[FsYż›Q„lOäMbo€ÎFÑ]ûĵaı×ô€Üd|ŭ;ĦoÏ~¸Çۈ˜Ë/ìiû(dÂ!îŬ×\‘÷íCÜŭkgܽ}ËĤ B7mÙûàñ³×îÇ}ĝ–WÑÜGÄÇìÑöR „.0†;EÊÈ{ÏìvBŝŭz°9¨ôċƒ3ìñZl|ӁMĉ°˘˜ŝĊâGB•uiˆ ì°˘5gk^-új†ÄÖJĵdCĉP;ï£âĝUPDÑ{Ĉm\GžóËwE–Ġ³ŻJŞ@~ĈœvlşÖƒ´s˘ÁLĊÏĊÍ+Ĉƒ£üORġ=äŬ +Ù[ nxA âĵJÜè†D@×O²¸YÍe:ıĈ]¤ÜŝƒzT°X'\ †ó{ĊSѸ^Ùp­Œ ıżÉ°Ŝ„yÌa§ŝ.:ʟOb˜fD”ägrS‘x˙:r]ĝh†w›Ğ˙m7S+ísŝŬÄp! 
ÉTÒ+bEs3Ô@pxžê\7ħġï E|›˜İ–9Ä.Ï4B²!˙†.Ğ|²D½~áKÈşw™·ġP·DîÀ=ÄĉéûËb!ħ5ĉ{?şûSŻ\µ§2\o5&ÒĦ}׀\8&íĥFÉìÇ8͉ Ġ¨¨äÜ KÉŬáT½ıŬká|Vœ3?pßċgɵ]#˙äAĝaû{{şğ{zû˚eC"d¤Ğĥ ùÙċ}óç(‚6†.7ÇkûÍŻĠ `6$_ š+‰RĠF$6s°ŝÌ3‹Pa‹Ŭ0”dĉ—é‚†[ ŞŭUÜr,İ™MHèP§?şe˘Î™z„œjÜèZf‘žR&°9O˘1`êğ´C_İšYû_uóîbè…ÈI/?@ĵ¨Iyçì2cĈè·ùü£BżİçYs<°M!˙ÎEWŝVùÁG¤œrú™óyĵzq)c/ĉŻĜŝHWUJ-_Aîs—HvuÉıŒ}÷œÈ˜?\ dêJ9Âaz‹e8·wΧßÄ_I!f?ş¸s  êï]ƒ,oƒA u.ö\UüütÛ:’e*žJD‚@a›$€e€ÌÑNä¤ôn q¤t Ż‹@Ì´u§QĊ,Nŝ~} Ċ‚=zĝë6}2ĉhĉ>’¸h‘ı[žCLŸYùrŻ› BŒü#^mnîPuڃĞçéJӉ@ˆ²‘ƒo1ïÓrŠŝ6´vġ ABAC½]­ ‹rÒŜÇ܌Úìë`¤LBèÒşóV~V=Ê Ŝ?/#ü ˘âĥ÷%àhĤޞo™+¨è¨,ĝƒdµaġ·Á,°3Xħúûó9ĴŒŠ¨ñé:ĵŭŠ˘Q‡×ŝU"g膤ßxGe` H²%”rÍÁS-IëÚĤϋĞ>Çr­aĜĥYÓÛ„t.GĦħä+ü&}~gğ3#;G{ċ@ÜjT0<_"cÂSüû˜‹\Q—‘aWĦäî}ä ™Te•tAùŜèíe”Ó#xóÌgÎËsHċ·òm\‘RFÊm8YqˆÔf’˘'a‹ó³Qf ĥ‡žÓŞìɉFî‚Ş˘AĠa,Yh-ï|ü_A×àêK[1™%oÖċ,ò ‹"­/7ùM!ÇZ¨2 v,A7t,GVuŽîĤÀ†ë†ha6~•,Fmbċ+ÉzX§ékK  ,ŝŻÜ!Cġ&+ iâЁħ”ĥV†dzêŻ`üKˆ2˘”‹~Ŝġ`óŬpŭš.ğğJşÊž‘_Û8ĜXYÜ öĦĞÎÛp înl ·˙IĵsdŬÂı„0$µŒ­ú­\şeû={÷îÙµ}Kèڕ~ ­µ%„XFHJcîÂuGî$ŝiR0n”—6̃;€Ĉ‚}qec§ík¤§2ԒWß-›†ëèns(oû<ˆ÷&)!Ê!_ĈO™’dÖĤċjµ8m!ĦÀñĤÊì¨Ä˙†K C  d-]ñôë‘ye9öĠ]\‚½7&ÜÑQ˘/asJÙ=Úı YŜ1²]Ò1UğZGĦ›Ĝ)“òñ5¤(안sWƒ™|Öä HîDŻá‡‘+pxÀ[+”<ĉ ÚP˘·{H4ÇSı5‡[#ÓĝvJÎĈ“ѤÌZ‘#\+'ÁV9@Ė"ܝ²HyE”kL°F…Iµ€WdÚÒbêòjlŽ3ıÂó“İĴUtŸ~ôÉPBßHyĊ ¤½ĉ†$äö˜ĉ7ZĴ£šÚ„ìçf¨Şç'ywö­@ĥŒ7.„jC‹ĜïH:&è@5p`ݐf Ż>˜˘˙œÛĥCX.j€c‚ş$ñzNĞ‘$q†ïNBÁUP¨0: ˜û.4Ĉ‚Gxϋ½ÜħJŜiBƒJV\—ßŝéÀZÀ ıÜÀÑi#(¸TÍĈ‚çŜĉ$- Нîá%ı &1Ĵ(9ámÜçú”àz^ŒĤŜ(ê$ùÙŬ£]ˆ_ Ôj6ŽoAVôuz“ŽóĞĞfp÷#›X9Ş:Ċ£~´ÇĜ1Àƒ< ‡p_Ó9Ż(Ŭp¨ŻĤŬk+eüxW8ĤĠœħWȽ) K ĤµH…3ŠyR”Gz4 ğ“‹\UÚ$˜gÛGÜŸŞ.$ês Gî­Ò™A= Ûà˘FÄB&Ĝ#Ӟ@ĥR)ï4W-뵓Íf.Ħ?›m)B]µß8‘Ż%MJ@t½f¨uißT• y˙$À‚ƒşF÷RÍ3€~@ î q…ÓŬğ…•LŒœS ž$#3àÔ½ÔÏ^#*ĵ<•Í|ç&RUG PÙ57ğ޵ñc>úŭ°­Ş´ĝjá8>Qtw­ħ0ö‚ev+ĥ9=zë|mQQ· 8p;ñwËP‚ĝÓCm%ż~p9roXŠĊžnNĥ6ÖÖ6ĥNnž‹W‡íĵüàuòŻżmC ÁĜ#-żo°SAQQíù[£Ó›ۑy-jˆÂĈkïMà…W+Ħ"ĥ‡żâ}7jĦ„á~ĝYÔàh)‘gfĝĵc²S— ‹Éĉ÷\ÒsäÌÈ ’g1 T872qEIxw÷ô ñ€QĤC#œşw´ tЍ“ûÉ.USûH‹Z‡Ö€’½ÄDò5"½ûëˆ26ÑÉ}F_Â̖µë-S5ï,Wħ‚î'ˆ! 
€Ĉ"ĜĤftŞxCÏ#§^" _Ĥ Z ¸îÙÌlR½Žˆƒ5.§„rc’˘AŠ}ñ€Ġy°ôì5£[ÛqêAJħQ‡ĉ‘ġ@ž´Ûhĥ”ËÔR[›ÌN;ToàXƒÜü}^p ²e˘L3e”SġŜH`çĝAڜŻxÚ\rh;ëš< >ĈÎʊèfÇĦ‹~`5[DĊk˘QÑà|A×ECÄàl ^ÊQġÒT*T5·e0µ7=ÄQ͐˜½œ[‹˜Ŭm<~öµaSo²êü×z8W&^Ŝ4_ıTImûÛÏĊ|-„Ò/œï‰sŜĝPO[cMEɟ‚üĵĵü‚?%5m=€$Nˆ6Ԉ żĈœÛÔ^rÁI½ù›.'âPfŭ×óĞÀI@×AŸ×öqŸxkÎÀd:!D÷¸Y+`flӄr`êèTSġxËYÄb— ?XT4¤Ż ŬRƒŭX„Ċ²ğˆÉžƒÁ˜ü5V{(ynŝuíàxg â]?u˜âP6ŜÚ‰-Hàp<ú-²Fô.÷ñ‚8òÊ0ë™)êžNèÓu³Û·Ñµbxµ„8ò½ħ8ñİŸ:Jµ)âœĤٖħö½Û‡|‚Ÿ˘¨“? ñ‹wâMĉpXJÉ ƒÙüÒ}ŝFjˆ# ş L m‡-á.wÚÚlòòž DĴ—ÀÇ6˜%đI„6à2S{Ju8ĉ@²ÁP#lÊĤF–(4ç1ĥà{€:mbܗ%²†’G‡AéĦ`;,"‘ƒ‚XşïhħĥÚ/hĞDżÈëċ³v!k†›Ü)Ç9żç ]àö~ŞÑYŜ4}FT=NPĵYĝKU^öĊqKÇŞB„ä#ğı ×½:ÀMöĦʄ—B"ÄUXÏ}'ިϋ~ĵŒĵ‹3@ċ]€üâ[ÊìŝĵßPĦ–[b!İÍjL:µÔP Ħ(Ĝy[q0Fĵżħo•³‘˘èlĜWJYoî<ÏeÁağŸ8{éÚÍèÛ·£o^ğtöÄáŬaÁË<çÍĠS–š‹*9ŻÚw}ዘé-y{&ÈF‚ˆ.=•“`fEìK…ÚïT!ĞìÖbyĊ|Ĥà¸,ÇÍx˙ QŞÓŭĴ †F) —Ħú$sêB*œÛ)/R…ğ!ĥ_° (&bàS,ˆS=3Ŭ´ñk„×ßş{AhîoÎqŠ{Ód+_Oŝ Ĝ9×MŭRÓ.ġÇ "‘° Ĝ.xH"tއ Ğw‡żF$ĞÏĜ(ï{-Œĵf…%‚jvT~ƒea3Ĵş _ĤÍĵL¨B’|İ>+R¤é3òĥ ÒÂ>Ú£OÁ0Şĉ#8JËnU\ êÙ7 ĦÎ^“° ĝ4µH"kÄErˆb_Yk(²>œûEÚ˘”ƒž‰ZUŻG˜Ġ´³ĵ_Ĥ—y8LH°|kÙפîr!n›ŽçÌGœżüI†O‰kmÍĵr³ñ„l<Ş öĝ1î÷@ êüĜAAy„˘ukÚÑiÖ' Xüú‡Kq§ÈÌ!œY|{•. 
•sŜWv[úġ;E*TıÖF½ÌmY.H/Ż Yì8G̐„-%ODT\\BB\ʁtê?˙ĵ’s‡şö2¸àoÉ}µÖŞfTEğë餟ĴŠßç,‡ÒtWŬ.fâC™‘Nâˆâ҇ġ|VÁ kŬFPgW@ "ʃħó݁ßıcàW=ÚH|Ĉ‰mÄ˙ĴĦ—€§†$˙?ĝwgd~žn İpî]İkìZ_B³v^ŜeaÓ_ĵ³4‡jf²~ĵÊ VA7İ‹úë-¤ż—d ”5ĴŻb•vɒX4ġ V7 żµN8¤ïo@ŝĵXħ5›vTà£Nà§£Ú>˙ïz‹|n‰dé/Ċ‘Wí˘Çg }@•áÇhZ“Ä6l‰j˙{€Y›ŒżïèBċĉßbkf‘ËĊ~XhÔ5h›,Ü´­VİRĴî<ŝîÙ×`-öğNskXŽîe•ZA.ü§ħÈ^Ħ e˙xÇÄ£Œ÷@Qòü¸Q ^óĞW“^°dÙÈ^ö+XÇÏ ß4@lŸ3§ßşSä‹°ŜÛ6$ıìéo€ Ôٓ;-:iE£Y˝ÄÚ_…èiĈ›^6pŬ)‘ •‰ ĉŝWc èeÄRSî¨`xÇĠ·yMD9„eYŸbnDíÛ¸dĦ³,ôuµáKWÖvÎ —nŜu#ĉSVèˆjàpSŜÛĞ;›+IÓ/‹ĝĜXĊĞŭDUyadJ·€Ór“1ĴòŞ›Ì=FüéNqgr÷è ŠM³„Ê‘ln÷bEár÷·ÓÌçĥˆÁÍá™xˆ?ĵb÷FʰìFäĠĠü×`ҙ:/İĝ€WĉĴéßOħ)ä]1ŝ İpĞRÖ^tù@Ğí:öBÌşĦϓŝŞ!€h”r€dĥ^m›)àÂM´ê4,~Š-ŸşĴûŬĴĵìż/ÇMĴô_u ĥM˘) }<ĴˆûÎm…(ŽŜ V~‡Ñ Ü&ÌKoâ.šJD6JT˙alÇ7ˆĠËĜùÛ\ÉÂN}“žX8}^ĈúȟJ“p(ĠgÔĈsh‡eŝÑSĝ îèĊ½^ävĦ™uA–ħ`ïÈ;GzCIŽyĜkmĈÁá‘QĊ›Ó={E.Œ_U–âTÈN~şUnO5^sPµ}0 ĝ³K1ŠŞÂ_ù‰“ÍŽƒ4Ĵ0Ò äáû3F͵ßZç²ħ™ú„½Îò€ t ˜×apÎÓË,Àĝ„‡ı^›N¸C…Ĥo<ĦƒžiÀÛې%WĵÊµnŒË)½¨H•€G5~kB¸…"ċ°çM´Ğ{™PU§MWżTÁ—;\Ÿġâü6?[m‡£41yÍ9vŝAağD]¸}ïá'O?ĵ}íBԑŬaAŝvs4ċĊ€BbÈhÛúm;˙"Ğ~ÍPĠ—Ğ›œ`Èò:œPÍÀş7{¤‹„V>§ĉQ€ `"/–r¸eçìé4;¨²ĵ[!IĥıŜ†7œ1DtN4âĠPxr˙03zwbÏ6.uU Ê5ˇtœaĵ b ✇½Ó::Ñ„.ĴšŜOxŭ~òÎÑk`ĥşi²ÔDê-‘/ÖUŭĊ  ÜƒĜ˜…òâ(!Ü Ÿ+‚P ÄXè÷|ż=´ÌNUOŜrZ‡BŒx2퇪Â:°$}ƒà‹ĜGĊZ?"ÛÂ1O½ñ‡° T^‚ ˙j’[*8Ez?fa6úPÜĉŻúÂ&ñ<Ù_n`Ž Rd́>IZ7ġxï$̘ĈÈméùÙF˘×ùżĉҎM5.Aüf.J(=äU/Eċ‚ÌÏ&@ısž‡ĦĵPp)^şAD|s)^%J—ì4ˆÓ.x1"(9¨‹(6˜WvÚ 2w§‹fx5wŭäI×S?Gñ‰Â˵¨$Ċù‡ŜĠMáĴĉ°:Ú[X)äܛ‚ĥÙs\{iÚ³KûÖzÙŞJ‹ŝU —””’’”ŝ!‚D‘V5´óZğïÒ³´ÒöÙd[Á›s!N EJ3 ŸŞ{wh"‰ŞµüFá>úó”Ğ$"ïw·†7St2‹V§Ëx°VFt–F^,€ps{â… ”^ëÒÍâ"àŻ Ÿ^öó9.9{>â]9Gti5ïĦ’ÄĊ™dIÔ1ÚÜ_üë˘FÙü‹ œ G€Âm ˜°;tž&k`³îB"€îX·á_²ó†€âÍü"´–tĦBßû -ĈŜ“N –Ê5ŭ˘À–(:Ḟ'.Ġ/wĞĜQĥÑbâ(vlĈÈRz‚Ĥ6Kl…C`ß8ژÀñ:÷?‡Àğĵ˘Ù–Ä!pĴžLÚêô´á(™2³=?˘/ìk°{ß ıĥÀ!pÏ4/îżm)ûĈ;QRâïwnŞk UkȆqÄ!Pĉx/ûµ%bózfĝŸ1Yï<(²Ûàxdž$ğ!“ÍJ%9°g‚c–eq|˘KĤm|ASOx‡@·}/Ëá8Xü*b™™ìì!wǕ·yCpäO ĥÀ!0öĉİŭaŝž.v–ĤFş:ÚÚ:şFĤ–v.žŝaûOŬŒ…C`Ë 8£°™ĦĈĵ·WvĝÎe͖Eĵ*„C`ùË}nÄ!óD*q|ħшFÖ yM#,á[ëâšÎbgn%Ù܁Cà6YŠÇ;óo ÏĵĥA,_³{Ë‡À8Cò8êS·tM#~CJ=ÀŽñ}Ûßp ċĈ%8NïC`‹+íŝBÌĤĦo!ŭ~Ŭ0“" ‡@m½ş6XÎ!–Ù˘+ ĝïC °B_úĦ[%šSÑ'C*A¸·ÑÌ118ÊA8ĵä'Ĝ×á~Z£Ï Ğ­˘˘abŞYi!l`ğ(öĞÇĜ°³HÖuü›¨ïÔWofĤ´cofñˆ7ŭ1œIWŽ—É͘ò˙cìŻ²è×mxĉHşğğ;T@)LlD 
ìÄBEQEEħ)$¤;”îΣfŜ}tëZëzîç½ŝ\çç\r:Ç1ó›}˙~·ÍİĞ׍thÜÂ,…\ç‰#ÀÎ!šçb)ôƒ§ĊŻL„ ˆž™ŽR˘Ĵù—­JçëjşH`%Ŝr\5HĜÏED^ĤF üvĜ÷FùıĊ\b>úċèٝ‚p.£íM˜ ™$ĥx[t.ÁŠî)zÊ ċ–Ò[áw4*%·Ĥs”`?À p|°ğ­ĦŞô{ŝ×Ĵϟ>}Îúš˙½´ŞĦ­{püÏ(=7ÚY“›uÔo…ž7Šò€4(üiQA‹ÎŜĥXŒD–ħ {ûàŝ/§ì`ıäè—Qb=ĦCáZ|œħ=L›ŒÂ Ĵ:˘ò0aĈ¨âñĵ2P„ú+§ì8ëËk(JQÓ#gD+xzb]#Ë5Làµ2ÎsBĴʤ|ŝÉ­·Ë‰,n1í xÚ^Äè.£eJjĠ}–"ÙS.ĵ3|Ÿ&–‹•viéôòûûğ é6ôĥƒŻ˘Šoî+ٕ†ŜSvŭ¤ÀW•qDq#ĥܔu@¨3…è ƒYTnàò`RwñÜmx0Ô{M Çüıa² ÓW(à6Ÿ$²h“G‘ُiŜ3Yb&íżċ!dÇ\’Ì'Öq’{Ĵ.ϗ›Ò/0‹Miá@½DàF×LX4şĥ™y_ko×|ĴĠŻû°1z:?tIìüŬ{I]?Ŝƒ›wuĝVÖ ‘Ŭhû+Ö;ú}žUuĠ`ÑŸĥsŞc|€§êsĞŠA-oŽ90†OsĠÑDÀ¨|äf"GPŸy4-Vù‡^ş÷ò·Şĉ΁Q€E1-š G@€Et6W}ûĝòŜP˙UšR#$óə¸EĞ„L(X4]ÑéĜ›¨ŬòX´’OLġ§ŭéF€EÛ\­bÍ?Ş÷WÜvcYh-Xo2Vór{ĵ__„Ş\êep&+\šj„,˙€ĠùQ•bçğör)Üg6ŻEŞIƒCtĠpši1óŬ´|ŝ0Ĵ×úÜIÇYŸd” ĉv ‡YëŻvħĴoÚcv ) ŝÑÀKšw(ìӁı·?Nz=Ĥooó·çĝçrà&()BXĤËħŠH&’ÖÄ}Ž(n’‚´ĜĞ^)_(5|‡§ĈÑĈq˜ònÌX½ğ^/ï£a0ŭƒ z †Nâyd ˆĞ4żÎm‡ÖÚGiċ<Ĉ!tġÀo'ĝT,˘^¨° Ovxƒ{>BRÏUˆM!ži†ş•xÈÄ!B4ì7û½=IéB/û‹']Št=7 Pyĝö ĈÛÑıíôâżcĝÈz‡óg8Í1n∨8}|<ĥLRĦ[c .ùêC‹_Ôh͉ĢYŒ3Ó]žvçÈĤrĝD€Sà"[WŸ-ğ9yîbÄĠk×F\ŝiŸ&‰ßán;>ĝÌCv­ĝïHS’dàN_¤>˘y—íImçdıT£&'ËÓ67p^Ħž•p(lïšı óŭ èŜI²Ğ›;FħĴš=Drú5èƒîŸ/Ҁgì)’/ĵÈĉÏĴ'Gâ—ŬĴ$ş÷\Ûì;._ĉcÒXŜc¸ —·ÈĜÍÇĵo•wDÊWkâòşWj=î#7rŸ”WI½1.ëÏAË7–yˆż-y4Ĵàíh(¤¸ÊtfSC6ZÒ&k3÷öÌA”³ÎBƒĉ2?{ŒeËû-DSJ‹‡×€½0 6jr§a²°né,ÔXÁqp`şŜ†|pŞu²k"„Ş˙ OÓĦîíŸ<-(qmĤ/˜W2bbì’W@=– *Íë)Vċ"Dç6s{fÌOC šÛ9 žÇµQQߔaĵ0˘”üSúñœc‹ùPİ•W@2Uë§ÎŬuWż‘ÀŽĴèÀŞü$”WŜ|u­7Em#óŞAµß?ĵ¸íTÈîÍ>îÎvËĴ–,‚Ÿ%VËìœŬ}6ï9uíŝ‹ßk;˙ƒĉGڊŜÜ ^m.ϋ’ĝUWFguĦÀ/W×é‚1FÏ/ĥ| ”!WVJĦ|‹ċŒ})ŝJ@‰ş×§ĝŠ˘Úà<Mrĉ†pûô|†'7˙;‚ Xħ×A”šĠpI]›ˆä î›ı&!xz²/U' ˙¤O ™èZ‡Ĵj:HĥİŸ>öA([͖êBÜ&‚â:Ô`“à½OZݍEıĴqÏOóÁ!çÙ” Èm<™³‘m+Îo(5ğSĈ•sŒVĜ ° óP~„d´ñb:@füBoLCқyNÎëÛröGÄèi]KĈ%ò‡n9gÖRʘŜH:¸cCÈɳ+ÄÊ;µ4UƒĦĴBÖ¨³^Mşoİ`ëN@DĊ²£ÛŽûJƒò™=ˆĥ œhuFwŒòaŝ"?d]Š–!^u~ ïힿ£@ÛÚÄ~ğħHeuçZ_Œ7£­úÀıgŽÊ…TaŬQf)s0aވĝşq$xK ħ@ŠYµm˙x•ÜŬ YMş’cèÓ2PğÌ´ ġ4–†3·¤Ží†C×?•µüŬ³ĉ§Ç†z;ۛêjkjjëšÛ;{‡ĈĤç˙(C&ZË>%^?´ÁVZ%(·´ħghÜ×v D •= uT‚­³áĉğGž­÷׆rҚ`Ċ6ĈzÀ/ĉ £Ħ”uâˆ*@Âf2 ÄUT7V"‡šßa~XE;ˆŻç?ÒÉJµ@że7m)ܙïŜK‡L²ĴrSüç`ƒŜ3²un$pK>Ȟ™rĜĥÓìş;,!€uŒi‚KûŞUÔê;d+ÀċWŭKSг\lĊl29Ìħé#zc)¤ ,gıîäK KŬéhâEoÇVŝ$Os:’ÖHG,­8ŝ2£ÑäâRê•[ö!žŞúŽ‹|ï{ğ‹7• Ĵœ‡n˜“hE‡Şa˙7aûİL^·™ >§‰À_Á'ĵÄħÈ"TJ-°0“Šc[’ 
cÌêΑ‡ö­F÷ÎTòÇ1ÜVí0p‰ĊŻA @Z·İÓLÖ°GgS ÖzD†d8ıŜ‹‡Û5e’Y°[pĊÌٜ½Ş¨ˆÛƒl2'êÏÚٌĤ'Ûô¸QËÏfaB“zÊC‹h>ĈkŸ|_”¨Ĵ„ žà€á,°b…•uL,í\<|6ĝùoŬĥmĞżß;Keia‚‹RùÁ0x!! Q#= _c ä!~-Sİ0ošmx~ÀRáÖÛö¤‰Án  “œIĴ›Şş7g–Y PÙŬÌÉWnŻ÷ “‰Ö$™#­XÉ&Á= X;Y)rbâş= ™ĴK^×Àz¤Ä2<B×Á˘WĝLóJdó dj7 vı£ûgË ùàqĵ‹š]Ĉrî<\İcԉ'…ħ…|dÎħœíşçŞw ĝ°#@j”œU2ŬïI×çLÌ@V\Nğ0§í„oŸ¸JoDĈnj?!É]Â{9 ½DùVËğĤF= šc9Aıœxᙚ½ü^Œ‡¤8úŸ‡ÁóEh mÀú9Ĝ·@*ô)ÄWÔ›ò=„O€A)ĵı:µmCWuLì#[•³nBĜ>”µ- Q~=!$¨ë²tX‹g;‘ÔnÍ>5Gt@~ÀĠżqc“Ä`8“ıI Ġ>ZʜË;¨Eâ·ğQdTŬP¨°íY`tM•EŻÓäBEk èáAG5>”$¨aż#â%À~Ħà1;T”şĠÓÖ Bĵ\M˙ü@<ÊĊ+è3[Ï­ĦñéEMP,gjĝeÄ{ AʧĉxaÑhËw/Aı4×E6x4÷,0˘ÄœoT1˜57ìĝIZóĉ˜GµQħM™3°#-Žä|ŝ*£Gé ĉOgÇnİ‘œ²!A—½9Ħ£c“‹ZhY VT(n²Ê­Èû&:VĦÛĈÚá<=_j×˙!ŸiŭÈj0‡?…Lh‹ħĜçıġ° 8À‹°ž!.Lé!‹ż WÓ`8Xá{è€xp‚Gùû˜îîç­ŭFıíPÎ^ádäS-íĈ²³s€HÏ38À.ÈA}â>òj@Á™}‚òıK~&7o+ÓòSdċÂ#R~Ŝ6·Qž0ŭ(ÙğPB]½hñKğ[Í_.zíx­ |ât‹Ç)ËàcS;w’—…%É \žoñA—ài¤ġĴD-²oöњpGwçÚŻÛ#̵ĉ+ğïş.˘ħ¨U)…”ħÇ_݆e ,ĈK7=–7…u§ì„œÊÚŒ~żµV (ŝzë|"j]ıé‰Rި–íĉ{éíÄ1cL vÔg½KŠğuùôѐ};·oÛĥ}gà£§/ߊKz—U\Û18ĥ8ĥ—Ĥß ßlĞġç˙FÏí˜0Íu|ş²NÌZko}a@ÌZHîLéĈĤòŽ™Òe ëÀh² Ży=Î. Q‚z[DB5Ŭë}ìŻk¸„÷Ôá?6p‰ë&ìÑÖħ*_²V"Ğq=É /XŠú´Ì_O²ôè'çjmà Ŭ°œrœQ¤+×ߤv|- ˘_ šµv/^ü^BàŽîb?Ĥĝ1ŸPĥÁ„ĉ(Fz´°Räg‹Œí|r“ħBë3ċÛYaàrBŬĥApÜÖ#žl=‚èà=1Żí ÓĦ@Éw‰o‚µa{ċ ËQû 5ßÂûŭ—‚ċÔKòA¸ú÷™kıÓ§²À•9ğš– ÒpŞÓMcup _ŝDX#r ‹*NÒLż³o )$YÇ Ħ‘.Ġ$Nşž|+Q£VÛ^‘#]ĴÔ¨öñıçV¨Ò™_XŒN—=eÛ-êÂ>x$ÙKĠ:‰°ê+ËĦ†ë˙˘ ^żŻş@/pçӆ9|´äż1t6älöŜ/è†rpùĞËÛVh‚:š!:Ë|‚Îß“û³ P°Ä´ÖÂL43==C ‚˜D+Ĥ„€’mû™ûĉŝù Ÿe:D/„&ŞıbÛċWċ0YZè.¸ż×äáBĈŝwKFñı†§; Ħ™âr]/üĦ´ĵüJ5dÂiĦÂ^É#° ÖEwc>^£í2ì×%ÔêùÜĝmti*Ğëˆïž6VŠşNCKP·JN’*WÈÈP‘žMVşĊŝnJ;ı”óĴ< á'xŸI ȃ°ÒDġ놜p`½ ´Ġ³™`ÊpšNç^ ³š£ĝAòË)K…_ßy·àĦÔ/ŬòŽĴ3”ĴvXóoï)ÄL}µçOvĵ@‘áŝ‡D*NÑV!şûGÖÀ5uté+ĈÀyYŞo)ŝmIç)³c']ñ>£m3Ù Ï1'ùwO„Ö¸'„o/CYĝž*#ŠŬâ^Tß˙CÌÏÒj?ĤÖÂ×îĊ{*WÂĝw£Ĥrm§|ġoµÖ:ÙĊ£Ÿ¸‰ü´y$*×C.<ĞSڍJ/İĉ9(İ[ä‚JÚiU/|.߸xğ˙ÄĊC~èżpñÁ.<u͊Ĉ˙‹ïİS˙o\ü¸Ĵi²\üG%ħ§˙ĈĊ_cVÚŝ.Ŝí.ŜħŝŒ—{˙?qñƒ˙àâ!Nİvüżqñ0jù.Ŝé˙?Ŭü¸x€|ŭŸpñKŒŝââĊ\ĵĜ_\ĵђ˙.0c˙‹ožŝÁĊ;ŭŻô߸ĝ€‹?ö.~˙‰‹÷.Ç?[#6pñn˙ƒ‹·­d^û7.ŝݘÒGÀĊ[5"$”ùċżqñêu=˙‹0Vl³‹ŝ_¸ĝè˙ÄĊÛŭ/7AŻ˙€$w‹rVhD*ĞE(ß$Òü~Û(XY+â Ĵè{Œ•"ċ*Fƒù‚+gß}^×Ìñُ–ÈiĠġ-ç}Î9ŽÌ~#â*Gˆ½ċ³@›?ûĴΓŭŬŒmmD—˙à>tˆGîî|çLŽ^”ĤoİÁóA§{ġŜ‰"u¸ ŻŜ/I²z8ÂŝĥK0Mx˙#g>ŠÉÙJxÀ†À)hî$Ŝ÷.Ȑ 
•]uġ1’yşw‰(J‘µ=ˆàĊO·eÇt%à>dEs÷Ŭçß˙h˜fb­Q]­uEyY3Ŝ½MM}û.cV^QE]k×_gĈœhŭñ>ŝünwsENjÈġ`Lvp"K°• ˘Kö>%ĈOߒEı ƒŜġá“ıGá hǔʳ&>çGŭx€%w}c<´"IîŻĈ›KQœŜ³ú΁(9ŻÙB—8: ħ¨=ówċx çUxÌùħœĴ†PͳoòĞĈ˘ùµ ˜§(vĖŭşú6€ç<ç]ŜW§%W2jϗ6·†ŝbÁ—ŝfv`ŝ ‘JGıÈJĈ= E{ŠÔV nÄÒż5‹l„Z²§½‚(ҝŒ|¨§GLÊ!Jë1[cf°àïא óÇWËö§ ÷ĤôÍgï"O&uGSÉǰ­ÜÙzZ=ß]çŸY“ ‘Ü!íd?ĦyM爵u.xĊCĥÍ~S‘ŝZ×ŜnWÒĦ9赟cŝ0£›…fÈ:ariĵĈs— >Ì‡JôÀ΅û*Ô 5Ĝ'[Ä anĝŠÉŝr ;cö`ŒñÁ‹‡Ç3  ë…ÉfW[ñŝW’îáÂ9VÍu;T~}B{ĦöŜZe EÙ;òû(ĥú6ÜE™ŽŞŻ {RŬPĈ`Uú­Ĥò@Œ'óHh,^ı9ĝBÌó̂ŸMżûG&gàä÷gË@8ÎLŽô˙núYù<ĉBĉ•‹5$à@ĉ—7ġ8p+½ ä3 }%OÂVŞó"te—·­ Ĝè÷HoâO_{ŻvŬ–°^°ğ^š+<ĴKpMèÇ[ݚ‘…×1&͓‡ÇëcìôžàCîHRş2<—`€Ĝ~Âj6PUî/tҕ2}%ıĝk .Ĵƒ^Èì1ŞÙĉ9à,Ì"ıv÷ş‚éƒ´Ê·ÙmHû•À’Î6#ñœi/ÚöN ܞm”çó‚ß{´ô²ı·bÇÈİ£şĈ“Oğ³ĉúS÷”~ÙĠ¸żd‚_˙ fÛb땐oèÓnÁĤ‰5'H¨#È ;qqŭ¨á߆oç-kġd_Df—Ë4W‹Ż˜yIÙùgċt Ŝ>ΠBB›˜BU)>Ej3ña¨¨mjíäıÎWPá£Ç=´Ë§“µ)X%èp¤òIiZ­ŠHÊma#¤ñStƒB‘²Üómîˉ„’ÜŞ+ßT“ul /茟w£;”ħ5plıĥ˜"àġz@WbdĞĝQĉo°aÁœ+T‚ĉ]M(^‘ŬŒ$}tež³ ħƒİ iCÛüUaÉ{ìRkÒĥÁÔİyêiEóÄÄw¨c¤Ò$ĠŠàú‡BÏwB Œ†Aq9NŭYÔî¤ĵœY!^Ŭ,³|6½Èöm(ŬŽoŻùÁuˆD¤ŝÓ;„‚8Ö&ÌÁî§è7döoG ’ÙÄÎ0ħdá*˘dA„9=gĦ1”ß˙Íĵ…]ŞĤ£™ $ĵÔ!ìù¸ôgġŠÏş·ÙDVĦHÀÌMÑW@³zvħż×ÙĜ?à‡¸4‚4xÜÙ3uNPâúlÏ>nٛ3ƒ'Dĝ÷w°R-dzc·µ‰TĝÌóed݃ĠĝŻËşˆüAn'ğñSÌ/7pĈÓä1χí€ä:e ž[ċ3ĝ× +ċÈdYû£ŻaF7ßñùF! 
˘I™Ĵ>ûħvց Ĵĝ”yl‡·™–‚¸ ÷ŝށqè_u(Œ‚h<‚â ZfvŜ;ŽE&~ŞL ,j?Ĉ^mŜXPÜĝÜĉĈ×GíeÉdı•3ċ·<À_dy Àeí=Ċù€ôqNes ż[2 ÎÊ#ş—áĠÈ˞ÏıpíÛc³ ËTVÇ~~‘ƒ37eı÷ġÌ^—<7Ġ³“΂Zô\£ â7żñéí÷AĥC9ÈKŻD5ÌÀġO1jîµĉ{ġg{ée Áçà>3GMUğ y7= mHb.y1_ÖIZQ÷– #œ§)Iéà=5‹láXZpvŠöHZ8’ŒÛì•6ĊżĈÄzNÚÄâ-ûWˆ3+· l(ät]Ò@tt‚pM¤AUœ‘çîdC °Ïĉ‡êQ¸—œ.šċüJ‚R.Ucs|ijóxİÒ!Í·v1َœğûœ4AĝI—sÚ~îѧÊÎq˜î°çÇ{[~|Hy}ġìħ ]Ûü7ûùmößĥ+(äĜÙĞяR>ülé'pьñÎÊOÎmwғgŒĤÓğ9³8£ëë­ÍĈB]ĠëJÄÑëâ7&P}KÒ/ÎlÑé%ܽüYàÚ’ܟpŞ@¤ òğÎ+:ˆĈ.Náí•ÀDċ ùĊ~k,Iԃ?3žÙĉOÑ~…•Ĝ!†),ŭË7Ĥ™ir$öi8eH,ŝTP˙甃†~;ž$G‡m8ÊÁk’i~ÙÁO ħLOÁ’+À„èQz!‡90ÜMƒEŸÎrö!ĤOèñ KÎPQüÇt0Éŝ$äu7¸cA0—C9ġàÎlZ0ÀSĜİ´dòœ+ĦëŬŒ ˙§)gŒ9èĤ‡*.Ë~ ÁPä-@,׎Ġ™ñŜ“DÔ².ÒÍÊgèĈ…ìhaı$,ۘ²gpá*OÙĠ¨y&^ê†$³:‚ĝ„w…iͨÑù—Ö$…“íXĊ òÒĝvñDûd5‘ A5‚ófÙµ×mùQÍÏ;ħÙ²ÌċÖߖP7‡—ŜÛj* >ëÀĜĵyœ9ĝó͕Úû&qKh-ġŜs6&%§˘wl–ñ× 1 ùÙٙ™ÙÙyŭŭß³c½-9)1g÷x/Ġ’€$1MLÛaç•7?™ĝ|W^l 58I„M·Ŝ+ÇçêĥésCCùFÙ,Öù|³Êo{½–=›ĴŠ‰°ê“ڈ̞böHüR²Äž Ĵŭ¤Éúċüh”&Q†ï<,ÌÔÁJ6@ŬJñLsÔ£šŭT`áƒ{(ĈÙX’œp4ğ˜~†QfFżÈúŞ&’„ß5Ğ[ 8Ö·"† ö\ €ˆ FƒÏ\ŸóÔ'~—™w\€ßïlŻ&—(uòàÁ´ìN(Ÿ˘ä:È ŬŻ‘¤~IÌAwá?␓|ż’‘/µôós:+`Üú‰tsÖÈlî&bÁ:|P£Ëä;Šù}!cv•O{;íĦ` ‰â!+ÑŻ“ÜÏayF FÂŻ Dì9ÔjMż†ËJżÁï İ}Äî‰È§&Ž÷Â|“'âĝ“ŻÀsdd줠ĜĊ‰Ñ3˘ü;™ÏÁ¤ž ĊiŞ{6§ôŞ‹Ž1>­áċrĵJî6Àĝ¨dO¤o–Fd7żÂ{_ĝ)Â=÷dŜ8>’sÚN ċÖÛrż|BaïNı)&ğtۍ÷ġ€‰[¨ÎŒ=şÑV‡ÈwĦ4iuS[ż½GÏGŜ{òòmĉçœÜü‚‚üܜϙo_>ıyŝè^?[SuiB6Ï+cğñhlf5Á™ĞcÛRFòŞğŸz°‰òû[ô¸Q1ğÓ9#ĝxŜIx")ú½èĊ‡Ŝl–E¤7§O°+O&n[.`n\ıx×|bŒ=\ZÜnNĥ;T`şYİVˆñsfçA~Ñ3£ĊOŽáQˆg˙tD<›ĉ/&.E^äöQMè>ŝFZ6ğF·nò„˜ï+a£†ž„9üsn×ÉŻ˘VCĊ v˜~Kó‡ôŝ ĥ/q‡ü2@ŭŜ8°boΙÍŜ$}j…m ıóôÚ/Hò/“fµ„³Cbèúµ†ëc‘éÂEÀ… ù5ċ|[ñ}ôìĠĊ“/ÉĦÀx6ç"T8`*TÚN#钑|o˜;ĦĤòËżÙTà9ÄœşğÉ'˜ùš‚qxŞœxž"pƒUfM èkĵÂ-GV–cŻ HĞ+ħLKÄ2ÓÈ't Ëq§JÁ¨ìG€ Dè&ŸÙÓ|¤@ͰÌÙŻûĠQAç;Íp·= òpŻèÊ9Ĵïc¸­JW[‘ĠÂĈ~&v!PÁ0½‰ŭT? 
¸ß™Úœg×C7ğ,Öaa $Qè<üB˘R2²rr²2R˘Bü˙ NV0û˙ >XÁ.ĥ˙›lëĴàç˙/Ĵ`6°‚ŝaîü7+8 XÁ‹Îŭ ŜĴàÏ~"ä%À .ŜĴà7˙É 6Áìù+Xƒ²XÁj˙É ŜÔŭoVp!?şR• ì4£`óġOĥëÁ,Êóvġğċ·"ĉñ>’şQ Qʇ>1’ɍN€Ŭ( &w†XŸ=é" Ç-ŬÄ-rè;m)böŠŬş“K.jĥ÷7Ăa`Y€%+óžkvC|;ÇIöMSûP—_@ܘ4†DÈċÖbgSŽ8B.œWÍ9$°kg7ç<:ıÙV›8̑y%ÔÌì}wˆ…­ï÷ŠşĉŽŜÁĦááĦÁŜŽĉşŠï°'Ž8şË×ŜLM pxÔĥŬ|òQNó8Ÿë*ŒqVƒ9˘‘èĦMVÜߢËċÔKß&ĦÁ†`3ÜJ³ö{À3Ġ=VĈš~żIŒdц·ž÷Ċ™6ĵ*HŒş2“ĠHĜÁBüû:X/QgçY#ĊĜ[=ÊĥîİSÄ÷$ۀvd(q[†zÖ ŝ£íäcŒuħWˆuîk[—’Ĥm ™Â‹:›´ċË­DòĈl„ó‡—HWu¨é÷çóŻaŬD˘˜â Áûß&ĉ‹òĠ~§ž!q8w}šÖxCGdX|ËÌÚĦ}‰Èğ_@‹÷”ƒy̌‘¨fbçĴTz³èğ‰GÊô20Ç*˜ċ 9B&Ìuú£€ŬHħŒ^Ëosá÷ŒÈQö+!£şoÈVN"ıMÊĉQĴŞĉOÒ ó8qâħì2[Äğ ց§Ç‡K\ž9+Fœ„ß,AL“ĉ‡ŻŞ Ë_/ŒĊ#jça•eQ£kżÖcXżĊŻY’ëàîĴ'L=ß'ñ‘Żç¤I4ŻËÙ=,ÎHiü~;&³dÓıdBû ‘ úœÄ+ÖÚ(Šü!€@\VEËÀl‰µÍ {û6ÖKÌ ´TdĊ •AQ4°]{àJbN=‚ yòıMKÀÌh·?t„êÉìB#I;˙:‚O~'ìEâĞî4 àˆÍĴ5b£Ż Â`ъ˙şf„ÊĴÄ~WCŒcÇ^/GUÏ'™"KŜo@bgG&/K?-ËÀ&oÄĥŒ+.ÇÉ3¤ŸœŻ²˘›/3áŽÄÒ¤>CçÂ{¤ÎHèû(²“ñ^Ĝüw‹žLñˆÀÇiWH„9 ċ™*€;vÙ4qhÛMÏêUħš‹ù“‘ÑL p °]ĝ×;$ħşĦÖfĴ=âHE6òâ˙a†ħ5gŸdBïNj-áË×6qoĜ#}îQĥš}„^q^żf×Qğİ îĠó)ô5óïxĈ Ró{‘Q feˆk•Mù#{ç~è‚á&IL)ğ+¨ö{$!ġûjDŬ?2¸‡Ş—†çY!Ğ*°7†¨Gže‹˜ĵ`vċŜò˙ı:?°öЍ˜öí'™A.ĵŭĈb˜­Ç÷&ÌK ‘Û˜ÜĦà#‹xàĉ{Ğ|)¸ S9TÜr/è<À“07!2Ê#żdíÑûĞ{ĦçĊlé§gÑgĴv´4ÖR–“…N /77p"„D%ċ”µŒ-W<ŭìS)˜f‰İ·úŭ£k—Èó d!·°' „{-ĊQ˜>^-Á§ËoÁƒˆgш÷$o”CÄĵÖğ‹Ċ7Ú!nFßñ ,ˆf¨â‰vì0ĥÂènÑ#]Ì&ˆm^ĉÁ*V!VyxšuÏàÈ~ŞÑWìħ”Ä#샚à],SI, ,Lş?ĉö"ŝSeZâĴ`"ˆ-´è7 š ǝxßÍŻĦ§ÌŻĉΘ²ŭÑİ۟²ï‹è£Y+ċžÏ°ÓġaÈiÖr­Éğ ĜÇħ6fĉ˙ġÉ@”gÀôIí8È^j8‰RPψûHt_ó­Ż\ °à„ı’5Mò‹Gżz-<§nf%P71R¸]' ¤ Z:—@1(Ù½#§˜ğpY?Ú´|÷YjB°t%áXÎWúñéö5-ŝž´ÀıİŜ½tċ‡Ìĥm4ĊèÙÁSb<ğš°/N$ċĞCŒ76$ù“XÙ.QŠmÒóëQ’˜cğbmıiWê9SYûµÈ<gÏ`/v@$H x͓ĝTEÜ6A;…=ŻgÄDcĉÍŬö„6 ¤OV>Ż%eUŝŝÓ <ÀHo{ŭÏâĵ/ïß½~™üüyòË×ïŜÉ+ŝYßŜ;€€?Ŭ°ß•YI×úXÒ)†Ùï™Ù8ĥˆÊçaN 4ÙW1x²¨5JÚñ˘›ù~Ĉ‚‡Ĵµ?kŠSĊ‚ĈmÛîX3’è–ŻÌ‰$[Šè2Ĵ¤<Éĉ cèŞ2Éé Ö´‹GìÔàl´"m[óĦ2}oïÔ9¨/~MûôqşÁWNĴ°R:ö@H-‹}‰ĥ´it=rp!WQ.ga7Ġ %-RÜ)ŒMÔÖfêó/Ŝ/£‹ċ›j$WÌ%*x­`E½¨7Ôß <ż€riŜp)û @G*òla30i[ƒ;ê3ÂĝÚ҈ÁF|­TÏ[4zĈĜx:y8c9”A ĊwÑ?İwŝq?ùf˜7í‚Lhó9תÉ)½Ĥž|Ï9áèĉéu‰4ÎY²S'ˆC÷ÍT[P!bA nuGĵÛgNñ¨$C;ġĴeƒc{‘Ùç'ž€ê<˜‹TEmß1oh!F1Ì,_>.××Ó Y›áAzħzĉ)Œj̝a7D9 !2kâšXÌĈĝMêT²‚{ ċżŜŸugh}>,„Qk¤îcLèš%J e”Šİú ÓQOҖֵġ ŽMÍÛ ??° šìiĞ+ŭšö$êtzSBETZ²&4ĉcŬ Ĉ@…6„ò9·šûÙ÷@ʋpW 
SĠ7Ċ72YMqkd!ǨöLîAMTĜóÙÖxŽ/›³Ĥ_ğrñùf1‡cŒ­ƒÌwĥ¨jäĜÌÔ!˙éC†wĦŽíàPa×z˘OVá95Ó·N‡Ġ “T‹ê™} ít"Ÿċ¤I¨—LoF9Ïù–ö4éILâz3Ùûœ]0£ŬL{5ë(ò£SŬhĝ#}JÉÒ4ŸyˆÄMÏD£o{¤Ö≁_Z_Cß_£†ÔPoJûcĥ §¸Ş ÈWç 3.’żü’ô"’] 6.‰dy•u(›|áña>"ïÁ˘  ˜°$U…2Úu+^z‰ì5Zg̟€ßċ1ОŜx÷ġ\kï:ijc*”f˜…§¨‚6l ˆĤžÄYM2zİÛHUş==!G]SL ‚ĝ·Và Á’”/gfSa~âġb•0:VëŠˆ¸÷“.|ì~=*Ug×Ğ.Î|í?-x#· { Ó] /ŞÔ˘ g“ôÌA7ĴçgfÜİmĉŞâĵ& @òê‹—;yĝlܲmçîÀÀŬ;·mÙèá´|ħş<‚SݏŞıëĥSq™?{ 6×ó#ùì†EDÎDmċQ0ßò:ÌĤZ~jç9]ŻvéPİzû?Žá]OĵĦnışìċÇ´`‘Öw^0ÑJyı‚"܀Wlċ' AĊk¨r£Ó·•¨ë8oŒHĞkĜIê´ †İĤàY†´İOd]/dŞ×ôöy#ğ§ĞÍxîâ üĈu£^äKĉ]1P§-S8ħŠ+ aÑP ŒÂö1}xŒ˜)w” y°"ĦĠe£Eäzĵ$}!_d,6œğJ.¨ĉ:µ``‹ùKŜ˘Ö ½ñ¤˘"Rì°ô&|•ÊD úĦCd3 ƒ› hĦ˜H}ıÀì)yÚJցݍŒ2ż‚mĜ.ĝ¸ŽËĞWŽ¸Ò˘ñhš}Ïoc“"’‰@˚ÙCäe5sà@ͅU€È-fÍJÄı’ ÌµŻwĉşïÁîùdÏbü‡/UĉLóIñl§`#÷ꏌÉç4Ġo§™…{…ŬÙ0c? & DÍrZ}ċQÓà´ÎBcân°FéżñFAµŻÏĝB9œJ½G@ŝ;‡á̉îšĵ×÷.oñ°5ÓQ5ÏZüße^<„„¤•tÌl=ĥ_¸÷:ŻĤ{‚‰ğ ÏqGĦfÌ qCŸ3Żk'` ôġĈz]pF™ìNl\àô¤›ò òñ­0Ÿ:iĞ3°³ÈŜ żòŜBĉôÛĠ|4‡ç“ŒĞıù6pşÎ*’ìŜ1ûÎÈP}àĊždóŬyÏôîRN`W:#+k˜·D` ĈŬıšeäC³5– ÌQO!Lż{ì˙üUğŽTŞËğÂġßҰȰFĈ•ú –•-òVÓɤĜòz,”V ƒàÍ"˜ •Uĝ&éáXâşÇ÷"‹9[GoS~“#§Ôœ"ԒMĵ`İ=ŝ‰™³P̀†ávì8Y@.üÈ"pˆ #â¨0‰t)WQ*™ôċ0Ĉ Cj‘§&š„?Qù§@Ġ _H[IôËe=]AsOgWӆñúóĵëħ—8"µĉQg욳Âż(p­ĴÎ÷'œä(ß~'żĥAżŜ vĠ#(ŞV!‘/òêz&‰°/cz¤ğŞ(çŭë¤ĝ{Ñ7Ż_½rċêġ›Ñ÷â“^żÏ)Şjé!Fc²§.ïEdˆŻ•*ÁšÓs ÷ġì‚zòïl‡~8EÎéÄûN`jƒñ#Î×Êĉ8mÖH!â^ğç°¸>VÁOq磭x:ĈÊġ¤­L[`Šh y bhnĝ‚4ŬżóÎqüŽ^‚şW³yŽOôl%YqbĊ¤ñ U8~TyŒ'‰Şċ1 k3 Θ'p³ġ™,QR)q‚Y<Î>‰p`Äìµ§?œ×u²ıĥCŻ7cPŬb.y2mıp™œŬ0gµİHrñÊíQ魜ĊˆĜ>ĤÑ ÌOfä)§ž'”½\{*}ÓŻàDJúU–‚t$’ċ.RÑ*o1‘NßÌ ĦÓ3°> Ÿ‚èf ŻŬ`”ƒ#ì4q͒Ù]Ȗ‰z àƒdĞ ĈbĠù#YĊ‹ÉA#ŭÛI‹ ï E ì,*tjtü‚8߁Nĉ3Cdù{vG¨(Ñ k>$Iq|;?™¸œ,ĥµ‰Ž\܎°ĉë4.ĞĞuì™ü#àÔۗ1ˆOœ·u¨Ñ§ġ° *°‹¨î‹›ošÙ4ÉáLµċ>:ħa)qŒ#q‰İ.rÙré~ʗâÚö„ ˙ùJ ékŻ-ŝ’r˙RÈf—EŞbÄ!@Xm醏rÛĤ8œÉĤÌĞ›ÍĊ ÁP šĞş páâöç &ÁŒ}z`Ž<’?ğjĊE³¸ŜŒ ‚@=XĞfŽ7…ı84Î"g ĥ}›½™ž>a!ßZ!âπ¸ı*ŭDĊËIĦ˙ 3½œÊSŸCş7"‡­0bîCògżÂ`Ŝ-ҙŠ$Œ(şb@6@ŝŸµÂk8gÑ$x´BßĤÌ{àûlQHŽß$0Ŝ Xu7‰f0";ç ”3Ù§áĜïN5ڐÍ4Ĝ£;†ûĥ ĥĴ;⢑ ‚7˙ŒçA"/pD^°~LdO– 2£íTy×ĉ°†ï/!IìüĈáĈGµĵَ$zBéf÷‡qĴó)Äoĝ­NĉŽácùa  ĥĉzŜÄ4òoo]$AFù5\Ĉf·LÀ:h¨>;áÂn÷Ċjâ<Áp IĞè-²Yé½iûžƒ‡8uúôİÇܳ}“÷J›Ez*ÒBÀ‰D(<âj‹Ŭw_HÈ‚UDKvìA ~”,ħhëí|ˆž ä]_£ƒç‹ùK䞴⇐ÒÓNlün¨/y&àí7-İ|n/ĈYßvJ–ÜfċĴċ .Ö~ÔHıXÓ˜~ŭf½0B 
|HÇäáŸÍœ°)*~‡Ua‹néŜÚ7Ì"Û4N˘Ŝŭp<ÍΔV.˜Û‰dfˆ5w[ ĵcnÂġ? ͐ĊŒ€sÀ³9hñµÂ3 =ËY#\û“߀•<0WĊ‘$µSd7ÇĈ&ûš‡Œ_#"/;Áo¨EĊVÉġĤ“.1íċğrèAĝÚÇ!=í<>of<˙Ï —N~>Ç`½iŝ£„SĴĝžagIkĈjù÷ùŒÊnêò ŠmżĈ÷ì‚y]Pŝ)Vh‰o›ż EĜñ‘˘çĈ&Èċà/¨Ĉíñ™ÇĤˆnô#}%Mx[§˙`a6ħ^Ġ /ß~ ’€ŭM´˙ĵòp‘çáŻòG”Ż:µĜûrÁĴoqûíUxÁİċ¸ûêĞ‚÷Ξéî[ܐ-îËM4ċ!›ŸżAż?p€ŭ ¸ĵĤÉr÷-!—â€*×OL&—ĵşşÛQ ĵ‘ĵ*öûġħ üxŻ…(JW÷úˆ üó0q¸öĴö7íH° b•‡ë˘˘ë3Ĥa™  À;ŭœ˘m´•錑h]ÄôñÌĝm Ôá Q–ı21vNTàĝÈ8TĤoη­G- ħ§ò‚×™€UÙ3ŝËê•ż\€ğ[nÄwÌo\;ĥ†t{Ĉg1,‘ó› “q ˙eĝ_‚Żf(ÇôĉËëÓÖúHۃÑsşä홗Hé½rĞ0|ƒžxÜ/Ĝù’^GVà>*3¸ê²‡²>ÄÂĝz—@G[Fßhâ9” }J~)-{Ê[;Ž\ĊCápN‚÷İkĤó¤µk‡œiw;tÛÎnÊEN†ĴlĊĞ|n ]Ŭ%Rî_ ¨Gƒ¨Ú`FħCìáħÚìĵl5Y%zjşRö™Ĵžs ¤ċ/f§—‘%÷–a}wC9ĝìBŝ^ED6àŭ§9Ê^páŻú‰ĵ“èµ;5˜Àçû-Ċ ²"äiÈÇëÓŻn[ qn)çíg~(ïy$Ĉœénüöċ]ò£˜[W/ž=u"<üÄݳŻŜŠy”üîË·ÊĉŽrĉF:Ê?<<ğŬÙ 1$~ċċÛĤ׏ƒ”²âiÈ è†ˆYî‡B*6˜uÚN'ó&W °jĉLĵE÷ĉ/Ì~„zâ;}XÙ^Iò²ÄİÙËI çzX™ö„›˜ŠV!Ż.s@˘"{Ä\:ÚÔ Á‘ƒTƒ/D“â.Ö¸²£Ç…ÉÊfp.Rş;méMsŞĠ–Λ^C½ç?ı#ĦĝU¸œ@nÍġŭĜ"_%Pê}>a¤Ĵ'à~LßEßqÙá‡hVׅ|"xd^kî­ UïôSŒĊĈ ŭ)ċ‰Ĥċü–uçȽÉúZI[ˆî†'À9ĝŝ_SÀE8 à$*¸TYİ’êgħBU~6Ż0ĉıe(ˆ=Áß(ˆÜäÒÂ&o@—•p•ıC‡Ħ'15t\HèX˙Ü-’sÖ¸G€o[%^†ċȎû‹P™ŭċĜÀ}k*żÇË1NÍYc2ŸŭVh^ìÓ&s™É‡?yğ Ekàġ Fù ‚z÷]âĵĈ­˙p'ĜËL1>ŬeŜğOŜJ|_XĠÚ3< ~"ò??€ dÌM÷´VOĵur·÷2]> 0bäÍĵ‚ï|¨e'Ëğğ­AI"hèUƒ‡ĥ×ÁÇ[û<Œe1"kïƒŜJë{>²ñÙÎĜK~ŞġŭĴ|ż şèŝóxħĞñÊm|{ħg’ÖƒıŝcBBLJĤ"ÄĦ.9Ê­œÈ)YùùÉ0ša.瞈Âü‰˜Bv›Ç¸ ü~UB/àVğvŞXIı`Á œpŝğրëp8πŬ ´•Ĵ­—tX¸ĈÙß9´˜§Tçh‡/fœ˘W|‡²7Jkŝˆ ’…ĵjƒnì·Š÷DžÈzb|5 Ü TûŜI)“‹”—­ZxL:È%¸ÑŠŠ³ĝxHáµéŻ×•ĝ´°Ù·#/ŸĊ<@£7P×À {ziïÏk‘"’w™ġ^¨ĠWü‹âiH[Ä<•Ŭ"íƒmœ/.dıSœ\_n~żÎ@´)*ôƒ=’èÄM_Ó Ġ•‚$ÍÙS½ó–F„lΛPïé(—Ö†ÛE#³+ëú&c¨Üâó; <×Wġ!îäVg%BBċ•Ó0^êèı~ëžà#'Μğpñâ…sgN Ŝ³u½§Rc 9Q˘LâQ2qŜz2îCU Ĥ;òÜACA1Ŝt=Ğ‹‰ŬŜ Ċ…JĴ8 ¸âÉoçm„io+Ne$ „ħugÌr:·SâûG j=À)çöÍċtž’#ğ|á´ez;Ġœ¸ 9!_ŻV¨W=ó¤H$£Ĉĝúc{IËŞbUò`·ocfÉËç,ìCö,|’­ï·áMw€+pŝ#.Ħ 8‡$=^X%Tò[yÑd 6üĴmĜ>îÒ@ÌSvà9ò²W|+ìĦÖö .˙D½á=òêL‚-ÎÒJ*ıC°;ÒĤBwm?,>èê|ċġĵQD‚#à pžáxió|Ĥ°YÇo ŝd‚]2^mƒX^Ÿwŭ=ĵñêŬM²İd\PH –6vÌ]:;1t”Žż—¸ĥ5bŸV šw'Ĥŭğ£ ïœİ";K8}·ÍH’;ò™SikEHz'+X3ÙAàf†qküÊ ¨ä:\,gÍ5Ş4TÌjߓJyôÜÛk§ÌƒEtœ÷ÜH-í"ĵÓ}ġoî?à·ÊÚH]N J@Ħ0˙QĤ@]HLNŬÈz•߁ó÷ŜÔ÷M‘ÒÔ{œuDȏ²ŬŜ{½ %İ|²ÏJ İı ĥŠñ‹PT^q|žCJ°[eϰ*Nê‘DÖĤM1ówH’Ìn÷qJvŠPß-ŒŜĠGŒ§'îj˘+>aÛ¸¤.Ë˙ÑĦ‰³Bâ×ç:6’–`I W•6¤Ŭ£^Ȗáß˙i 
ĦÀ'†Ġ‰'™ßâw‡™pĉüfRœŭ?€`p¤ö|xż×ë3,ñöÓ²şì˜WHïxwe íì‚ħ%̂½BŜ7#&;ÜWyú½&}0"·†hBvpĈÈjO' Ç_ÀQrÑoe³ÑL.?€šĝŝ_‚Ó`(G]=•/£U3ì‰ğt˜S,³L9™wX4Ÿn+‚Gï+'FDꆰ %ĈkĴxƒŝ²ħÉĈË×P”£&'Aĥ"ƒĠ{^‰d<;ŭl9Y"°”Ó³„,ĵ!s†Q¤„HoIÇZ˘áŻ1=ĝdŝikADÄĉä—lĦùĊÁâ$è…L(uDCĈġí6ŞÀù•ÔsÜz:î}iû0lÀ < wù˘ìô ÷˘]:wúä‰'OŸğt-ê^‹ôì"x>Œ†aöÜp{éû¸Ó[ġ$7, j³ŭzFȃËB3„$ôà‹ĉlàËIDútŝ$ŜCb£[°ñô-҈RPc&sƒ0yIL?§4P‚ĵüÙôl²5Ié|/+ch&'£”)kÊñldé"&ż˘ƒwä}C° 2„2µĴH çĞOĞ@—/{'#—Iĝ`ÒFż s —á-™üİĠÔ8xŸï~\âxv›íǕ9jĤüğßgц†OĦ`j³à4˘ûğFnäš]Cż4­ì‹Û!Â! ='|½üĜR^ 7Hôfn£i½²œpʧEˆŜfĞn’ħžJmù ĝ\‚?ê îIOuıœàĵĠ)RìöİjsëœO*° ÁÛìoĤ”ƒ=[H…Ĝ uú~èA ‹œ‡H (CôàÛÀi= ÌbŬ°4ÏXdE’Ĝ]Ì~ĉµ•‰äĴÁw³‘3•˘O†|„ï×Ó×Ïoĥ—8–Än5A)’[o~nĈ8ÍYqG|-” _„¤Ĥ…ÛÖ#×^Öĝ{`|zžù_‡80ç§Ç~7–Püڑ­nš’„1BPÙÂ÷H\Vó›nŭ|sĞ…$1Û[2Jœ<÷šñGôW8Ž| 1¤QġCrĤ87Á)vl8q%8ï sŠwKĴ/d@Ôáè‘ŭx„´rৗ€%ŽŽŸ†Ĉì~şú Ĵ‚´gü Ċôûĥ Jö@Xċç:—yġÔv`Ċ–눁ßLœúÏaG˜íA#àĞx>ą‰äl£NYË4U‹­^ôŬyĊžO”pŽĞloz{F´pÜ5y¤;còëq'=Fˆ0RĝïbÈŜ?ĊQg(†”ŝ)†A1Ä Š!Of—É6W‰Ż˜}IÙġŻbÈ9ĝCb·˙§ò@ÛfżĞIĤĠŝ@ŝCNC1dëàÀVtYQ ıFCL?à…ĥÈòüûJÔş‡…‰bHÁjšÄa˘ÂϽŠ!Ħ²-Š!žP ıŜ‚ >÷ŭŻbÈqÂÚûíŠĞ,Cĵˆb{à[Ì ) °‚î|ŝ§’xyŻ—Ĥäc—€¤’Q Yïżk_HĜħ'……ìÛċżž(†è(I €^|’š–^{/'ŝS ù|‡(† )‹1ßĜD1Ä Š!²W 2’}üżŠ!˘ŜÏħ–ëP ñ„bHŝ6(†<„bÈn~˘rX‚ĥş€(†m²şò;ž³ħ-Ä?˜ĊkD1¤lşu`p+CĤŝĞ2ĝËn­é’jßĦrˆġîŠ!·!|Žf‹!ğ(/gWˆW5Ë.uèEĥCĝŝCJĦâĴ2ú§²÷ßĊBd0†…>Zĵ£ĉôMRv3˘×ŠwdÇĈC˘˜7#ż™0WŝU*ì2˙ŒŸ€Ç€ï˙~È${MċKëÔAH51+Z­¸£ñ$aÍĉ ÊòĈİ ’}ÌaŞÙ7dbÙöˆ{+Nž'tpò˘˜À‘ŝÙ;Ê`&ó (\\HYŠŞFôħsրa)‡5úx9UúĠ 9 $tŒâcnË e Ĵ†Oi„ßâ(PúĉjèhŜÈ˙\ÎèÏgÇÜu@ĈĞ´Ôït\Ahĝ.ŒŝŞĝò,êäžġ.V†êrâ‚!˙ê„ۄŠËİZıĴßs2êٗŠ_ šÀ\SnÂiżJ Òq?öì'dŬñ7‚ċƒ~ÀC°–|Í'÷€UÇ~¸˘ĈŻĜÍ;¸%N,$š fO ¤‡ĈŒ02=@šĊ˜Hvĉâqy6Âİ>Orҋ÷&ùÂÀŬ^3{Ħꖛ4Êc”ÒÎäô~:çŞDC…MĥÜÌîœ'V·ñažF$ (-öÚw%1ĞŞsŒ`„°fÇúÚëÊò?§½LŒżw'êfdäͨ;÷â_Ĥ}Î/Ğkï#ŽœùħÎŞĴÄ+ûĵ+A;”.iä)3ĝ?îÌıĊD)ıžûÔËaĥ§™ Òn·ŞĜÍ÷<`M᛿äw!’ŝİjÎÈ3.çä F¨C=2#1†ˆ€°žš!&‰ §%¸w4³_£?°÷Ĥ¨w=ëßééN?Ôş”u[X(‡+BäħÖ7ÇCíÂX9ĵn˜.”Sݲ}êàÛp Že‘pŭVA9PÓĉ@ĝAê§>€=F!ÏGĠ—1ÏRòĝ÷q쵀UöK˜#M?l™kżp×KäM§È6ÌC~0y8Ħ³ŝµ¤ ‚3vÛ̓ۧ­Û_ àŸ€]°<ˆŬB÷°Q}fżˆšvt˜ ½…DÈÎù|EÙÏÌ£$ŝNgRĜ|İ ×%V‘1 NêэŬ‡éZïlsÒúĈ=yî}Ŭsw•h›ë9™ÖˆqÒÂàEy²Ë'vÏetÉ£‰… w.5™s³‚”Ċ‘í`vĉ'ë+šgVF,çC•6%vpĉĞîx+’İjQÀl›o~}ÔQ†iÁµl€€„ŭ|{cï*p>ÌW >ıŽˆKù\TŬÒq€?Î(‚ 
ġPstƒ]-ĠEŸSâ"€j\êaVÖhĠŜo˜°2¸µhñ!4Ç£Ż›[ċĞF%+zßİšçt$nRBù–GT2狎é“ùÁvŜı˜"è›1;—ı†Ë=caâÑTùBû“ YŝâàB’1bÉİßLSş;×½[ŝ£e=É<§E?<ѽuĴ´Žqë—Ié|Éı³ßƒt”ùYV1~'äAŜ ™wt˜Š~™ġĦ>bïAoASï6g\w‚~]í\žÍ°ĈʟÁD|•—èLġa u6&J?(}NÛİŠ 8ĊvàCÉĞ…IçêħŝûÖĦ YŒñ'Ödİàzĵ(Xkżc A­œ +dq§n=E3‘Ġ°–p*ŭqK×Ô1.­÷xŠ‚P4'Oç*;WKà>ŝBL5—†úŒÖ „÷˙ıl ŭĉĊ"_fĵè q<†ÎĉËùUyƒFŞż@ä Ĉ§öB½ĝfÁşPŽ‹ì‚^w‹mĊ€ x–ğ .}=";q…^˙yŜ +˘Zp üÒ*ìû 7}ç:€oĝY'êîèX†ĞHY‡ŞÑ@ Ël*—ÏÂKàÓĤóŻù!ŻYİgóŸù°£$ŻA¨2ŸdêŬĉ|Öàd™S‚ÇĦ“àŜccCĦ<²w‘Í-}mrì’4}s5^àA‘jĤ3E ´ 5¤H–†ÙßwKĦúça³ê€ˆ3?Ħ}Ê&GNàŭiûŒ¸Q™UW GñÙşÄ 1DĜì/`á3í9ħÁĞô @¨˙ÜvôoËÀÔżCŬmu?‹ó³?½O{ ?iï?eç˙Ĵkëú bj àÂçvıtDèŻ ŽÍiŸÁY%ñûm!f”X7‹^Y%ƒríKëÇ'1ÒÊ ü<€Ĝa7ž×Gvg?°$I¨†Ċ9ırĵz3]úÒĜä5in§ß‘ċ ;ĈА@̂7uS̋Ĝ‘üŸ9·ùt ™'Ħr?èE: ¸p•ï3ŝ {Ĝ:Mù#+ĝӁÛürÁ‡+uÖE°`ÀHµ£Lĕ öhÑşŸ›\ߛ„ŭpáÖ/pÔZ‹ßîçÈÇzú• YÄÒò/)´¨”1ĞcóoR¨\O&ĴŬĊ˙’B.8‹–w¨ô-E&ïŞ™÷˙C ÍRèU‚ÚÓħHĦ€:DB½a•ĠEBGÉĊ …^#Hx‹˙!…~ŝK ġç’<ĠÇHR(c^)ôĜ˙Bkk)4HĦA*ˆĝ‚úHĦĵ–§`ö2ĝé)Ô<èy#A ßcġ—zú/)t 2-ê ×""ĝ)4ôâŬ¤´ìâÊĈö˙&…vµ7Vg§%Ŭ½ú¤PE^£Ò*ĦHBO˙%…Zí‰'HĦÏƒ€¤O@ Í?eÉ ¤7)t8˘¤< …jŻŭRè1 …g BS}§$ıü˙’B?˙C ]LPŻ)”SlA%HĦ]°Pġ&HĦA³@ }¤=)ô*Bó˙‡ú~fäÂWï7Pí(u^xĝ—Ú¤ÓäÌı“Bmtf#¨EIĦ––È„Ĵ? 
ñ‚ Iî“ÜûÀNx\,Xh=Go0ìe;AÏüúXħeżĠu{JĊ­Gż َċ£¸RY½ĤꂑŭÌLq­òI?¨‹&÷ĝu Yè&ĊÌ^͏x’œ FíJĊŭP>ìçD„NŽL]“áÚÚÈù`‹êĈMOÜ<Àәé'¨üħ&ĵñ¸2˘îèÛ°X_ù¤ï¸ [àœ)Ĵ‘/8z,a0Y|S‘B–u:™Ŝħ€OÖĵ8êŞïëb†ĞĈçĥB"€9Ìçè‹5¤ĤmħğNĜnìK=àhƒ`ŝ²ÈÛ&KSÊkp•É›äüZ€ÉÚЍ=AM€fätƒ!Yŝ•kŽ&Ì;‹üèÒÖüU%c6P"ı¨ŻB^·Hv’”(8zŽVë‰ĵáœ#ݵ9œżġ샊]ÎW}“³Íîˆ[Âuñ[ ­H†ïHñNc]ĉòÈÌ}mtù;VßE’MÊq\– ­B„*TĚLġe@8Ħb9¤CĉYzħlž 1\9x ܟÚÉfĥ½ ĥ%^ÍNĤ6ÍÎ/ˆnêĜVXâ{$ĉ}E×$XĈğë½Oĵ}>tçzwûfÚjÊ r²#§ ĴĤm`ĥÔŜ}ŭÎó·ßĞëÛ4k²Ğâ}Ìß%`& ¨{.„3Mİ'‰WNQĞàWmLvgê~x”ƒ2{ìâR²Î!BmĞŸÔIVQ,Ş|ˆZàÁËÍ\Š IñBëŬrTûŝÌÈez@#ç1ĦNxgHÚşpK\äúB£âŜ<{’K˙+çÊì|—ádħĴ$ŸĵÑĞġ„s`6ñ’F-şò}‹$KÌdŞ~ijwŭqžO@ħm\_ĦĠŭà{ ˙™x„<S³‡|m>ùĉ¤ŠFyêWÓ[„Zŭ Pü‡™Ĥ–€hĝJ6gl6‰Ĥ(Û‡'qÊL£˙ }'~}ĵà*ô½Ï@ııIY÷w­’vkЎ|Ù Ŝ—ĴŭH(û• i3Ì("°—"šEó!¨[w˙xüö@üúvbWĊĵ!$ŭ€]üä€ g[;#^ˆ Ë“çŒÓÒTŻ<ÈËH“Rç'–’%÷”rî/xż™`—zU˜²÷żĜ ‹;ŬĴf°ZmRPT×ŬlûTeüNsX/ŠûâÀùîïOOĴ[˘ÀG Ġ,Wï=˙ í[}÷ŸA?ċÀİħĦ_m-ÍMMÍ-mżşú†àĊ +bb]]˙-íÁù½Ğ-ĠÌ$ŸÂ’u'ž~ïžmà÷¸ ċ°1ß_ hùŬuÄo°éQ ‹Q}ÓI‘Ŭ˘Ÿ°Ü Úĥœ=ñĈ[€²ôŝ§t$yiÂÄ|ŞY2Oy^TéÓ}Œç&Ȳ ˘¨Ċ3Ú·F‰óÓq(g?şÁĴ²°NżâñžkúğŬù"M‘— [9l*ĝŠŠìg½ä]1\&ŻÓÚŞ­Tû[WıİYÙ ïğëÂcô8“ŝ_l*nĥÊİh䜙ñÜ5ò×À€Xš2óĉS5 Š!˙ĝN;&ż_'Ŭ˙y2ŞjÏ>IÉŝíó¤”q#Á´]@‡xÈñıl5üYġżôä+Ĵ„>Îm ß$Ġ“%Ş2ٌŭHÀÔO š>V˙ÊÌe\ÌĵÌ£“…=’‹a—-G×ÂĴST4bĤg/—"˜RöóJœ™ıĞFrͅ·@AŜ-exc°ĊñÍÜt²=ˆ³Ğ ,‰lƒ{Ş›Ió`Î4CV‰!â+oVB ûÍ>S>DĜ28BáÔ Zü~ÍU‡}#|!“í…Ï.îZi˘@œ€­fbëċàä•ğ )éŸsż•”˙Ĵ‚ŸŸċ%ßr?§§$ܽrò€ż—­‰Ú4UPÁdċ‹Ï Û' gÈ·G‡WiòbY$ žl)Œ™îiĊ|ċ͕âˆĜ*P†LçÔ$ ıÁĞ-r EB͌,Ú'OϽq¤H7âe[xá=0ו¤vwfäœï~pç(rí홉…IùZty;FLê–s™YlÌu™ŭU@Ħñ?§ŭŒlĠ’I~“ĵaĠ@…ĵŜŻzĝVV˚ϛñğhuŒĈSHçĦú+›r’mŻ:úyÑ/½_#?v—üŭ9fH<Áġ‡;_Rùŭ[m ÄdtIzcûı‹ ×H8ùŬˆŜÚN8IJ|y>N,+€;ΐ•Hö´$B‘öKŝ%]Ëıïáwı-[†An1WnÌ}ƒ“!p›“ĞÇuzĝ÷kÚfÏñËÇΘe Êµ@ ñ|G’‘ŬGDy@¨ğ‘[u$(vÉÓóéžĵ\.ÉĴâJˆünp—†R¸­.ŭd0Ğo:‹""öa?VxĊMÇ߂ÔyĤ) ìŽ"ċWħŬ~ċu)_pĈh{IzÜı ß&jù#„:˜‡OPXDTLLTDXRâäÁ Ş™Ĵ :—^Ò>JşK__ÙnĞÌpRĤÁCfċíq8(¸])ƒuÄE{DÔùf5“ñó’7Ċ0ĵĉğċĊĴñd.^Ïôùéd;Š8QĞ…¸7‚&9€GôH7äaQÇ|ĵħ™ò2Â/Ïnĥm ĝĉNséċrn hdqnp—ρ†e¸Ċ’û.~{yWÇŝ—ìPw'=Ĥ³EĴ†à™OµióGÖ %j]ï6ÔòO°"k+ˀİ+’|ği‘\óñ*(ˁIċĞMl-8I–ñ£Ĵü­b$³ĝïÛKi lFŜú+ ".Ñ pˍZ%…™żëb3;R/—„m°]P™rv½ı BTµ^4&íGÇ(˜"`ÀÛÓT–ó.1ĉêéûvl^çíéĉşrĞ›§÷şÍ;ö>}5&ñ]NYS14ĉ,ŒvüH‹9şŜZUŒpɘŻ?›R9Âĉ"ÄöÁ’˧v0Ù]ï‚ÍùİUQHjˆvAüߎ`m 9-½ŭï¸aFۚύ·$Iƒ ³vżÍë+¸q  
Y…çğ’´-tä“ĵ2ĞıXÂĞm]„=‘‡8@µşmJ5>½ë˙ħêyvĤŒB#Ù:[¨ T³6ıKxÄ40™7À#XyÍ­bi7$‡ĜȀ9ÖxċôĈI¸ŭ·dŬ?ìmŝ'& odż!ĝòƒÔܟ­½c3Œ?dhBËüóCHd˙£3c½­?sS\Ŝ`o$/'fî}ĝ~V <&Ó/o0wĴŒMH2İNjo­P ê†‡LfC CÄ=`N9– ‡Ènûš“} `Œ¨Ċşn‚5ŻÙ/NuŭÈê='O^ġkĜÁ# ä{Ş” ìdmÀç.DKUñ×FtY)û¨ôS€óƒ{W‹˙.ÄdÓjûĞυrKàµêìX,ĝ–ı=‡G’ĥ³’h )ôuÌ'°İı (ŸÍ\_ġtÁŒoċŭÑ(²nèUĵû0W‘x$£]àqĊ÷ˆÁ&òkiS*ž¸Żü´NħMP jÉ£xHĥ~Ŝ‰€O€>xÈx×ħ£˙hİnÜñg ;¨PÎ#–İÊÎlFħÓ%ÔżÏïC֍´ÙQÂ%Ĉ\—˜%f€ğjqCĵZgÏñÁœ£Ìqĝ½5$yWa鋛π͍fóírTíÚ ˘ċĜïËڈòÑĴ7֚ÊğòÉŜµœ›b^4ÏŞğċ,‚H¸Ŝü9‡ġ¤´h~“àP6×òîŒ'ዐ4ÛpîeYß<¤zżŠ ö]Ĥ-ŭgÌ-,£fdċèµqÇŝ°“ç#ߌş};êĉġˆó'öïĜèċhe¤&# ıpĜKk/ó Ž|Uü RĊó}e/Ïm0ƒ,ż–ç™w¨™Ş|´Ó„Ÿ`E§ġ`s?oşJ "ηêXóEáFîċQmĝÀ“•ĵTëĜ^Ĵĉ¨2˘}ù7V( xĉà55tù[fÈRA”ÙYœŽUy“ ßb?Ç2˜”ñ›mġBÜZfVÂĵÄe\§ĜµĴCöÍW—HgC7ÏÊĞ–A$<€Oà‘èÎ=˜Îßŝ#u‹fŻ˟>\˙ĝNîo­’œcÔĵ¨mëLƒ/ıœ/îİ2AĞŭ—ô&µú òŒ ĊÚèÍEP êĝ‚0Oé v–ù•ϵ>#F=ß< tˆg!…t’h‡A_żv6Kܰıg0+.’=‡ÌA/ WŞ3‡ĥñ¨kÛä²e)+JXĉ§xêÓ̈–ĉ;%,|rx*RŽĉWƒ}‰Väè\Ò"Dëĉóƒ;]pK!{èžYtK6dkŭ$To!Ú$>›k°­é"îĜS@hš¸ ñ ùÜp¸ !ĝˆá!­Gh$˙nü_2Πŝ%í‰ñĠP"ĉôlVì3äK5ġĉ(˘Ï< Ò÷ÍİĤŸ7°fĉ*Ħc;(Á‹Ĥ½\g.|‚)ïĈ‰ÂĦ¨l 5€}u· Ne|† Uoƒd6ƒŽħSEt+Ĥĥ {fË ĝbñ×Ò2ݧ2Éĝ[~x¨Ù#>ms—…$£-Éúo8ı×MbŝÚÇ'ÇĦWˆŝƒİİxD;r€•Ë š•ù™TÈÔÄLġ’)…Ï)î7>ôz³<"ìU 7Ŭ{„3JoûÓĈl0÷Ş·:#Ö^ÎlÁ™ŭÏN­[$ Ê'^Y•;Î>|_Ú64Cd½Ĥ†aéW’÷)-%)ááŭĜ˜˜Ĝû’RÒ>ċ•Àšpx úákf¨­ôŭ³;VËò‚6JvÑşSÏJû™ĝLkĉċµà‹àR÷š;ˆ-4>ŬGX£îÁİ6ÊY‘ßüz˙çÄG1½lpòz¤Î̃ŝ†×7‡5ݍ˜ÄOM=G,^ÁÑW„oß/f’.Ü 9oôÉ[ђB—çÚ|{82ñĞĵĊ“e$žâŻd¤_ħ|e³{-Sş"İp˜Ë–Ômèµ̘ß@‰Á:p?z—@M™u%Šĵï(Ápŭs;ċìúŸ: äÀ>F+İâ>Ìĥ6˜?KŻÎA÷‰`ê#NĝjĠ™ ôšŻ¤Ĝ1%7l‡Pk6ôH­5F“ÑH†­|vâ‡É)SV²u2–cŸx½ĉ_60ž-:CÀf¨BYbÄTĥWȞ£µF‚IĝMşUӘ\à“{`şÉŬ9Ò³qĴe\’ŽcWş ö%XŞimç•êRˆ˙XMQş16óPŸİL>0A4"ş9ĊÛE)Ë(0ĉİGÊٓé~RˆüV8ü~ 0§óœÎ”=`l· ‡ž”unn„\8EÚr[äÇfxfOw&ž °ġdğĝµ,Vm >'1í돚–ÎŝáñəÙıyĝ™›™îïlİùñ5-ñÎùà-Ğ,´¤ù!IÂ1û€s‰…0›lŝıÍüô‚†oB úˆáĥâàĞĜ“Òəݏá$è˘Çżá°U‘òKŸd—)v`/§ˆn/ĉtGh && 1Žŝ™ħJ”Ġ?BÔè§n-É +ħG\*ÙqÒB×µŽÈĈž‘¨C8WÚà@í7ÖdEż‰' ĠŽz’Ż€ĴÙe¤BEıbÈF XÑÏh/ĉ½x?YÊÔ5ÊZM;áú+Ĝ2"ÑäQ kè|g· íÀܔĈbI_kèfTWNȸô6ŽċbvpW ’Ò#îvÉî4 Ê-҅,aԂBg>`‡I‰söâ?ÛU úŠEmĈ³&r„—–+hÖ÷X "ë`ÀWêVNĴGCıD‘ġŸ~ó Íĵ”q[à÷2PvBÜjY÷eyŽŒ9;6qYŠ{g3'ʒ³cP´x6;ùhŞt²Ż SDt/€zñ&dĞÜĦoßvۖ—ĴO¨C[Ĵ–FĴOC7lüÛ7yx#[yĉoùW&v­UÂÄ'üIAÇĵŽ´ĤÜ ós2S• pÁ(™Î',)ŻŞm`şĜÒzÙòċËĴ-›hĞÊK óÑ c•OBĠÌÉ/ìVJa ‚9SOÂ}L$ ê|8 
üԌßÎĴ„wOy·+ßĈĦvÚZ‘^ŭ •‡ê“ymo·ÏÀ’l7A{yAQ Ğ[O*Ħ‹MÎ>³ ĉĜ,”a­38Í;ı.OŒ:56r”Wö>ĞÖ qކz° (t¸/0JÍig˜Eú|wˆÊu.#]?Qİ PÖ`BÙ.`ĠSŻİP>¸T8gÂA kÜF´¸Ï@µŭ§¸ŭ\"D·rçw*Ĝ,DAşWwÓşÁ ë'ŜşK8ˆ½Ĝ’³MzyÑÎ{zV ÷Q€FVĥRqè;ml8ġı½`+÷ëï&ìĵJG˜xy3ñIˆÏÌ$ZÀAwhy† óƒ§%xv·ħžë‘ĵĞ9/u(żçKÜ!'k:Ĥ¨&ßĜ‘|šÙœ<†?ĉCÁÈü{ġ ğjrĦŬĊݐ6Î}5ïŞWWİí2’ŭÙm¨Ĝ˘b_(à菞Ç6ñ~û%gğpy âSĊ`l€r°O$tµ˜]_Ĵ3BÉâ&ëÎĤ”CŒ‹5֜ûôÂNWSE!ŝÊ#"ݳĜaµÑó7b=Ky›‘ùñÓ§™oSž=Šıqŝh˙j‡Ċ:ò"„2ˆ&¤hêşóÂÓÜf Î/ô—§œ]g"NF… Ö]ùĠ ĦÂH¨ƒŻ˘x übÌEÖܝ>‚÷$"D)0{–YĴ‚((a$ĜR…ĥ°˘ôá€;ÈLGvÄB’ b•ĈiŜAĴ‡Ž ˆEL÷íA*Ìz~O£ë~ÁKJ%âoäēDQ•l>9uu;PŻaOx-`ìÂ÷FaĜCŞÏÜ{~ûñ!ğ‰O|óIä½ĝiğŠV5J:3#áú›èMC5q]A˜ûE ˨×Ĥ5œħ]BmŸI7ĉ Eƒ$ŽëO=Ŝ´‡HU£´íìs2x‰‘(>en %1ŭÉ1pġbi£ċj²ı ?ȀîÒgPM;Ö ;Ĉ[I{&:ÜA<~€bœƒżT áV|Èlñ£¨?füâ;50˙XYšÊì;Ż@Zñva8qŞÀĠĞ;˘„hiÄ{bĦdeۉ÷%û–WŭĝHĉ~İmN€@Vçğ˘(œË#ˆ 3Q›rrµž0hŝT–\HŝNhÙÓ½uı)ѧ}í͵€EŭS ‚,0™?äżìx”Dċ~Œ–ı½oàİè”Üş^¨8{ò…€ċ0 & ë­>™ˆĝ EÀ;*ş4ôì"›6ĞA$hĉŜ˙j ìW'öáħvPh‹íÁÏh!JGê€{¨ ”¸Ñ…·+H çû˜İKŭÇó§Äxƒ~3ĞSüZ˜ıC†F¸Ġ^â9Ĉ” vï˜ĜCrl߁Ĵèŝ.ë34ïAž+ĞV>êJ{‰0,†ìÏL˘m`>g4…ŸÏÚJ7V‰xëŭé”ħŜx"Áŭ3ğAúÜ&´ s֘F-+$=ŬÏüÛ şHŻ…f04ƒ6Jt½CïÌšÎŜA' '’ Ĝ1äŒÇ2aï›á€#9üïf7ä´kàİô§ÔÙHıÀyO4ƒnóA;8‚Ú›AÔD3Hû5V´Yñ ˙lE4ƒÊĵÉ*·''˘ÔH+ŜC3H‘´ šAIËŻ]ŠġßV솳šA2ŝXs´´ƒRzĦ| šA˘ĥ§ˆfPSò+˘dü¤üo3èÚvhsIé;m;ŭ ³ĴB˙iµT}ÏJ{ñ8öż›Aħ_¤e}Żjù§4Ò^–ùàô6'}).hĞĜlżö·Tŝ$˜˜‹[Hn"šA§lEĦt ÚÁ½)TpˆnĈĈ3üm Í Ù€{·+ĝù²$h-#)B3èŭ ’ZÔÄäm²wÑ ²úŒƒż’"ìµ6Ñ :@5ĝW3(W;ÚÁ|·‰f{ΊcO翚AC5ÚÒùÓŜ˙nlqi˘` ˜9)ŝb~1|[ÑşÒ ß¨UFuçôQħġï&8µÑè‹Ï–-0knÂ[·¸ó50~>i¸. ßùPĜĝ~Ü4„[Á: âMEDzg{Ğ>=:żgġRY!"o"RŠz&KĴmœ]Vtqv°µ^b˘§Ħ(%ïD$HHVgéê=ç}Şê…`q_Ċ›ˆknÑ[ï÷ĝsòoĝjœÌîäçA°^spİÓÍĉBÙÙĊtÚ˘‹µœ‰wëĊPŭsxwT\ánÖ}E Ñğ=ÌH³')\èg¤5™Ù" >8}Cš'd`ꂰĜġùĥuèÒbNĵìN@µutĥ +³U‹éœò2ïÛ6ĜlÎ˙œs ˜˙,fÔÖa,Y0îė>ëÉŭn֕?{t‰l]“œĊD*‚÷S? 
h,šŽGb 2ùy6 ô0|@ JmÁ쌘E7ÍÜĤUVžÊQ¤Ü:Ŝ`ŽƒêèSx 7‰VĴµRWh#O˘\„KşF\#kÚ_*eÖ[İ ŬüËXâëŒ/$nRÜGjôE^ÁmÊı§Ûĥ߉NG5ÁhN=|ĤĊYUψğıĥ‰ c w€=KúâÈì]Ô:•ÙQ Rss“ –$m˙˘ÊìùΚz·VĠ9öƒ9WŞKĉħ:_:Ïn~˜0n ×€ˆêxsÈŠ.áo!ÜSwÀ‰°ŭòÈ/ö9|7½ĵ“~1&zŠ2ŸŬıpx÷FOGëE†:*J r£ ¤˘Ħc¸ÈÚÑsîî<Ë,jèL0 Ċ:ËÓïöY  P0;ˆ+Ġt›pE@DXzÓ˜¨×A*"ïû ™=_zŜЇĴZ8ÇüqLYûnŠġ} Şċ7ÈeH– “sxTşĜÏLµFu̎\”ĉÚÚĆq˜{ŒĈțÚnЉF2êW!-3átƒ\N´  eûàË.Ŭ=Îx}%˘_3âNżr²ïÌW _ÍÚ •½fRŭĤ²5Àòî.vYxĉîĵ.%k ĊĈ͘êkÀÓQUN0o].)jBÙ_­2}ğù ‚ĉ˙ ÇInĊìuĉ.Q‹raÛİC&ìá”ĦÑT¸g¤[J½Ù‘ÈuŽ×û‰eâ½Fò5rŭ–IöĦÄÀF.Gt†›Àk— óž`ˆÉ]¨{çÈVteëôaŞq– +1ßş]ò˙hŽzVboMQ§ĵĜƒĴ=9­.Oe ^Ó@Ì–áĈĊżöóÂÔk7^şíƒ>ĵQÜËn6q&żì×&s/‡Hh_ÚHcI:žÍÀío;ʃĠĠpŭċŒ&{ƒááÖ^Â÷B˜cmÖŒx”ñ½ ĝ˙ó8<Ĝß×ÛÛ×?8ü݁3#]ġß3E\góÇ Ö—½·˙6Ġ”qy½!ĝiċżmg`ÙgÁ]kr Ĵqùá‹ıÉÚûżLršn.Ĵéġĵï-×íġÔÂçµü\nx}`†h\d¤.G5ˆ˙bU²G1^à„šĊ*=Qóĝ×%èÚÖùAÙ,Ϙzxşu%şu¤ÓŬ5ÙèUÎ{…,2nE§LñÏá1Hĉ[öWÈtÖÈġ–Š/›xÏċÇıŽD²½K[¤Àŭœ0ed8ġaêvì#\?‹İ—ĉtìħ­’Cħäù(E­ŝL¤°-L'é?há,ġħ§ÈŭS½ħ„xJ¤ŞAÒ~î éJzÈ\˕6³’ïô*ž7 ~ähĊ2ß ™µġĜpŬ£éËÚÜIG…Z‚×2’ÏWò"ħœ|êÁħŜ­$óŻĝ;]Êĥßs·¤yCúfï(Òü`%f‡êÂlô–:j2?ġ)͈nìÇ.q²ċ½Ĵòˆ"·Bĥ…`br;뗃ú`êܓڃÍVDŻQ˘Ü.fC7´?˙ĥż™™$¤çyìIQĤ~§\ßżÚR]ĈÁDÚSÍÈÚuŬöƒá#c>M~ġĉíğwoßĵJ~ú0&òbĝÁíë\­ÔŝdGIÜâê–Ğ÷_O)ŝ5œ‰˘'Ç<ġ„Hd13˙ÛùŭÍèĤ@Ĥ(­‰˜ĊzR÷€ÉT˙àÚĈı ÛĤbÌğċµ#•ĜÀ=K²ĝXw„&ÔŜĈĉSĴQġ[0ùÖEí`êGSĵ3ÛÂ+}kî÷6Šî;üĞ9ikïĜAŞI>'VDŝŝLRĉ5ŝ@PАq„ä>Œß²éi3zÇ 8L4Ùoá ÏŞé|+gÒ¸Ö2’Bñ#¤'sö’ U |ĵˆÓ3ı<S·a…Ó~TO2L-Ĝˆ çjj2{’°†5‚5ÌĜ’Ĉ]ŭÇĤ>ú ıó5̏°†™"È>Z­a¤ğÀĤĵ „@°†-ëm"Ĵa‡ÀV˘ Ló?Ö°mˆo? 
B7@² x§ı5Ŝŝħ†ġN”¸ñŸÖ°_ĴT Ĉ°†ÁŞ,aĴa$éàĴaeœÑç„5,˘z÷­a` #-­aC˙²†9} iÂĉżäkĜ½OµïĴaċŸoŬĴaÀˆâŝ?[#.Ö°ĠۏŜHüTŝÇ6?PûéŜ?Ö°%ŝ­aMŻB è5lèßÖ°“` ëĝdž5Dְ磜²ĴaÁÒ$°†Mƒ5L‹°†!İĴ_˙i ğ!!xvŞ÷5ì­÷iµè×?°@ĦŭÈĥYÄ4KÀvˆ°†5ġ.kX r.·ġ.)_Ö°ë` {?fFXüŝħ†ŬAžÂġkX5wËÒĴa„5Œïä,bÄÚ+ƒ€üŸ4Ù°ƒA1O=ɲSN4Ħbw6-ßGÍ5V넇Ë\2eĴC°ëPŒrĜ[ŝċŭµZ2y³~èINАAġÄFd˙\‰Àü…¤Ü[ü‘˜Ü+ü•˘`$gë͜‰e6­%½Ê=PƒĴĥ@ħ>F²9|f&ïC{òöó³7ŻÏÇíp9>ÂêΑ\u/6È ˘w!T{ÇKċ6Üŭĵ…ÁéÏäĦ g×>ƒ?žÑ[ôäĝSi`;äÇ=b§  DÁ{ {~ ?żgżóüÉ{w£oߎ{ïá“çoŜg˙ 耉y²Uñ)áb ; †€2!mşĉĝ“"ȚÎu|'„âq)ğŸhyې•ġşS  ğ˘ˆÌ†x÷#W²Ñı:lè‰#ˆ>úòxfÁùÖŝä \~ódF_¸O`ë…êQ޽3"­mbĈÊœŸùµzÌHAEĝ‹“{„ż•“|?-™Ûlœ¨6JáœDŭfód´jû—óż…bX Żc!ÔĜAIžƒ£Z§šñhuDËîV´loʰŞë$ġ[-o0ÛF´Ÿù0ÛË2B-6ɍƒLşˆrcZӞ³ŸŻî-œí ˜ Ĵ•R­%|›°Ó(ÜS$ŞZ•LÀë<“Ĉí9÷ŽÇu*[dqO½–Ü÷ÉĠp&‰Ĥ9ġ·,á‰ïóšTMïB|şÜÑcżÜ‘M}C;@‚+.v‹Ñ¸,:ÜÊħÌ%ˆÍĴz#MĉüàÂ3X’ŬŸœM²DOµ•ûH‹à ş2LQÜ÷1›µK‘\˙rÍ~ (˙²óß§ħŝ÷‡C7"8…0yŝ¸·H‘$QġRkĵsoٛë{ŬLä˙,öD•Wĝì:v5îEf~imËïŜÁÑñÉ)ÇOOMŽöŝnİ-ÍÏ|wġĜ.ŸĈÊ˘Vˆò&n{Żż)ë%Ó5İÖ‹’€ıŜÂTšl­”Ċ‡Ŝ÷cÓßÏ/Gü@l=r½$˘¸+k–ñmŸ"˘V ӌE$İŭ•xû)EÔ2ivò>Ĵ=Ÿ- ž—ĦmĴĈĜ K2ħr7âg…ndÜżËĊҎĦMˆûŻħ¨{׀/²kşÊ„÷>dz¤ß f.7)Ğ'żËiĠ÷,ɞrċy7çɝ6 ĉXÖ* xb£§ħM|%­R+Y£èÀ§}ĞÛÏħלA)]ü¸Ü&lÙ"dAÛ ÷PŸ=OŻÎ%EOŞ9qöñĠю@°ë3ċĉ+TU'ĉʌÍÀ›Ë74ĞèuUÊwW+j·tړ‹_„r‘Š,1ĤÚ4QkĈ½–’Kî Ş~ÀŠÉ@ËA‡~tŞÓ´$§GñïXˆ‘ç „!˜ B%ŜùKC /`Q9Ä|ïJÙñë‰ÓÂöèsW>ŞĊ P‡\+Èm~5€jS€ò›h|²Ŭ€Zĝ¸rgüŝrc³ıáUĥÙ~ċMy/X#çò’#û9›ĞÓ^óP WHLZ^Y]SGWOOWGS]Y^ZLˆ—NäEA.*nîìw829Żažs½ċol·QĉE(ĉ›o|ùÍÀ'*.Bx ĥ?idp~§+š˘˜>„ĵÚ,‡HĴ} âT>×ç£ì’}཈ìÁ~ìĦıgEÂÊóĊÂ%yŠw €IZ0 䒏Yè§è½ó–ü:§ŽÒu K##öû *xK““zEó˜ĠNl"†­²*EĈżˆÇR=ÖnĜÑ˘­XŬm,[Ù§ÒÜ oHï ìkH4ÓUĴJÈ;FùÜUß#´˘Z}'µÉhRn5ŭüĴşîĤ½€œŸR܀Ùë/„ó4|AcÇU9ûyĞpcĞ%Ú ¸‚ܝJ–Ó/ȇC¤„xĠ´/X,Q4²\ÜZÊ]ü ş¨U:UcĞIg9İâêßĦ½½žè‰Gt÷X§7âĠ> Ç4ü£!y{×Ìe‘ “CGDώLŬ§m¨Ä =(òú Z´üİ_XñV!šË›™ı o>šmü < IB‰ŭx÷cPsÊOìÄĤ Ï,DÄVœËĈgkí\4·†×ıŒÖYh†ÄŻÔ&ցR†ÏòúÈY˜èm.Ëy›}éĝ›|Ü]ìm—Y/]j½ÌÖŜĊŬgӎÇ/E'ĵÍ)k){f !ïYD ĞĦħ Ô^W½Ù֌s^܀ŠŜñ¨fÎ=·B \vĤp ëL\/ G”ÇŬx˘‡Éžƒñĥ4>ïŒı™7.4Ħ­ĊĜŻSò+ş˙‚<Ċ£ŻÜ@“ż15rVTàèä‘Ë3]ÛɆñ4èÑN·{!ŜcğQǖ‰]¨goŻzh᛺x*ç,iġX•PÙĥ£gğ”µs—I,(·‚!ÇB .ùĊ´Rg÷<ˆĞ Mb5Ìġ£šw?ÇYu<ŭÒÀ omPœ:PoHîàX™ħBùZ>˘÷ÇU8ÁÜ?y÷à…j+ĝŭ œ6b¨Ŭ˙>V7!oşôµ‰ùj{7zĵÛÂĤ­ŬËıâ‘Ä.ȁ‹+İWĝ}Aġ/œ;‚*iĝs9È8Á!À· žâ‘óżĥR5ž²šŭİJwf†ÎHpÔâžT™“]ì,šHàOĵí” ˘ïÏ §´ d ŝì] 
ˆ´X£:{K"b·k òd‹è<݁Só+ĜÒÄOßç\jíÄAÚsŻµPú“ì”ѳ¤wlJvY„~ĉ˙j"!Ê&t`&°ÁÀ ž‡¸PSYvJ,dÈmôdŝäI•,֏Ïm‡HÈXmê9}@ÂŜùU3ˆŞ]÷a€Ĉ–' İıí*†Hz?îg”Ÿ4˘°+LWPl×:Ġó =DċTŝ3P„ĉ‘Ċî:)Cġ,Àk¸%Î ÍÜQ˘ú7³žjP·ŝš‡ey›/AĠ–{ާİŜá|Qĵż’R€+ĊĜEâı—w·š g0ĥƒ8ö*ş›@…„më!X½Ï#7áfŭµ_Ûp$ BÁ~üµBñ=ĵ?+¸ƒ9NŞ÷я-|Ħ,3+ÎɑÛT¤ˆô¸WĝËÜbÁm™ €ŝP½¸N³<%Mânì$~ÎVşĦVÒfú5uç&p Sñ`„Đ7Î6˙ĠħH5ûâ?S˘!ŝ–s‰jۚЭcmޤ Ix™Ŭ܇Ğt^.Ïuĝ‘Œ2"‚UâFÒ}Âè9*ʳ­˙îK nÂK· ry~dŒ',£ˆlùʘzğFlф §ĴGĥĈ{_nQ&ñ˜‡ qŞĝşäAŬ.~ePñŭÀ2ÜPħ ĵ•YçĉhKAòµ>ÖÚÒDكDĴħ½›ßöÀŭ!‡=zäpÈŝÀí~>nö–Ĉ€‘ċ‡~ ”H¤µ­}\K.heÂóż>óV  Md–Ŝ/i—‹nPI”÷¸ËÀáÏaĉ<$ċ-/{ñáw[ñµ)XS„YpÍÛ)Ĉ×-"”e Œž\‚[KñĤ`1şïwĵvèÑĈ]’[ ÁKs(Â3ŒH~™“~À)ħ‚#@˙fĦL‘ÛĈĥ‚„­Í–z‰óV\£dĈ9Â~-¸¨—ı‡ùp E.üñyCnrĥQ_OÛHÖ6HÛÎĊ#l7ñĤÀ> ÖÓ ġ×?³ BÁ–ĉĴ½IEÈÏ,äe˙†Ñ2N`{&X4á}šÛ*ĥ;LÍêYR_>m/RҭӗϷŽs¸ÖJ=YÔĜħÖ\x·³Ño\fğ`g™ÇK$ë'BoLCқyNÎëÛrö ĥ}„òˆ%éC·œ3ë"Áу|é~xŞ$Ï+ëÔÒ쨑35ìĴWSİî_ ÄêÈ NŸeG÷ Ú-€ê—ÏìA|€kĉcgtÇHÏhşÌ_äWHĊ–!^uĴx%úžùğŠ´€&öÛĊ„?Ğóˆ8}]1ŜxPŒĥ*“9zß• İ‚fµ9Ĵ3!bĴF|ħ†xHxÄ6²uħD*T{K<<‹ÇJîn1icèÓ²!6Óŝ5.ÔËDbĦì³]èZâ§²ÖŝIx¸pĞŸêílonĴĞ­İİ­klnï웆GüӅÉŝÖ²O‰×­·…8!üûÒ&^Ħq_Ûg0ÖPÙÓPGP nı TšÙšĝ-ÚD&4ĥŽÁjŒ…b€¤wB>DܪԂĦ̐ Ë@ó¨Ĵ*D5ż?ÊÌ\E;ĜˆŻ£‹é$\i‹ß²›hŠwçğö•âYu^Ȳ",Q˙â<4İ6ôŒì@ ˙ĥ[׀²gĤ\ œ·h+ş;,ù’8'ÀöN`iµŠZ}§ĦlEŻħ\M‡ĤVg™ĜŠÙd8ħí‡,·AĝıÈr–ëŝ@şÄ°Ô…Ú×Ç6Á½[ŭù“<ÍéHZ#9Í „Oġ`ŽÔµ ´|8€ħé\ĵ:jX Ĵ#rí% ş‰DpĥÒŜÌ8 ċXŠ^*’3ö>X?ĉZÑßd"ôşÁîmËhW8ppĊq—‰Ċ‰í(M÷Ü\‹ÀzĴÈsiB˘çÇÇΉñí˙Í|aŠXĵaöœ’ĦĴúÂîı¨‚šÇ2?ƒ4Èċ%PÖvH£ZÇ+Ĝ“ïaÇ*³é4Sĥ(Ħĵá€çÍ=k/rëlŽ™ó|[úOMpËX\K'K8!Ċü‹ÀP+XœûŬdp²˙pn9²ìdĈñÍë·Xp´÷¸ĝlMœ“Ù(8³Sòà6ܑż=˙÷ 1Ĥ{’ÂqÔcŒ;çL^]?ë\ä÷7”f§ \ekŞ‚ŭŜ# $òżŻ 0^‰cjğ*0ôhJviC?Š’İŝşĵ3wpĈh:rRžĈÁżÏ Ïĥ¨ƒE™ÁFd§¸šYœ{o‡˘ÍÀÀoPf[—Ç:N.C–Ÿû€÷ž4 ıŭ>·X$ĵ‹|ñCWÄéäsœŞÊ‰/˘[Ĉ´}ât]ĈİŜšE)^aEÀ6 ˜;AÚ42Š„LÁ ŭ[Û´^´9$İd[vòŝ@Âjì 9z)‡”M>ˆ!)X´\ќ§Á`3àÂánċ8Ÿ uKI µ‡ƒúZ;°ù\fœħ…Hx.İoIÀ,ĝ§µ&ĵBÒ%‰Ÿá§Zú~h†”Î8™Ž4Ğ­>‚e`:LŻ$0“".UY=Ömİ] g?Ħ…jÖïqpħÙFù6ŝX[ŻäÇz"c•_ïüI;…)L훒~Ùî/Ĵm Ùúl蠒úİÏÒGnˆ]Îĵ ÛħĵʓUoWTÜZ.š/‚³ µ1v';P”żÉĊyCôĦxŭ½Tú>#HŸ,oó="˜Q›şĊLQħá¤=³ËX½„`] ½+é;F%ċV´ N #œhŽ?Ú×ŝĤĥ˘¸àß³nŬLOży+ë÷? 
Š+jß´÷òçDÄßNĥTä&E:ê+s^×%8á‘1tpHvߚĵ ûˎÌé—ÜÔfĤwüßIU1.Âé|ğHOû1~[ÙĤ´ĦsïĴĠ Ïb­ĥe÷Ĝj•Rq%PW`˜KÀGÂġjÍ#ĤN3 ÙOŻŭdè'ıD*ä™Ĵ…IÈ N,yÛÀ÷ß^Gʇ@„œ—˘5yYäçí ħ÷”ŭDon·Ä÷Ú#5 ÜÎêì€#…KÈMéwÌ…Ċ§ÍĠ35PEĴfúL½Ñħ…iÍêĊŬBÍS\=Âġ£]Áï³,ŜˆbɁ£ügbŒƒlŝzŒ İP(=& ‡w“ÊñzÄŻŻóG\ŠÑQJ`Q”•B3ì2_R@цeßicÄ&mïûيÄŜ[%”4ÍmSî(>]ĥߜ˘à_= O€ó~ˆ’ŬĵÎ|óŜ1_(ï+ŻÜ|2żñ P]Ĥş+3ZN‡²7Ü7î:|öJΊçM}CŸĈx“S3ššä}êëhz^ñ8çÊÙğ6şC-€Àʨ/÷ú!>³²{ x3_óOn†Ŝ™œžïħ{óĝBgŜ>;%Dï<<ĤН(ĉûËĤñÑ\à„­ˆmĞö²IV?÷ái6ˆñé>¨ĵjSJ$üË +•+S)êZì‚ĝ×áġ~ˆO=^î@Ŝ=,L&J´/œ¨ÇoA8cd'4+zcÁş_ĦùqGV+ĉAL{÷bµ&Ä1luŜLù0ĞĦ[3³Z³ċ“…·ùô&ŒîWëèdmm`Íà˙cۚé?É<íĦÒŝœœĊӌ^röA0O4’ÍżMë”?#qq'Q_ô2ñ=ÌFĦœàšş  fQ³Ċ ĴWŽ–&ïGít›&}Iö‘ħlzÀDŸ#ó!vâ9#}nšC$ïÂnVċŠĴً16K_Qˆ½„ô[ôOòĥvĴ̃€„ġG3^|Í&ŞöÉĴ1LEŝûrñ×{ŝrÌċ‹˘Ş0ÇÇ•k€…Ğ~%|áësĞ$½­éí Ż8í£ úŽ 4pÇbÓoóO€á—DU·Ù›YĠC<t‚26tƒë Ŭ˙ĥ˙Ġdz4Ĝ:zúúz:l Ö˙ëTuW¸n×dç˘\>ĠS•ğÙFJ+ñ‰ü·Óà ĞI I‰ĤÏé Ç·§oĠ#1V{ úĝŒoTNTÙAV%Z,ßÁ”óż÷U\ŝ½ĵJX#6pR›şİ† Ċ3˘Ħħ ñ(Ú·‘mŝDßBkŝ%V¸‚;3Aöj–]Q Ê]"W›£jŝ\z!¸€=d:öM³ħDÒ>É_ ßÉ&]ğÑ÷&–ŽŻX‰ĉ(wS.!|‰`´t1÷àáÌŜÔ$ħ³‹äŒ|gù6Ÿ‰zxbHêê…ÂápİĤ[1^ĥÊFx…\>ʆÑŭċG­MKɤûw“Ñ×à#[MRI9ô?EAò$ĦK‹CŝƒŜ‘_Çï²pÍ1dï|³…ÚCü†˘Ó; ~3“ìħT—WèmĥĈuig ‚>0—‹gÓöö/9 ‰#LFd'ŜĈ`솁Yâ2ÄöògĴí¸!²òLĈàĠ)İciáY ˆC½’›@ÒugàÙ,ÂïB3lÎfÚ˘ĉ‘Ċ¤i+$vžĊ²O§´hÛꁣKñ{,œ/—cíİ“~-T¤ûfvóš·<Í-]~fyêÊèżĈ0Á›Ô Ë·ÏíXÀíwyħ) tऔĵ‡QàÂÈĞ?÷ĝ “€fìÀ9œqŸ—ï:ğ ’Ú°­!*“Šfúî%„†sŒYÄÏiĴŬ“ĝÇĞ‘/Iဎ‚Âö:t§]€/tä†[C/aCê6öWôJŞĵ ˜­eí)n4yïk7Íò+|•ÖíaÉùÌ ûQtSżĤuê“ĴĜ“˜€ PÔNO)kŭ*üI³rµu?ŸE^Ġ*MSZY^Wĥx.ûYnu˙ôN¨żZĤ]!(иJËşgl5†)6&AÂŭ80<8 O?Ô!žT$oÁ͜_LN‘ú™BÛ'{vùZ4Fıż’”˃7<ŭÙÈ$œ :y£‘šùÀ‘fíӟx Ċ {X;ŜÒäXȇbĦ¸Z·ğÏÀ™˙œı~ 8q*ŝÓ :6‡ĠÊĊûSècĤC÷Ô.Vè>Ş…SĞè$ĠĞC”@³ŜÖÔÊ\jö!më_c3ÏċÇanêì‡e…nˆó}1?ÍÌşâi0Ĵ.OÁÚbtIżŽà~ħ'³ĥÎ`==ċċœÎĵÉşoBoí/ 0…{–¸N›L[ĥ5ġÙ:ٔsĜ×ìázĦIM\!œċLĵŻÉO=ìcc¨ĤÍ/²œ"SK™…³Ĵ[GÎvËôµ˜Š„=œ˘ fh|$5żĉ=X'1!·İ )ÔCìᆇsš&Qٗgİ[—ÑÈÚëŸÁ4²á@„hŜ薉Ŝœq’“÷ĵĜƒÍng‘íù€üê@ҍiFR–ƒwZ vd“4ĝ3âV†Ĉıİ9@eÄñżžg²Ż‰û·‘|š—2µ4o£/ìi ˘/êIQĞVžèTÊÈ[Ĥş˜ÑSÈ>qıšóG†i•  Ä•ÍŻg>ç;ôuÙ˙1LŞĈ€ôzösöP z<Ħ˙ĥLĝüqÍHÔÛ ’“HOŭÌÙ Vç>›pâB4†ŝĦ$K,TJ9‡nUëèÒĜ(É@.`aô'sŜZ­\k“ž•ĤŬ#öŜë €G(,†¸„Ï(GÉİëk²wĥo ġ´¸ĠUî§Ċ.€]ĝQ×zì)&Œ} öttáÄi¨È.=ñ&YŜš›˙ŬħĵĈ—ÂÔTyg­lĉ/M9¨X }c‚èî-›C{.ŻQ˘Ĵ8|Ŝ“ĥt²Ñö›m0 
|y)ȌNb9GÜ|I”·zJSw{€ì“˘jĥ&4!ûé;nê’ıħŝĉêÂÜ+çc£B‚×·î`g/;¸Ö…DĊžż’[XŬÜ?FH&eîğ§Ù ĦkÌT) !ġĜZÚCÏ^ŜŒpf‘èfA—^ °íĉv#2Ŭö0€B§Ê (­ı܃ΕíĠELb^KĊAÊ4ßğ3²ÚÊ0ߖòŻY"ŽżÏÏŬ²$y?Y„bôé aş!v·Ħ> µÓX½+%ŠÏĴJ×âOr­âÓÔ5}³{‘°Ż]NJ9„™‹;DZâ€UĴŜû‘nӕ=&ÖÜV-ïı'ô0ì’!Ù¨ÑĠĦĥ=G)Ŝ—4™òÏ v²Á ôˆĞz9Ëİó2Jĉ„úÁ%W0‰ •"ySF›°ƒŒŽ&…£X°z_£Rì6dŻ”CÓ` E͗"`;IÉż6à€ĴHĊ}xÄS)Ÿ@Xù.˘ş  KWĥ¨ƒv›ùSì0˘½H!#ÑI—| X,u ëĥhü4,‡ÁĦH1H“Ul”ÓŠéÂûN’ìŻr`†Ċ4HçŸÂGj̀ÁĴMpêò}fŻLړÍ1RMƒŻ7ÍâÂîÇÖèÀÀqWjYBöWŬ:ħĠY€{rXr<5·äĊğħ˙%„H$˙Kx÷˘$7ġxÈ:-Efè;o=qĞŠ0FÌġ•îr„g€Îšcş…ĝlÓġ`âŭ9Ù=RYoĉ÷pFµ)kZpı>—6Ä!fÀ¸à^µ'žìğb´ä6VÈĈ’ (`ɂÍÏéqÑm@u/XRB?JÒu 2BòjJ”Î]ìİ94+ë,·Wާ[Š^¤ù  ĥ|%ϑaw•"ÙüÌöû0™˙“,ÉŸ|Ih˜|%0=xOa'ĦÔĜ§ŒUhê`Ä6Mċ!Càôp]:¨>‘IAZ)™<hÔÓY§Ô[M.°†£€ŭjüˆ|ŭ ÎzéU$CĵA³£OÏÔĦĦK°HEâñK¤hÜĥ…MğÜU ?Kâ̵Yħ °ËôU½³aÈɑoa 3dîÌ!ŠK=–oÈ8?ύ¤Zb-$×2Ĵ-ˆbžµÈ;ÇĤ‡ĵĊ[BTtáípfĥħD$x°VŽùCHXSl—Ĝ…ΖÀ)›îG8ŻäGs Ŭ>ĉÉ8ĥöĈvS7Ĵ?[ċà™Ö;Gŭ áVmàžVÜ#<@„4]ÛċogÈ"ĴQTE˜ġXıxŻ˙.84"êà‘£ÇŽ=r0*"4ĝğġŜ.V0%R„mYžehçż+îzQ#óڋÓ½ àÑbèwôNë ԃËÎQ…éöo°ñ'1ötŠù%<|üÑ.]8µ,™EğíÈê;˅˘Ê˜rkD%ᜳï: Ş҂ż Ħ³Ïñ³Ì)AmX™+)°+´˘FrçÏ3 óħzÊĦ.X·G`ŭíÈä.$lĥwŭ2VÀ²j›|Ĉ*î;͚…mrıèaÒ%<€ñÇIKĦ ġÔëëÜ Î@J×ë ÖËÁ9jŭ/à0ÀÛBp\ŬĞ'uöD£5x™”VäY+%c’ùĜŠÏÑÛîj/ĵF*š0×(glmĤK¨ħĝ!ıbĝwğ{ x/Y~_Ğ™ŝ³Ï5Ütš™ĵqɈï2ÖŻ–'qfz=é—r#(µ=Ôև—˜¨\–]C $(:ĝ] âU‹×z#ŜĠxËŠYĈÂtš!eS-öñ„&mó?²‰ĞֈyòŜo‚˜'À& o½˘œûĊ^â—Ëamĵ PĈ֋êkÍÙ>.ìÈ ·e kÎċgs†ìnênw]:BU·X÷…ġŭ“ġQ0Ö×ĝôŝ¤Ĝȁ>nv+M uٚêjR×dëš´só Ü›tŝÓĈ1âĦ!žìŻpáÇuêT„ë;òĈ£~v™c ogžÓ!Äù5g×°ġ€‹­Żĵı‘…·İŜ‹îrŠëa‚Ġ•`Ž˜ÄwáCÉĉˆġĠ Ù?›iš'>bµ›(†iÓ f”--x5|qx!àĠ6RJŒ†Öĵ•^V1)Áïik?„ŠQ9z‰îÙ;!—TëżîĜ÷ġĵ51ëüâĤñ|֟YŭĠġ’çdĜÛ żİĊr‡XjÉ´-HŸÀŝeÛÊIENDB`‚././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/meson.build0000664000175000017500000004271414712446423015270 0ustar00mattst88mattst88# Copyright Âİ 2018 Intel Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # 
copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. project( 'pixman', ['c'], version : '0.44.0', license : 'MIT', meson_version : '>= 0.52.0', default_options : ['c_std=gnu99', 'buildtype=debugoptimized'], ) config = configuration_data() cc = meson.get_compiler('c') null_dep = dependency('', required : false) add_project_arguments( cc.get_supported_arguments([ '-Wdeclaration-after-statement', '-fno-strict-aliasing', '-fvisibility=hidden', '-Wundef', # -ftrapping-math is the default for gcc, but -fno-trapping-math is the # default for clang. The FLOAT_IS_ZERO macro is used to guard against # floating-point exceptions, however with -fno-trapping-math, the compiler # can reorder floating-point operations so that they occur before the guard. # Note, this function is ignored in clang < 10.0.0. 
'-ftrapping-math' ]), language : ['c'] ) # GCC and Clang both ignore -Wno options that they don't recognize, so test for # -W, then add -Wno- if it's ignored foreach opt : ['unused-local-typedefs'] if cc.has_argument('-W' + opt) add_project_arguments(['-Wno-' + opt], language : ['c']) endif endforeach use_loongson_mmi = get_option('loongson-mmi') have_loongson_mmi = false loongson_mmi_flags = ['-mloongson-mmi'] if not use_loongson_mmi.disabled() if host_machine.cpu_family() == 'mips64' and cc.compiles(''' #ifndef __mips_loongson_vector_rev #error "Loongson Multimedia Instructions are only available on Loongson" #endif #if defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) #error "Need GCC >= 4.4 for Loongson MMI compilation" #endif #include "pixman/loongson-mmintrin.h" int main () { union { __m64 v; char c[8]; } a = { .c = {1, 2, 3, 4, 5, 6, 7, 8} }; int b = 4; __m64 c = _mm_srli_pi16 (a.v, b); return 0; }''', args : loongson_mmi_flags, include_directories : include_directories('.'), name : 'Loongson MMI Intrinsic Support') have_loongson_mmi = true endif endif if have_loongson_mmi config.set10('USE_LOONGSON_MMI', true) elif use_loongson_mmi.enabled() error('Loongson MMI Support unavailable, but required') endif use_mmx = get_option('mmx') have_mmx = false mmx_flags = [] if cc.get_id() == 'msvc' mmx_flags = ['/w14710', '/w14714', '/wd4244'] elif cc.get_id() == 'sun' mmx_flags = ['-xarch=sse'] else mmx_flags = ['-mmmx', '-Winline'] endif if not use_mmx.disabled() if host_machine.cpu_family() == 'x86_64' or cc.get_id() == 'msvc' have_mmx = true elif host_machine.cpu_family() == 'x86' and cc.compiles(''' #include #include /* Check support for block expressions */ #define _mm_shuffle_pi16(A, N) \ ({ \ __m64 ret; \ \ /* Some versions of clang will choke on K */ \ asm ("pshufw %2, %1, %0\n\t" \ : "=y" (ret) \ : "y" (A), "K" ((const int8_t)N) \ ); \ \ ret; \ }) int main () { __m64 v = _mm_cvtsi32_si64 (1); __m64 w; w = _mm_shuffle_pi16(v, 5); /* 
Some versions of clang will choke on this */ asm ("pmulhuw %1, %0\n\t" : "+y" (w) : "y" (v) ); return _mm_cvtsi64_si32 (v); }''', args : mmx_flags, name : 'MMX Intrinsic Support') have_mmx = true endif endif if have_mmx # Inline assembly do not work on X64 MSVC, so we use # compatibility intrinsics there if cc.get_id() != 'msvc' or host_machine.cpu_family() != 'x86_64' config.set10('USE_X86_MMX', true) endif elif use_mmx.enabled() error('MMX Support unavailable, but required') endif use_sse2 = get_option('sse2') have_sse2 = false sse2_flags = [] if cc.get_id() == 'sun' sse2_flags = ['-xarch=sse2'] elif cc.get_id() != 'msvc' sse2_flags = ['-msse2', '-Winline'] endif if not use_sse2.disabled() if host_machine.cpu_family() == 'x86' if cc.compiles(''' #if defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)) # if !defined(__amd64__) && !defined(__x86_64__) # error "Need GCC >= 4.2 for SSE2 intrinsics on x86" # endif #endif #include #include #include int param; int main () { __m128i a = _mm_set1_epi32 (param), b = _mm_set1_epi32 (param + 1), c; c = _mm_xor_si128 (a, b); return _mm_cvtsi128_si32(c); }''', args : sse2_flags, name : 'SSE2 Intrinsic Support') have_sse2 = true endif elif host_machine.cpu_family() == 'x86_64' have_sse2 = true endif endif if have_sse2 config.set10('USE_SSE2', true) elif use_sse2.enabled() error('sse2 Support unavailable, but required') endif use_ssse3 = get_option('ssse3') have_ssse3 = false ssse3_flags = [] if cc.get_id() != 'msvc' ssse3_flags = ['-mssse3', '-Winline'] endif # x64 pre-2010 MSVC compilers crashes when building the ssse3 code if not use_ssse3.disabled() and not (cc.get_id() == 'msvc' and cc.version().version_compare('<16') and host_machine.cpu_family() == 'x86_64') if host_machine.cpu_family().startswith('x86') if cc.compiles(''' #include #include #include int param; int main () { __m128i a = _mm_set1_epi32 (param), b = _mm_set1_epi32 (param + 1), c; c = _mm_xor_si128 (a, b); return _mm_cvtsi128_si32(c); 
}''', args : ssse3_flags, name : 'SSSE3 Intrinsic Support') have_ssse3 = true endif endif endif if have_ssse3 config.set10('USE_SSSE3', true) elif use_ssse3.enabled() error('ssse3 Support unavailable, but required') endif use_vmx = get_option('vmx') have_vmx = false vmx_flags = ['-maltivec', '-mabi=altivec'] if not use_vmx.disabled() if host_machine.cpu_family().startswith('ppc') if cc.compiles(''' #include int main () { vector unsigned int v = vec_splat_u32 (1); v = vec_sub (v, v); return 0; }''', args : vmx_flags, name : 'VMX/Altivec Intrinsic Support') have_vmx = true endif endif endif if cc.compiles(''' __asm__ ( ".func meson_test" ".endfunc" );''', name : 'test for ASM .func directive') config.set('ASM_HAVE_FUNC_DIRECTIVE', 1) endif if cc.compiles(''' __asm__ ( ".syntax unified\n" );''', name : 'test for ASM .syntax unified directive') config.set('ASM_HAVE_SYNTAX_UNIFIED', 1) endif if cc.links(''' #include __asm__ ( " .global _testlabel\n" "_testlabel:\n" ); int testlabel(); int main(int argc, char* argv[]) { return testlabel(); }''', name : 'test for ASM leading underscore') config.set('ASM_LEADING_UNDERSCORE', 1) endif if have_vmx config.set10('USE_VMX', true) elif use_vmx.enabled() error('vmx Support unavailable, but required') endif use_armv6_simd = get_option('arm-simd') have_armv6_simd = false if not use_armv6_simd.disabled() if host_machine.cpu_family() == 'arm' if cc.compiles(files('arm-simd-test.S'), name : 'ARMv6 SIMD Intrinsic Support') have_armv6_simd = true endif endif endif if have_armv6_simd config.set10('USE_ARM_SIMD', true) elif use_armv6_simd.enabled() error('ARMv6 SIMD Support unavailable, but required') endif use_neon = get_option('neon') have_neon = false if not use_neon.disabled() if host_machine.cpu_family() == 'arm' if cc.compiles(files('neon-test.S'), name : 'NEON Intrinsic Support') have_neon = true endif endif endif if have_neon config.set10('USE_ARM_NEON', true) elif use_neon.enabled() error('NEON Support unavailable, but required') 
endif use_a64neon = get_option('a64-neon') have_a64neon = false if not use_a64neon.disabled() if host_machine.cpu_family() == 'aarch64' if cc.compiles(files('a64-neon-test.S'), name : 'NEON A64 Intrinsic Support') have_a64neon = true endif endif endif if have_a64neon config.set10('USE_ARM_A64_NEON', true) elif use_a64neon.enabled() error('A64 NEON Support unavailable, but required') endif use_mips_dspr2 = get_option('mips-dspr2') have_mips_dspr2 = false mips_dspr2_flags = ['-mdspr2'] if not use_mips_dspr2.disabled() if host_machine.cpu_family().startswith('mips') if cc.compiles(''' #if !(defined(__mips__) && __mips_isa_rev >= 2) #error MIPS DSPr2 is currently only available on MIPS32r2 platforms. #endif int main () { int c = 0, a = 0, b = 0; __asm__ __volatile__ ( "precr.qb.ph %[c], %[a], %[b] \n\t" : [c] "=r" (c) : [a] "r" (a), [b] "r" (b) ); return c; }''', args : mips_dspr2_flags, name : 'DSPr2 Intrinsic Support') have_mips_dspr2 = true endif endif endif if have_mips_dspr2 config.set10('USE_MIPS_DSPR2', true) elif use_mips_dspr2.enabled() error('MIPS DSPr2 Support unavailable, but required') endif use_rvv = get_option('rvv') have_rvv = false rvv_flags = ['-march=rv64gcv'] if not use_rvv.disabled() if host_machine.cpu_family() == 'riscv64' if cc.compiles(''' #include int main() { vfloat32m1_t tmp; return 0; } ''', args : rvv_flags, name : 'RISC-V Vector Intrinsic Support') have_rvv = true endif endif endif if have_rvv config.set10('USE_RVV', true) elif use_rvv.enabled() error('RISC-V Vector Support unavailable, but required') endif use_gnu_asm = get_option('gnu-inline-asm') if not use_gnu_asm.disabled() if cc.compiles(''' int main () { /* Most modern architectures have a NOP instruction, so this is a fairly generic test. 
*/ asm volatile ( "\tnop\n" : : : "cc", "memory" ); return 0; } ''', name : 'GNU Inline ASM support.') config.set10('USE_GCC_INLINE_ASM', true) elif use_gnu_asm.enabled() error('GNU inline assembly support missing but required.') endif endif if get_option('timers') config.set('PIXMAN_TIMERS', 1) endif if get_option('gnuplot') config.set('PIXMAN_GNUPLOT', 1) endif if cc.get_id() != 'msvc' dep_openmp = dependency('openmp', required : get_option('openmp')) if dep_openmp.found() config.set10('USE_OPENMP', true) elif meson.version().version_compare('<0.51.0') # In versions of meson before 0.51 the openmp dependency can still # inject arguments in the the auto case when it is not found, the # detection does work correctly in that case however, so we just # replace dep_openmp with null_dep to work around this. dep_openmp = null_dep endif else # the MSVC implementation of openmp is not compliant enough for our # uses here, so we disable it here. # Please see: https://stackoverflow.com/questions/12560243/using-threadprivate-directive-in-visual-studio dep_openmp = null_dep endif dep_gtk = dependency('gtk+-3.0', required : get_option('gtk').enabled() and get_option('demos').enabled()) dep_glib = dependency('glib-2.0', required : get_option('gtk').enabled() and get_option('demos').enabled()) dep_png = null_dep if not get_option('libpng').disabled() dep_png = dependency('libpng', required : false) # We need to look for the right library to link to for libpng, # when looking for libpng manually foreach png_ver : [ '16', '15', '14', '13', '12', '10' ] if not dep_png.found() dep_png = cc.find_library('libpng@0@'.format(png_ver), has_headers : ['png.h'], required : false) endif endforeach if get_option('libpng').enabled() and not dep_png.found() error('libpng support requested but libpng library not found') endif endif if dep_png.found() config.set('HAVE_LIBPNG', 1) endif dep_m = cc.find_library('m', required : false) dep_threads = dependency('threads') # MSVC-style compilers do 
not come with pthreads, so we must link # to it explicitly, currently pthreads-win32 is supported pthreads_found = false if dep_threads.found() and cc.has_header('pthread.h') if cc.get_argument_syntax() == 'msvc' pthread_lib = null_dep foreach pthread_type : ['VC3', 'VSE3', 'VCE3', 'VC2', 'VSE2', 'VCE2'] if not pthread_lib.found() pthread_lib = cc.find_library('pthread@0@'.format(pthread_type), required : false) endif endforeach if pthread_lib.found() pthreads_found = true dep_threads = pthread_lib endif else pthreads_found = true endif else # Avoid linking with -pthread if we don't actually have pthreads dep_threads = null_dep endif if pthreads_found config.set('HAVE_PTHREADS', 1) endif funcs = ['sigaction', 'alarm', 'mprotect', 'getpagesize', 'mmap', 'getisax', 'gettimeofday'] # mingw claimes to have posix_memalign, but it doesn't if host_machine.system() != 'windows' funcs += 'posix_memalign' endif foreach f : funcs if cc.has_function(f) config.set('HAVE_@0@'.format(f.to_upper()), 1) endif endforeach # This is only used in one test, that defines _GNU_SOURCE if cc.has_function('feenableexcept', prefix : '#define _GNU_SOURCE\n#include ', dependencies : dep_m) config.set('HAVE_FEENABLEEXCEPT', 1) endif if cc.has_header_symbol('fenv.h', 'FE_DIVBYZERO') config.set('HAVE_FEDIVBYZERO', 1) endif foreach h : ['sys/mman.h', 'fenv.h', 'unistd.h'] if cc.check_header(h) config.set('HAVE_@0@'.format(h.underscorify().to_upper()), 1) endif endforeach use_tls = get_option('tls') have_tls = '' if not use_tls.disabled() # gcc on Windows only warns that __declspec(thread) isn't supported, # passing -Werror=attributes makes it fail. 
if (host_machine.system() == 'windows' and cc.compiles('int __declspec(thread) foo;', args : cc.get_supported_arguments(['-Werror=attributes']), name : 'TLS via __declspec(thread)')) have_tls = '__declspec(thread)' elif cc.compiles('int __thread foo;', name : 'TLS via __thread') have_tls = '__thread' endif endif if have_tls != '' config.set('TLS', have_tls) elif use_tls.enabled() error('Compiler TLS Support unavailable, but required') endif if cc.links(''' static int x = 1; static void __attribute__((constructor)) constructor_function () { x = 0; } int main (void) { return x; } ''', name : '__attribute__((constructor))') config.set('TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR', 1) endif if cc.links(''' static int x = 1; static void __attribute__((destructor)) destructor_function () { x = 0; } int main (void) { return x; } ''', name : '__attribute__((destructor))') config.set('TOOLCHAIN_SUPPORTS_ATTRIBUTE_DESTRUCTOR', 1) endif if cc.links( ' __float128 a = 1.0Q, b = 2.0Q; int main (void) { return a + b; }', name : 'Has float128 support') config.set('HAVE_FLOAT128', 1) endif if cc.has_function('clz') config.set('HAVE_BUILTIN_CLZ', 1) endif if cc.links(''' unsigned int __attribute__ ((vector_size(16))) e, a, b; int main (void) { e = a - ((b << 27) + (b >> (32 - 27))) + 1; return e[0]; } ''', name : 'Support for GCC vector extensions') config.set('HAVE_GCC_VECTOR_EXTENSIONS', 1) endif if host_machine.endian() == 'big' config.set('WORDS_BIGENDIAN', 1) endif config.set('SIZEOF_LONG', cc.sizeof('long')) # Required to make pixman-private.h config.set('PACKAGE', 'foo') version_conf = configuration_data() split = meson.project_version().split('.') version_conf.set('PIXMAN_VERSION_MAJOR', split[0]) version_conf.set('PIXMAN_VERSION_MINOR', split[1]) version_conf.set('PIXMAN_VERSION_MICRO', split[2]) add_project_arguments('-DHAVE_CONFIG_H', language : ['c']) subdir('pixman') if not get_option('tests').disabled() or not get_option('demos').disabled() subdir(join_paths('test', 
'utils')) endif if not get_option('demos').disabled() subdir('demos') endif if not get_option('tests').disabled() subdir('test') endif pkg = import('pkgconfig') pkg.generate(libpixman, name : 'Pixman', filebase : 'pixman-1', description : 'The pixman library (version 1)', subdirs: 'pixman-1', version : meson.project_version(), ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/meson_options.txt0000664000175000017500000000602514712446423016556 0ustar00mattst88mattst88# Copyright Âİ 2018 Intel Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
option( 'loongson-mmi', type : 'feature', description : 'Use Loongson MMI intrinsic optimized paths', ) option( 'mmx', type : 'feature', description : 'Use X86 MMX intrinsic optimized paths', ) option( 'sse2', type : 'feature', description : 'Use X86 SSE2 intrinsic optimized paths', ) option( 'ssse3', type : 'feature', description : 'Use X86 SSSE3 intrinsic optimized paths', ) option( 'vmx', type : 'feature', description : 'Use PPC VMX/Altivec intrinsic optimized paths', ) option( 'arm-simd', type : 'feature', description : 'Use ARMv6 SIMD intrinsic optimized paths', ) option( 'neon', type : 'feature', description : 'Use ARM NEON intrinsic optimized paths', ) option( 'a64-neon', type : 'feature', description : 'Use ARM A64 NEON intrinsic optimized paths', ) option( 'mips-dspr2', type : 'feature', description : 'Use MIPS32 DSPr2 intrinsic optimized paths', ) option( 'rvv', type : 'feature', description : 'Use RISC-V Vector extension', ) option( 'gnu-inline-asm', type : 'feature', description : 'Use GNU style inline assembler', ) option( 'tls', type : 'feature', description : 'Use compiler support for thread-local storage', ) option( 'cpu-features-path', type : 'string', description : 'Path to platform-specific cpu-features.[ch] for systems that do not provide it (e.g. 
Android)', ) option( 'openmp', type : 'feature', description : 'Enable OpenMP for tests', ) option( 'timers', type : 'boolean', value : false, description : 'Enable TIMER_* macros', ) option( 'gnuplot', type : 'boolean', value : false, description : 'Enable output of filters that can be piped to gnuplot', ) option( 'gtk', type : 'feature', description : 'Enable demos using GTK', ) option( 'libpng', type : 'feature', description : 'Use libpng in tests' ) option( 'tests', type : 'feature', description : 'Build tests' ) option( 'demos', type : 'feature', description : 'Build demos' ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/neon-test.S0000664000175000017500000000033014712446423015152 0ustar00mattst88mattst88.text .fpu neon .arch armv7a .object_arch armv4 .eabi_attribute 10, 0 .arm .altmacro #ifndef __ARM_EABI__ #error EABI is required (to be sure that calling conventions are compatible) #endif pld [r0] vmovn.u16 d0, q0 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/0000775000175000017500000000000014712446423014412 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/dither/0000775000175000017500000000000014712446423015671 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/dither/blue-noise-64x64.h0000664000175000017500000005667514712446423020720 0ustar00mattst88mattst88/* WARNING: This file is generated by make-blue-noise.c * Please edit that file instead of this one. 
*/ #ifndef BLUE_NOISE_64X64_H #define BLUE_NOISE_64X64_H #include static const uint16_t dither_blue_noise_64x64[4096] = { 3039, 1368, 3169, 103, 2211, 1248, 2981, 668, 2633, 37, 3963, 2903, 384, 2564, 3115, 1973, 3348, 830, 2505, 1293, 3054, 1060, 1505, 3268, 400, 1341, 593, 3802, 3384, 429, 4082, 1411, 2503, 3863, 126, 1292, 1887, 2855, 205, 2094, 2977, 1899, 3924, 356, 3088, 2500, 3942, 1409, 2293, 1734, 3732, 1291, 3227, 277, 2054, 786, 2871, 411, 2425, 1678, 3986, 455, 2879, 2288, 388, 1972, 3851, 778, 2768, 3697, 944, 2123, 1501, 3533, 937, 1713, 1381, 3888, 156, 1242, 516, 2888, 1607, 3676, 632, 2397, 3804, 2673, 1898, 3534, 2593, 1777, 1170, 2299, 3013, 1838, 523, 3053, 1647, 3601, 3197, 959, 1520, 3633, 893, 2437, 3367, 2187, 1258, 137, 1965, 401, 3546, 643, 3087, 2498, 733, 2786, 3371, 4053, 1266, 1977, 3663, 183, 2570, 2107, 1183, 3708, 907, 2473, 1151, 3363, 1527, 1902, 232, 3903, 3060, 496, 2486, 3206, 2165, 861, 2387, 3653, 2101, 3972, 132, 2162, 3437, 1827, 215, 895, 3114, 271, 969, 2932, 197, 1598, 878, 3696, 1140, 2120, 904, 2431, 302, 3846, 2675, 481, 3187, 66, 1440, 650, 3833, 2826, 3435, 901, 2936, 2111, 250, 1875, 3609, 1174, 1747, 162, 2346, 3420, 913, 3172, 1383, 752, 3298, 1735, 3540, 2938, 249, 2324, 526, 3099, 2561, 1324, 2347, 1861, 1200, 3702, 257, 3442, 1514, 2999, 992, 1766, 2735, 1163, 478, 2943, 1279, 3635, 2177, 1464, 3672, 2386, 3871, 3340, 2690, 64, 3489, 2811, 3999, 633, 1948, 1243, 2269, 1807, 1143, 2750, 3729, 1790, 2363, 1053, 1537, 2636, 4065, 1076, 1476, 3869, 450, 2200, 2676, 658, 2979, 1548, 544, 1913, 2838, 3911, 116, 2698, 517, 1295, 3997, 1739, 3665, 1083, 3509, 599, 3400, 118, 2956, 720, 2689, 1907, 567, 2523, 284, 3397, 711, 3219, 2450, 3985, 1665, 2549, 562, 3011, 1855, 729, 1355, 528, 1908, 2456, 1384, 337, 1540, 2654, 3138, 3513, 703, 4080, 3314, 2047, 855, 3037, 209, 3317, 577, 1828, 17, 2336, 3193, 2748, 962, 3441, 1450, 3246, 1075, 3878, 2615, 3497, 1033, 2310, 1442, 2183, 1654, 3254, 2061, 738, 2832, 148, 2030, 
1670, 909, 3850, 2109, 1533, 4046, 1085, 3098, 3897, 1378, 2248, 3829, 1495, 1966, 23, 797, 3427, 1124, 4057, 95, 2787, 2190, 3074, 3950, 742, 3194, 1999, 3386, 1113, 16, 1657, 2804, 201, 1543, 383, 2559, 1325, 3604, 2068, 2493, 3771, 1284, 3460, 710, 1716, 2447, 80, 3811, 2032, 347, 2227, 15, 1689, 397, 3084, 662, 3798, 973, 43, 2608, 3143, 1459, 2423, 4066, 2770, 3191, 1283, 2630, 314, 3235, 2289, 72, 1822, 2840, 924, 350, 2653, 1057, 3715, 2235, 2775, 346, 2083, 1553, 3292, 1081, 274, 1686, 1188, 2327, 3743, 578, 2234, 3916, 2519, 1011, 3056, 2207, 3438, 3890, 537, 1617, 837, 3094, 373, 2795, 1980, 276, 3951, 1353, 3015, 844, 1724, 3651, 2923, 1316, 4092, 2504, 3627, 1936, 2854, 2461, 3929, 1193, 421, 3746, 820, 1180, 286, 2261, 532, 3625, 1812, 802, 1327, 3527, 670, 3730, 2025, 3124, 3565, 529, 2960, 1769, 1390, 3196, 2494, 3756, 796, 3618, 2602, 3463, 2847, 166, 953, 1745, 2900, 438, 2070, 1418, 3741, 639, 1205, 1891, 2882, 2282, 4012, 1182, 1696, 3630, 951, 2904, 2170, 3530, 375, 2320, 2742, 1132, 701, 3216, 2023, 847, 1230, 310, 3431, 770, 1961, 3531, 1702, 2181, 3370, 1877, 3072, 1571, 3389, 1071, 2415, 3782, 2803, 1610, 2454, 1211, 182, 1655, 2322, 1282, 3372, 287, 3935, 704, 1232, 415, 1910, 2286, 1399, 556, 1964, 4068, 2444, 3605, 1272, 3345, 816, 3526, 256, 2402, 2777, 955, 345, 3289, 111, 2727, 635, 2396, 1488, 3331, 600, 1032, 1575, 4026, 515, 3507, 2433, 1605, 460, 3364, 2783, 1810, 1397, 2334, 223, 2945, 688, 2533, 99, 2705, 624, 3944, 2073, 46, 2978, 508, 2132, 269, 3173, 3453, 2631, 4076, 694, 1892, 2586, 972, 2178, 3470, 1695, 2849, 3141, 77, 3884, 994, 3029, 1536, 673, 3083, 124, 2583, 1722, 2821, 1944, 4027, 1661, 3176, 3728, 1337, 1813, 3503, 2035, 3930, 157, 2537, 1865, 3096, 2646, 1941, 3252, 1449, 135, 2836, 3758, 2139, 84, 3678, 3106, 3862, 1545, 3307, 1320, 3955, 1031, 3664, 1306, 2460, 776, 1487, 3294, 1187, 3990, 1903, 1021, 549, 1484, 943, 3027, 97, 3853, 1499, 2880, 198, 2575, 3995, 1089, 1587, 2475, 3282, 339, 2657, 1158, 2105, 1493, 
3943, 580, 3232, 1287, 846, 48, 2480, 2112, 771, 2534, 459, 3134, 850, 1298, 3790, 325, 3652, 1249, 193, 940, 2202, 3895, 1829, 911, 1366, 2577, 1069, 534, 2104, 1009, 2667, 392, 1983, 2917, 1645, 324, 3439, 2869, 3705, 1767, 2592, 756, 2916, 3683, 2276, 2850, 2053, 3594, 2403, 3181, 634, 3699, 1933, 906, 519, 2150, 3673, 764, 1770, 2220, 3795, 3336, 502, 3547, 2339, 1110, 301, 2210, 3354, 3643, 569, 1518, 2940, 3973, 1138, 1613, 2773, 2127, 2983, 1671, 769, 2161, 3800, 2730, 3127, 1179, 533, 3259, 2284, 4014, 1651, 2820, 3566, 653, 1839, 3455, 2399, 789, 3149, 2244, 1863, 1099, 474, 2307, 158, 3541, 1312, 1711, 0, 3902, 360, 1629, 1091, 395, 1781, 1191, 2374, 3353, 1419, 3225, 206, 2931, 3553, 1046, 54, 1646, 2470, 910, 1860, 3137, 3770, 2635, 1562, 2809, 1215, 3788, 222, 2199, 3335, 67, 3606, 524, 1001, 3309, 2410, 3473, 591, 1619, 291, 2502, 3629, 2891, 335, 741, 3378, 168, 2384, 3129, 4051, 22, 1444, 3613, 543, 3893, 186, 2665, 4062, 933, 3058, 2142, 449, 2711, 3224, 849, 1330, 3349, 2195, 2670, 3484, 2993, 32, 3774, 2722, 1859, 2548, 1268, 583, 2027, 3165, 2807, 4029, 227, 2897, 1434, 721, 1816, 195, 905, 2066, 3258, 1754, 970, 2674, 1880, 2338, 3915, 1485, 2660, 14, 1313, 2914, 2046, 4074, 791, 1917, 1301, 1725, 2687, 2019, 1443, 418, 1186, 1664, 2859, 1049, 2056, 2741, 1226, 1589, 3186, 2042, 1377, 3449, 1574, 3941, 1063, 1930, 2501, 3751, 2930, 671, 4031, 888, 2081, 1544, 684, 1117, 351, 4052, 1698, 2393, 3881, 1439, 785, 1277, 2013, 3488, 441, 2459, 3980, 3061, 3481, 2543, 419, 3020, 609, 3515, 1350, 799, 2878, 348, 2034, 3966, 1824, 950, 3281, 1394, 2239, 3452, 55, 3922, 3119, 892, 3785, 3023, 2140, 782, 2492, 3817, 241, 3355, 2424, 856, 3639, 612, 2556, 245, 2858, 705, 2316, 3562, 495, 1748, 128, 1912, 1454, 280, 2552, 3905, 3130, 2274, 3472, 834, 3055, 240, 2692, 471, 2272, 3301, 2632, 1080, 3693, 2136, 1029, 1364, 590, 1611, 4067, 1190, 2360, 3827, 261, 3180, 1768, 3471, 1103, 3003, 520, 3674, 151, 2571, 555, 3033, 982, 2353, 504, 1259, 2555, 149, 
3889, 3380, 493, 3178, 1681, 663, 1924, 2990, 49, 1792, 3861, 1192, 1987, 3273, 297, 1457, 3043, 1177, 2292, 3249, 2829, 3682, 1154, 1758, 428, 2872, 1993, 1500, 3703, 1129, 3421, 1840, 3754, 163, 659, 1733, 3182, 38, 2875, 1957, 3614, 2237, 78, 1873, 2801, 1513, 2121, 1074, 2516, 667, 3710, 1429, 2430, 2088, 2830, 1072, 3557, 1531, 2733, 1955, 3286, 3590, 1826, 2778, 1068, 1932, 1452, 2279, 1185, 3564, 3952, 1391, 2726, 3313, 2331, 870, 3709, 1674, 2772, 4085, 808, 2596, 3848, 927, 538, 2335, 3334, 773, 3597, 1347, 109, 2663, 608, 2108, 2994, 936, 1524, 2922, 3968, 2422, 1467, 845, 3870, 321, 2704, 1073, 3308, 3680, 823, 430, 3375, 4030, 112, 2171, 2695, 267, 3374, 731, 1627, 3919, 1871, 352, 3839, 1370, 234, 794, 1532, 3245, 647, 3575, 74, 3045, 2766, 285, 2174, 498, 1059, 1551, 385, 3125, 2598, 143, 1128, 2095, 3395, 318, 1590, 3524, 1345, 1969, 242, 2759, 2092, 947, 3926, 3244, 2356, 1658, 6, 3593, 2554, 1172, 1995, 371, 2755, 3417, 2294, 1570, 3164, 748, 2517, 1401, 3111, 2420, 1662, 2910, 1276, 3276, 854, 1804, 4000, 1253, 2987, 229, 2344, 3184, 649, 2196, 2921, 4095, 2389, 1289, 2193, 2579, 4023, 757, 1858, 986, 3199, 2514, 3475, 4021, 2154, 651, 1432, 3468, 2404, 574, 1799, 3105, 2145, 86, 2614, 3218, 1565, 4088, 2481, 3079, 1815, 323, 1212, 3837, 759, 2159, 435, 3223, 784, 3659, 1114, 1888, 550, 1221, 3786, 1803, 499, 2117, 185, 3763, 942, 589, 2001, 3838, 1483, 3154, 2256, 468, 2544, 3403, 898, 1208, 2610, 3622, 967, 1929, 378, 3781, 220, 1656, 1115, 3347, 2428, 3822, 1577, 712, 1959, 110, 2765, 1762, 3854, 979, 2928, 3714, 1371, 746, 3969, 2884, 975, 3779, 641, 1142, 159, 1460, 702, 3485, 2866, 2495, 3330, 1305, 3937, 1635, 2229, 2962, 146, 4055, 3091, 2417, 100, 3508, 2933, 4006, 1167, 1920, 2760, 3552, 2545, 433, 2845, 142, 1056, 1886, 3616, 1435, 2099, 3803, 1749, 27, 1446, 3350, 2843, 884, 3310, 2948, 2103, 447, 1351, 187, 2895, 3655, 1256, 3036, 932, 3325, 2257, 451, 1915, 40, 2780, 2438, 1112, 1814, 423, 2290, 1905, 2898, 3419, 2306, 3760, 1938, 
486, 1019, 1791, 3010, 2628, 203, 3408, 1269, 2507, 1606, 862, 2779, 2078, 952, 1529, 2638, 708, 3332, 1413, 2, 1726, 1156, 3500, 2392, 3791, 3076, 812, 107, 2861, 501, 3050, 3487, 2455, 594, 1731, 2685, 1498, 680, 3908, 2621, 3529, 1786, 2236, 342, 2569, 1526, 3722, 230, 1290, 3203, 3947, 1609, 3516, 467, 3267, 3685, 1461, 3140, 3569, 367, 1759, 928, 2754, 1332, 2219, 4034, 260, 655, 1984, 978, 3814, 617, 2086, 3525, 279, 3841, 1373, 3361, 319, 2251, 3066, 407, 2382, 3918, 3133, 2168, 762, 1523, 507, 2641, 1677, 4025, 2413, 1584, 793, 2049, 1109, 3962, 2218, 1194, 3692, 266, 1687, 981, 3103, 740, 3983, 1005, 3434, 570, 2383, 1942, 2718, 676, 2462, 1007, 2089, 1308, 2222, 233, 2568, 829, 1241, 2669, 3987, 514, 3303, 69, 3142, 1603, 3560, 2295, 3288, 1497, 2696, 1764, 2865, 1058, 3271, 1914, 477, 2529, 3927, 1736, 1273, 3752, 2029, 1012, 565, 2798, 4078, 1949, 3305, 1175, 2179, 380, 3366, 1195, 3849, 2637, 416, 2959, 125, 3396, 2467, 2036, 3234, 2340, 68, 2819, 1436, 2011, 3139, 1704, 4073, 860, 3582, 1468, 2969, 211, 3157, 4056, 866, 2935, 2000, 3923, 31, 2157, 1477, 2429, 1147, 3792, 2557, 774, 2802, 1153, 3747, 464, 3192, 42, 3904, 539, 1474, 2283, 803, 2876, 1061, 75, 3477, 747, 2893, 1538, 3626, 251, 1322, 2506, 189, 2791, 3667, 939, 2991, 1971, 175, 3195, 1416, 3648, 1857, 3052, 454, 851, 3789, 1271, 1906, 3694, 2484, 406, 2757, 26, 1189, 2909, 296, 2215, 3784, 1864, 637, 2715, 1673, 3445, 581, 1572, 3059, 3469, 761, 2984, 1737, 2058, 440, 1414, 1921, 121, 2527, 894, 2223, 1302, 2377, 3077, 2666, 3759, 3198, 1811, 3661, 2166, 2731, 1883, 359, 3285, 2458, 1805, 3459, 926, 3834, 675, 1893, 1496, 2612, 657, 3523, 1763, 2354, 564, 961, 1367, 3977, 1588, 2714, 322, 3446, 1088, 625, 3887, 1354, 3535, 2090, 3316, 1760, 1127, 483, 3491, 1421, 2301, 94, 1202, 3740, 2311, 1014, 1878, 3836, 180, 3412, 991, 2868, 3953, 3450, 3081, 1632, 4071, 1882, 3543, 726, 1719, 179, 1171, 364, 1420, 622, 3090, 1490, 946, 4007, 2212, 1102, 619, 2739, 2189, 1669, 2937, 3426, 39, 3940, 
2191, 1264, 887, 4091, 2792, 2135, 4, 2883, 2281, 631, 3044, 1641, 2232, 3243, 1773, 2319, 827, 2591, 629, 3938, 2426, 3222, 2629, 1044, 3879, 3293, 1952, 2749, 275, 2590, 472, 1372, 2496, 660, 3669, 2264, 208, 915, 2167, 561, 2828, 307, 3265, 1104, 3964, 2155, 3425, 1951, 4077, 2391, 283, 3387, 2581, 115, 1415, 3069, 3896, 141, 3158, 1214, 442, 2405, 1349, 3085, 425, 2528, 3002, 312, 1602, 3588, 1137, 3323, 1963, 1002, 3578, 2521, 127, 925, 2970, 273, 3737, 1573, 167, 2863, 1509, 800, 147, 2059, 2942, 409, 921, 3151, 1451, 3909, 3333, 2844, 2096, 1512, 3136, 1210, 1798, 2709, 1331, 3586, 1034, 1521, 2441, 2926, 488, 2585, 775, 3031, 2693, 879, 3602, 1173, 2028, 3654, 2781, 841, 1975, 1507, 3646, 768, 3991, 2012, 996, 3544, 1666, 3810, 1990, 3360, 753, 2597, 3736, 304, 1473, 3828, 485, 1334, 4008, 2072, 3495, 1136, 2806, 2004, 3236, 1010, 2130, 3819, 1750, 3567, 644, 2515, 1794, 3636, 698, 2137, 1162, 832, 3761, 326, 2613, 513, 3302, 3820, 357, 3163, 2259, 3733, 101, 1922, 1386, 3587, 1640, 28, 1286, 2141, 1761, 2918, 693, 1639, 457, 3250, 2434, 365, 2599, 1729, 3284, 2643, 306, 2793, 689, 1090, 104, 1309, 2305, 1831, 2776, 859, 2446, 2915, 1778, 3337, 2677, 614, 1508, 2409, 469, 4033, 1321, 3563, 402, 3131, 2720, 1093, 1569, 4042, 1229, 2277, 216, 3046, 1817, 57, 3006, 1684, 4059, 2016, 795, 2440, 1652, 1960, 610, 2763, 920, 3864, 3110, 1026, 2326, 3762, 3233, 521, 3856, 173, 2457, 3939, 2138, 1262, 3572, 989, 3021, 2238, 119, 1445, 3832, 1809, 2297, 3467, 2700, 3684, 3102, 394, 4036, 2050, 3256, 89, 2198, 1079, 248, 1845, 3805, 3104, 880, 1779, 2688, 717, 2373, 1375, 262, 2249, 3071, 13, 2813, 3429, 1600, 3984, 2416, 3603, 1299, 2298, 998, 3492, 1393, 2951, 10, 4009, 1247, 3462, 1679, 2204, 414, 2736, 316, 1894, 2816, 1050, 3373, 1462, 3107, 817, 3464, 21, 1835, 4070, 568, 1178, 3718, 875, 3168, 466, 2974, 1458, 2084, 616, 1564, 1018, 1693, 546, 1244, 3899, 716, 3160, 3608, 2877, 1220, 334, 3443, 2270, 44, 3000, 1843, 3928, 3405, 766, 3686, 2040, 587, 993, 2647, 
387, 930, 2753, 630, 3274, 150, 2808, 453, 3638, 1092, 2352, 3030, 239, 2562, 700, 3240, 1257, 4016, 730, 1515, 2203, 2551, 417, 1866, 1123, 2348, 2902, 1550, 2678, 2075, 3238, 1630, 2531, 2115, 1255, 4054, 840, 290, 3874, 2477, 3399, 2250, 3577, 2817, 1626, 2576, 1356, 2315, 792, 2087, 2618, 1612, 3855, 1263, 3637, 1036, 494, 1535, 2553, 1198, 1715, 3867, 3170, 1359, 1954, 3483, 1539, 2069, 3886, 1772, 2487, 1534, 2045, 3242, 806, 1578, 2018, 3948, 1423, 3596, 2076, 2466, 3424, 139, 3688, 871, 4049, 2852, 3342, 547, 3719, 327, 852, 3505, 207, 2794, 542, 3600, 45, 2411, 3324, 1788, 3012, 1235, 61, 2655, 917, 253, 1986, 3738, 313, 1706, 4072, 120, 3229, 957, 597, 2024, 3262, 2453, 2857, 2002, 3190, 210, 2784, 2206, 300, 2400, 3766, 553, 3152, 218, 1150, 2988, 883, 3753, 627, 2664, 3831, 437, 3385, 1008, 2957, 60, 1636, 891, 2899, 1776, 3062, 1315, 2026, 194, 1643, 2079, 1296, 3201, 2465, 1379, 1927, 3898, 1125, 1847, 2846, 1552, 1028, 2725, 2169, 787, 3202, 1441, 3982, 3032, 1052, 3251, 605, 2639, 3073, 1431, 3642, 2329, 2949, 341, 1634, 833, 129, 4020, 916, 3571, 669, 1506, 3411, 821, 2856, 1207, 2337, 2683, 3448, 340, 2214, 3128, 235, 1738, 1288, 2833, 2419, 606, 1884, 2668, 552, 3765, 1176, 399, 2302, 596, 3591, 2634, 767, 3845, 2767, 995, 3967, 491, 3057, 814, 2300, 3422, 691, 3797, 254, 3645, 509, 3478, 1836, 2119, 475, 2445, 1525, 2175, 3539, 914, 1926, 473, 1157, 1800, 3971, 2701, 3739, 2129, 3486, 1333, 1784, 2366, 2982, 1070, 4089, 1802, 73, 1642, 3958, 835, 1837, 1480, 4043, 1217, 2469, 3416, 2113, 88, 3668, 1240, 3255, 3920, 2355, 3167, 2003, 2645, 3936, 3228, 1592, 1144, 3474, 2394, 79, 1820, 2241, 1594, 3656, 2584, 153, 1448, 3034, 2005, 2511, 1692, 1335, 3913, 217, 2822, 3391, 745, 3813, 192, 1274, 2941, 3847, 2489, 3440, 744, 161, 1422, 1086, 572, 3004, 2617, 338, 3807, 2031, 236, 2472, 3065, 2098, 3358, 362, 2163, 3574, 497, 2788, 1970, 948, 3885, 685, 3100, 1712, 2228, 292, 1408, 1016, 164, 3537, 1417, 941, 34, 2172, 3001, 358, 1491, 3147, 699, 
3356, 258, 1149, 2946, 1787, 3931, 382, 1146, 3291, 818, 2890, 2379, 1096, 3679, 1328, 1901, 3162, 2747, 1730, 2253, 5, 1556, 2818, 2093, 3166, 2522, 3410, 2287, 1701, 956, 3237, 620, 1596, 3300, 1307, 511, 3701, 1020, 2939, 1362, 2532, 3208, 749, 3641, 160, 1522, 2624, 1095, 4086, 826, 2841, 3583, 2173, 1727, 723, 2925, 1911, 2482, 3726, 863, 1962, 4028, 1111, 2835, 3773, 2449, 2022, 582, 3278, 923, 2619, 2152, 4039, 92, 1934, 3145, 677, 2530, 53, 2303, 1003, 458, 3989, 739, 3321, 1064, 369, 3556, 877, 1900, 426, 3876, 1, 3617, 2106, 1197, 2805, 3634, 857, 2706, 1504, 2418, 682, 3868, 20, 1139, 1688, 2333, 3311, 2907, 1945, 265, 2385, 3433, 1601, 636, 2620, 3095, 4044, 386, 3382, 1184, 527, 2814, 3414, 2342, 465, 1889, 1343, 874, 3479, 1502, 2233, 3689, 1385, 559, 2745, 1463, 3465, 376, 1718, 3217, 4045, 1580, 3612, 2525, 1228, 3018, 1958, 3725, 2358, 1361, 3996, 1581, 3063, 1224, 2737, 1475, 2442, 3946, 191, 1796, 2128, 3975, 134, 1916, 3318, 1597, 2071, 3749, 2672, 403, 1278, 602, 3745, 3220, 1374, 445, 2064, 3830, 243, 1252, 2390, 1563, 2724, 3875, 1818, 1346, 165, 1650, 3264, 2680, 117, 2998, 4081, 343, 2799, 9, 3122, 1743, 3724, 1040, 2231, 3842, 1209, 900, 398, 2851, 697, 1797, 3482, 293, 2679, 1649, 566, 2954, 91, 2697, 714, 2060, 3211, 781, 480, 3040, 1038, 2611, 666, 2989, 3458, 1201, 2796, 548, 2975, 839, 3121, 1850, 4001, 2208, 1631, 790, 2558, 2972, 1148, 3213, 1849, 3624, 971, 2102, 108, 772, 3101, 2589, 3777, 1042, 656, 3907, 2097, 1615, 2540, 805, 1935, 1231, 3494, 2451, 268, 2995, 750, 2682, 2020, 3024, 1392, 2124, 3279, 106, 2217, 1387, 822, 3214, 3825, 2160, 1000, 2395, 3691, 228, 4038, 1872, 3413, 1608, 2225, 3536, 303, 1653, 886, 2541, 224, 4037, 2252, 1428, 172, 3504, 958, 2848, 113, 3628, 1834, 3979, 19, 2317, 779, 2797, 518, 3174, 3549, 1482, 2266, 444, 2014, 3555, 2439, 1213, 3113, 535, 1135, 3204, 3858, 2309, 931, 623, 2009, 3359, 1566, 140, 3550, 1808, 3872, 2488, 1152, 3764, 2892, 3960, 2412, 353, 1223, 1825, 3444, 3116, 1717, 1082, 
2313, 1280, 2661, 82, 3852, 1389, 3200, 2330, 3812, 2038, 3581, 1728, 1039, 3339, 2427, 586, 2580, 1238, 3328, 2280, 1047, 595, 2662, 1363, 3338, 1620, 3934, 2497, 1881, 1054, 3954, 3215, 864, 2887, 1801, 320, 3519, 2378, 3704, 1753, 424, 2958, 1660, 4005, 2601, 1116, 3912, 2381, 573, 2740, 200, 828, 1667, 432, 1931, 1035, 1616, 3598, 2640, 728, 264, 1437, 557, 3501, 2966, 372, 3734, 974, 1978, 758, 2719, 1145, 452, 1433, 725, 2681, 408, 3843, 1918, 1547, 3906, 1996, 503, 1456, 3019, 3493, 1700, 3742, 355, 2134, 176, 1311, 615, 2867, 315, 1680, 1314, 8, 3297, 1494, 783, 1950, 83, 2656, 1382, 3561, 138, 2834, 1404, 330, 1904, 3156, 1027, 1357, 3381, 3041, 3666, 2729, 734, 3415, 177, 3051, 2021, 4079, 2823, 3775, 2186, 2616, 869, 1668, 3148, 2367, 3315, 393, 4075, 1870, 2920, 3343, 2362, 3188, 1303, 2782, 825, 3171, 259, 2905, 3717, 2538, 184, 2074, 838, 2860, 2407, 1024, 3496, 3008, 3706, 1985, 2349, 3623, 2582, 4058, 2184, 2694, 3873, 2964, 990, 3346, 690, 2033, 1066, 2201, 3490, 2971, 718, 3700, 2188, 4061, 391, 1989, 2325, 1430, 3150, 2125, 2526, 592, 1403, 976, 2351, 1165, 1851, 114, 3921, 2063, 613, 1358, 2785, 1623, 2254, 25, 3542, 1045, 246, 1852, 3554, 87, 2243, 3615, 1169, 727, 1705, 968, 3957, 3185, 1251, 500, 4063, 1751, 2622, 842, 1519, 90, 3393, 819, 490, 1874, 999, 571, 1275, 2271, 1586, 4040, 2448, 3126, 3731, 436, 885, 1708, 2421, 24, 1599, 889, 2563, 1199, 645, 70, 4013, 1237, 3723, 1694, 3499, 3, 3266, 484, 2997, 3390, 1233, 2842, 3687, 152, 3480, 1084, 3698, 881, 2490, 1542, 3992, 2209, 692, 1690, 3022, 1470, 2625, 2114, 3512, 2359, 381, 2684, 1897, 3368, 1395, 3080, 289, 2065, 3981, 2758, 1141, 3097, 1472, 2870, 3352, 3707, 225, 3159, 505, 1895, 214, 1222, 1774, 2686, 3978, 3275, 1196, 3518, 2825, 3270, 1720, 3796, 3466, 2650, 1841, 298, 899, 2862, 2091, 2671, 1744, 3735, 801, 1560, 349, 2262, 903, 1833, 2524, 512, 3117, 1793, 2827, 476, 3038, 1216, 2550, 3826, 980, 431, 4048, 35, 2992, 1265, 1595, 765, 3675, 76, 2247, 696, 3456, 1254, 2452, 664, 
1757, 2133, 3750, 145, 2332, 1554, 1981, 3580, 2712, 868, 3640, 2919, 638, 2275, 1427, 309, 2595, 2006, 492, 2226, 178, 2911, 836, 1528, 3028, 2240, 3327, 404, 3970, 707, 1294, 2464, 2131, 4032, 2600, 3319, 1406, 2913, 3974, 2156, 1425, 221, 3877, 2017, 811, 3662, 272, 3287, 1988, 2408, 3357, 1746, 598, 3239, 3823, 2182, 2934, 1078, 2604, 3840, 1697, 2906, 413, 3210, 3880, 331, 2644, 1260, 848, 3042, 2535, 1077, 1438, 3261, 2365, 1561, 3799, 85, 3082, 1876, 674, 3932, 1101, 3644, 1344, 1943, 2401, 390, 3835, 1048, 2572, 1541, 1133, 3075, 3584, 308, 2889, 1065, 1869, 601, 3783, 282, 1181, 736, 3312, 2368, 1126, 3383, 1675, 2734, 1426, 628, 2873, 1317, 843, 2717, 2048, 1004, 2536, 333, 1782, 3295, 1517, 219, 2153, 815, 3502, 1579, 2268, 987, 3409, 1780, 4018, 354, 665, 3914, 47, 1956, 456, 1006, 2010, 3406, 1130, 3621, 2894, 1549, 3092, 2485, 640, 3993, 3179, 1270, 3436, 585, 1925, 3757, 2304, 136, 1976, 1486, 646, 3520, 50, 3155, 1637, 2435, 3522, 1937, 2756, 3748, 661, 2224, 58, 3230, 2357, 1830, 3892, 170, 3607, 1447, 3949, 190, 3392, 1336, 584, 4010, 918, 3016, 3670, 1155, 2406, 52, 1304, 3009, 607, 2085, 2699, 3205, 1848, 2291, 3402, 2764, 3865, 3048, 2508, 735, 2710, 443, 2341, 897, 263, 1785, 2769, 983, 56, 2197, 1685, 2703, 202, 2944, 810, 3377, 2626, 3787, 3047, 2055, 1236, 2752, 2122, 945, 3093, 96, 1624, 439, 3014, 1388, 4015, 977, 448, 3506, 1098, 2242, 3026, 506, 2361, 2952, 1862, 3619, 2790, 1992, 2483, 525, 1868, 2652, 4093, 1998, 3595, 2478, 3816, 122, 1412, 929, 3716, 1166, 1648, 813, 1300, 199, 1489, 3998, 1771, 1310, 3808, 2052, 3423, 434, 3712, 1625, 3558, 2955, 853, 4019, 1348, 3511, 1732, 1246, 487, 934, 1672, 2510, 3965, 788, 3711, 396, 1369, 4090, 1055, 2603, 1879, 3528, 2518, 2067, 3005, 1516, 2588, 751, 1740, 3418, 1131, 1576, 686, 2296, 1118, 18, 3263, 1365, 3401, 294, 737, 3177, 410, 867, 1633, 2963, 3579, 2375, 252, 2881, 479, 2471, 3576, 2180, 3306, 332, 2255, 3035, 41, 2648, 1396, 2929, 2230, 1219, 2512, 446, 2008, 3189, 2388, 626, 
2164, 2831, 4047, 2376, 174, 3272, 368, 1469, 3226, 2578, 1991, 2874, 2263, 3681, 876, 188, 1239, 683, 3776, 226, 3183, 4083, 2148, 63, 2649, 3859, 299, 3086, 3933, 1585, 2185, 3767, 988, 1707, 2908, 1407, 1844, 2771, 2245, 1161, 560, 1755, 3376, 2051, 4064, 3135, 1832, 652, 2853, 1051, 3649, 760, 3290, 1105, 3945, 872, 154, 3207, 713, 3780, 1453, 281, 1087, 3695, 30, 3299, 1919, 1400, 3551, 1119, 1890, 2314, 618, 1703, 3428, 724, 295, 3146, 1557, 3341, 2896, 1683, 2723, 1974, 1017, 541, 1380, 3720, 804, 3280, 2082, 997, 2567, 777, 2961, 213, 2707, 2328, 3632, 1025, 3891, 3304, 255, 4003, 3108, 2587, 1323, 743, 1479, 105, 1013, 3901, 1618, 2044, 2627, 1465, 1846, 576, 1994, 2560, 3521, 1742, 2118, 2800, 3404, 1783, 2609, 2968, 1582, 1022, 412, 2713, 687, 2976, 3857, 2761, 3620, 62, 1108, 3844, 1340, 2100, 540, 2345, 3925, 405, 3457, 1319, 2468, 3362, 2815, 1867, 2372, 1281, 1714, 3690, 482, 3498, 1842, 1285, 3994, 558, 2039, 81, 2499, 678, 1481, 1923, 964, 12, 3824, 2980, 2205, 2762, 3432, 2398, 181, 3247, 462, 4094, 2350, 3589, 3089, 1555, 1094, 4041, 247, 1267, 908, 3959, 2041, 732, 3860, 2343, 3132, 3769, 2144, 1621, 237, 912, 1329, 3025, 2146, 2642, 1775, 3721, 2746, 1121, 1953, 902, 2285, 130, 3671, 1659, 278, 3153, 522, 2721, 123, 2996, 1466, 2380, 377, 3231, 873, 1510, 3476, 3123, 1250, 2147, 3650, 2839, 3451, 2323, 1122, 3545, 379, 1765, 1218, 603, 3768, 1360, 938, 2885, 133, 1245, 363, 2364, 554, 2743, 3344, 2474, 530, 3112, 169, 1297, 3430, 536, 1741, 98, 1043, 2574, 3253, 2246, 1854, 4022, 510, 3283, 204, 858, 3398, 36, 3118, 1478, 3794, 2986, 706, 2176, 922, 3559, 1097, 3976, 3322, 2149, 1160, 2810, 3883, 2007, 2513, 2953, 328, 1721, 3793, 422, 2566, 807, 329, 1638, 1967, 648, 2520, 3727, 3109, 2116, 2927, 2491, 1939, 3365, 1709, 2728, 3815, 2037, 3120, 831, 1405, 1896, 3592, 1622, 2369, 2864, 2151, 1107, 2542, 3532, 1410, 3917, 427, 3568, 709, 2509, 1503, 1037, 2973, 2436, 1604, 4035, 2594, 563, 1819, 2659, 1234, 4004, 2565, 1511, 2273, 1823, 336, 882, 
3772, 575, 1628, 171, 3570, 1120, 2260, 2716, 935, 3064, 1806, 1342, 3144, 3900, 2744, 3296, 985, 1546, 238, 896, 1663, 305, 3660, 695, 2213, 960, 3407, 144, 1795, 3894, 2267, 51, 2708, 1023, 3818, 366, 1821, 4087, 2985, 755, 2057, 2912, 949, 1583, 2774, 231, 3447, 2258, 3866, 1982, 672, 1225, 2077, 3320, 1062, 370, 3241, 1968, 7, 3068, 681, 3631, 2573, 1567, 3175, 2321, 1067, 3070, 722, 1856, 3744, 642, 1471, 4084, 131, 3514, 2443, 531, 1227, 155, 2265, 4024, 2658, 3326, 3910, 1168, 3078, 1530, 3956, 489, 1424, 3647, 1203, 420, 2924, 3755, 719, 3248, 1376, 3067, 890, 196, 1559, 3269, 270, 2432, 1885, 3212, 1164, 3778, 1752, 579, 1338, 344, 3585, 3017, 288, 3658, 2371, 3882, 1691, 611, 2789, 3809, 1339, 389, 2950, 2015, 59, 3548, 2751, 2158, 4011, 1352, 29, 3388, 2370, 2812, 1946, 954, 2110, 1558, 2947, 3573, 1909, 1326, 679, 1853, 2312, 551, 2702, 33, 2414, 3209, 2824, 2547, 2143, 3379, 966, 1492, 1979, 2479, 463, 2194, 3657, 2738, 2318, 1261, 3713, 604, 4002, 11, 2192, 2967, 919, 2607, 3369, 2837, 1676, 2539, 984, 1568, 93, 2901, 1318, 3538, 1041, 2216, 1756, 3454, 1030, 4050, 1402, 798, 1723, 311, 3277, 2546, 2886, 2043, 461, 1206, 3677, 361, 3260, 3988, 809, 2605, 470, 3007, 3517, 102, 3221, 1398, 2062, 3611, 1134, 1928, 865, 4060, 621, 1710, 2606, 3510, 317, 4017, 1682, 3329, 1159, 1940, 654, 3461, 1789, 1015, 2691, 1455, 3599, 374, 1947, 4069, 71, 2126, 763, 3961, 2278, 3161, 1997, 824, 2623, 2080, 244, 3257, 780, 2732, 2308, 545, 3351, 2476, 3806, 1204, 588, 1591, 963, 3610, 1699, 754, 3049, 2651, 1106, 65, 2221, 1644, 3821, 1100, 2463, 1614, 3801, 965, 2965, 715, 3394, 1593, 212, }; #endif /* BLUE_NOISE_64X64_H */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/dither/make-blue-noise.c0000664000175000017500000003236414712446423021022 0ustar00mattst88mattst88/* Blue noise generation using the void-and-cluster method as described in * * The void-and-cluster method for dither array generation * 
Ulichney, Robert A (1993) * * http://cv.ulichney.com/papers/1993-void-cluster.pdf * * Note that running with openmp (-DUSE_OPENMP) will trigger additional * randomness due to computing reductions in parallel, and is not recommended * unless generating very large dither arrays. */ #include #include #include #include #include /* Booleans and utility functions */ #ifndef TRUE # define TRUE 1 #endif #ifndef FALSE # define FALSE 0 #endif typedef int bool_t; int imin (int x, int y) { return x < y ? x : y; } /* Memory allocation */ void * malloc_abc (unsigned int a, unsigned int b, unsigned int c) { if (a >= INT32_MAX / b) return NULL; else if (a * b >= INT32_MAX / c) return NULL; else return malloc (a * b * c); } /* Random number generation */ typedef uint32_t xorwow_state_t[5]; uint32_t xorwow_next (xorwow_state_t *state) { uint32_t s = (*state)[0], t = (*state)[3]; (*state)[3] = (*state)[2]; (*state)[2] = (*state)[1]; (*state)[1] = s; t ^= t >> 2; t ^= t << 1; t ^= s ^ (s << 4); (*state)[0] = t; (*state)[4] += 362437; return t + (*state)[4]; } float xorwow_float (xorwow_state_t *s) { return (xorwow_next (s) >> 9) / (float)((1 << 23) - 1); } /* Floating point matrices * * Used to cache the cluster sizes. 
*/ typedef struct matrix_t { int width; int height; float *buffer; } matrix_t; bool_t matrix_init (matrix_t *matrix, int width, int height) { float *buffer; if (!matrix) return FALSE; buffer = malloc_abc (width, height, sizeof (float)); if (!buffer) return FALSE; matrix->buffer = buffer; matrix->width = width; matrix->height = height; return TRUE; } bool_t matrix_copy (matrix_t *dst, matrix_t const *src) { float *srcbuf = src->buffer, *srcend = src->buffer + src->width * src->height, *dstbuf = dst->buffer; if (dst->width != src->width || dst->height != src->height) return FALSE; while (srcbuf < srcend) *dstbuf++ = *srcbuf++; return TRUE; } float * matrix_get (matrix_t *matrix, int x, int y) { return &matrix->buffer[y * matrix->width + x]; } void matrix_destroy (matrix_t *matrix) { free (matrix->buffer); } /* Binary patterns */ typedef struct pattern_t { int width; int height; bool_t *buffer; } pattern_t; bool_t pattern_init (pattern_t *pattern, int width, int height) { bool_t *buffer; if (!pattern) return FALSE; buffer = malloc_abc (width, height, sizeof (bool_t)); if (!buffer) return FALSE; pattern->buffer = buffer; pattern->width = width; pattern->height = height; return TRUE; } bool_t pattern_copy (pattern_t *dst, pattern_t const *src) { bool_t *srcbuf = src->buffer, *srcend = src->buffer + src->width * src->height, *dstbuf = dst->buffer; if (dst->width != src->width || dst->height != src->height) return FALSE; while (srcbuf < srcend) *dstbuf++ = *srcbuf++; return TRUE; } bool_t * pattern_get (pattern_t *pattern, int x, int y) { return &pattern->buffer[y * pattern->width + x]; } void pattern_fill_white_noise (pattern_t *pattern, float fraction, xorwow_state_t *s) { bool_t *buffer = pattern->buffer; bool_t *end = buffer + (pattern->width * pattern->height); while (buffer < end) *buffer++ = xorwow_float (s) < fraction; } void pattern_destroy (pattern_t *pattern) { free (pattern->buffer); } /* Dither arrays */ typedef struct array_t { int width; int height; 
uint32_t *buffer; } array_t; bool_t array_init (array_t *array, int width, int height) { uint32_t *buffer; if (!array) return FALSE; buffer = malloc_abc (width, height, sizeof (uint32_t)); if (!buffer) return FALSE; array->buffer = buffer; array->width = width; array->height = height; return TRUE; } uint32_t * array_get (array_t *array, int x, int y) { return &array->buffer[y * array->width + x]; } bool_t array_save_ppm (array_t *array, const char *filename) { FILE *f = fopen(filename, "wb"); int i = 0; int bpp = 2; uint8_t buffer[1024]; if (!f) return FALSE; if (array->width * array->height - 1 < 256) bpp = 1; fprintf(f, "P5 %d %d %d\n", array->width, array->height, array->width * array->height - 1); while (i < array->width * array->height) { int j = 0; for (; j < 1024 / bpp && j < array->width * array->height; ++j) { uint32_t v = array->buffer[i + j]; if (bpp == 2) { buffer[2 * j] = v & 0xff; buffer[2 * j + 1] = (v & 0xff00) >> 8; } else { buffer[j] = v; } } fwrite((void *)buffer, bpp, j, f); i += j; } if (fclose(f) != 0) return FALSE; return TRUE; } bool_t array_save (array_t *array, const char *filename) { int x, y; FILE *f = fopen(filename, "wb"); if (!f) return FALSE; fprintf (f, "/* WARNING: This file is generated by make-blue-noise.c\n" " * Please edit that file instead of this one.\n" " */\n" "\n" "#ifndef BLUE_NOISE_%dX%d_H\n" "#define BLUE_NOISE_%dX%d_H\n" "\n" "#include \n" "\n", array->width, array->height, array->width, array->height); fprintf (f, "static const uint16_t dither_blue_noise_%dx%d[%d] = {\n", array->width, array->height, array->width * array->height); for (y = 0; y < array->height; ++y) { fprintf (f, " "); for (x = 0; x < array->width; ++x) { if (x != 0) fprintf (f, ", "); fprintf (f, "%d", *array_get (array, x, y)); } fprintf (f, ",\n"); } fprintf (f, "};\n"); fprintf (f, "\n#endif /* BLUE_NOISE_%dX%d_H */\n", array->width, array->height); if (fclose(f) != 0) return FALSE; return TRUE; } void array_destroy (array_t *array) { free 
(array->buffer); } /* Dither array generation */ bool_t compute_cluster_sizes (pattern_t *pattern, matrix_t *matrix) { int width = pattern->width, height = pattern->height; if (matrix->width != width || matrix->height != height) return FALSE; int px, py, qx, qy, dx, dy; float tsqsi = 2.f * 1.5f * 1.5f; #ifdef USE_OPENMP #pragma omp parallel for default (none) \ private (py, px, qy, qx, dx, dy) \ shared (height, width, pattern, matrix, tsqsi) #endif for (py = 0; py < height; ++py) { for (px = 0; px < width; ++px) { bool_t pixel = *pattern_get (pattern, px, py); float dist = 0.f; for (qx = 0; qx < width; ++qx) { dx = imin (abs (qx - px), width - abs (qx - px)); dx = dx * dx; for (qy = 0; qy < height; ++qy) { dy = imin (abs (qy - py), height - abs (qy - py)); dy = dy * dy; dist += (pixel == *pattern_get (pattern, qx, qy)) * expf (- (dx + dy) / tsqsi); } } *matrix_get (matrix, px, py) = dist; } } return TRUE; } bool_t swap_pixel (pattern_t *pattern, matrix_t *matrix, int x, int y) { int width = pattern->width, height = pattern->height; bool_t new; float f, dist = 0.f, tsqsi = 2.f * 1.5f * 1.5f; int px, py, dx, dy; bool_t b; new = !*pattern_get (pattern, x, y); *pattern_get (pattern, x, y) = new; if (matrix->width != width || matrix->height != height) return FALSE; #ifdef USE_OPENMP #pragma omp parallel for reduction (+:dist) default (none) \ private (px, py, dx, dy, b, f) \ shared (x, y, width, height, pattern, matrix, new, tsqsi) #endif for (py = 0; py < height; ++py) { dy = imin (abs (py - y), height - abs (py - y)); dy = dy * dy; for (px = 0; px < width; ++px) { dx = imin (abs (px - x), width - abs (px - x)); dx = dx * dx; b = (*pattern_get (pattern, px, py) == new); f = expf (- (dx + dy) / tsqsi); *matrix_get (matrix, px, py) += (2 * b - 1) * f; dist += b * f; } } *matrix_get (matrix, x, y) = dist; return TRUE; } void largest_cluster (pattern_t *pattern, matrix_t *matrix, bool_t pixel, int *xmax, int *ymax) { int width = pattern->width, height = pattern->height; 
int x, y; float vmax = -INFINITY; #ifdef USE_OPENMP #pragma omp parallel default (none) \ private (x, y) \ shared (height, width, pattern, matrix, pixel, xmax, ymax, vmax) #endif { int xbest = -1, ybest = -1; #ifdef USE_OPENMP float vbest = -INFINITY; #pragma omp for reduction (max: vmax) collapse (2) #endif for (y = 0; y < height; ++y) { for (x = 0; x < width; ++x) { if (*pattern_get (pattern, x, y) != pixel) continue; if (*matrix_get (matrix, x, y) > vmax) { vmax = *matrix_get (matrix, x, y); #ifdef USE_OPENMP vbest = vmax; #endif xbest = x; ybest = y; } } } #ifdef USE_OPENMP #pragma omp barrier #pragma omp critical { if (vmax == vbest) { *xmax = xbest; *ymax = ybest; } } #else *xmax = xbest; *ymax = ybest; #endif } assert (vmax > -INFINITY); } void generate_initial_binary_pattern (pattern_t *pattern, matrix_t *matrix) { int xcluster = 0, ycluster = 0, xvoid = 0, yvoid = 0; for (;;) { largest_cluster (pattern, matrix, TRUE, &xcluster, &ycluster); assert (*pattern_get (pattern, xcluster, ycluster) == TRUE); swap_pixel (pattern, matrix, xcluster, ycluster); largest_cluster (pattern, matrix, FALSE, &xvoid, &yvoid); assert (*pattern_get (pattern, xvoid, yvoid) == FALSE); swap_pixel (pattern, matrix, xvoid, yvoid); if (xcluster == xvoid && ycluster == yvoid) return; } } bool_t generate_dither_array (array_t *array, pattern_t const *prototype, matrix_t const *matrix, pattern_t *temp_pattern, matrix_t *temp_matrix) { int width = prototype->width, height = prototype->height; int x, y, rank; int initial_rank = 0; if (array->width != width || array->height != height) return FALSE; // Make copies of the prototype and associated sizes matrix since we will // trash them if (!pattern_copy (temp_pattern, prototype)) return FALSE; if (!matrix_copy (temp_matrix, matrix)) return FALSE; // Compute initial rank for (y = 0; y < height; ++y) { for (x = 0; x < width; ++x) { if (*pattern_get (temp_pattern, x, y)) initial_rank += 1; *array_get (array, x, y) = 0; } } // Phase 1 for (rank 
= initial_rank; rank > 0; --rank) { largest_cluster (temp_pattern, temp_matrix, TRUE, &x, &y); swap_pixel (temp_pattern, temp_matrix, x, y); *array_get (array, x, y) = rank - 1; } // Make copies again for phases 2 & 3 if (!pattern_copy (temp_pattern, prototype)) return FALSE; if (!matrix_copy (temp_matrix, matrix)) return FALSE; // Phase 2 & 3 for (rank = initial_rank; rank < width * height; ++rank) { largest_cluster (temp_pattern, temp_matrix, FALSE, &x, &y); swap_pixel (temp_pattern, temp_matrix, x, y); *array_get (array, x, y) = rank; } return TRUE; } bool_t generate (int size, xorwow_state_t *s, char const *c_filename, char const *ppm_filename) { bool_t ok = TRUE; pattern_t prototype, temp_pattern; array_t array; matrix_t matrix, temp_matrix; printf ("Generating %dx%d blue noise...\n", size, size); if (!pattern_init (&prototype, size, size)) return FALSE; if (!pattern_init (&temp_pattern, size, size)) { pattern_destroy (&prototype); return FALSE; } if (!matrix_init (&matrix, size, size)) { pattern_destroy (&temp_pattern); pattern_destroy (&prototype); return FALSE; } if (!matrix_init (&temp_matrix, size, size)) { matrix_destroy (&matrix); pattern_destroy (&temp_pattern); pattern_destroy (&prototype); return FALSE; } if (!array_init (&array, size, size)) { matrix_destroy (&temp_matrix); matrix_destroy (&matrix); pattern_destroy (&temp_pattern); pattern_destroy (&prototype); return FALSE; } printf("Filling initial binary pattern with white noise...\n"); pattern_fill_white_noise (&prototype, .1, s); printf("Initializing cluster sizes...\n"); if (!compute_cluster_sizes (&prototype, &matrix)) { fprintf (stderr, "Error while computing cluster sizes\n"); ok = FALSE; goto out; } printf("Generating initial binary pattern...\n"); generate_initial_binary_pattern (&prototype, &matrix); printf("Generating dither array...\n"); if (!generate_dither_array (&array, &prototype, &matrix, &temp_pattern, &temp_matrix)) { fprintf (stderr, "Error while generating dither array\n"); ok 
= FALSE; goto out; } printf("Saving dither array...\n"); if (!array_save (&array, c_filename)) { fprintf (stderr, "Error saving dither array\n"); ok = FALSE; goto out; } #if SAVE_PPM if (!array_save_ppm (&array, ppm_filename)) { fprintf (stderr, "Error saving dither array PPM\n"); ok = FALSE; goto out; } #else (void)ppm_filename; #endif printf("All done!\n"); out: array_destroy (&array); matrix_destroy (&temp_matrix); matrix_destroy (&matrix); pattern_destroy (&temp_pattern); pattern_destroy (&prototype); return ok; } int main (void) { xorwow_state_t s = {1185956906, 12385940, 983948, 349208051, 901842}; if (!generate (64, &s, "blue-noise-64x64.h", "blue-noise-64x64.ppm")) return -1; return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/loongson-mmintrin.h0000664000175000017500000002360714712446423020264 0ustar00mattst88mattst88/* The gcc-provided loongson intrinsic functions are way too fucking broken * to be of any use, otherwise I'd use them. * * - The hardware instructions are very similar to MMX or iwMMXt. Certainly * close enough that they could have implemented the _mm_*-style intrinsic * interface and had a ton of optimized code available to them. Instead they * implemented something much, much worse. * * - pshuf takes a dead first argument, causing extra instructions to be * generated. * * - There are no 64-bit shift or logical intrinsics, which means you have * to implement them with inline assembly, but this is a nightmare because * gcc doesn't understand that the integer vector datatypes are actually in * floating-point registers, so you end up with braindead code like * * punpcklwd $f9,$f9,$f5 * dmtc1 v0,$f8 * punpcklwd $f19,$f19,$f5 * dmfc1 t9,$f9 * dmtc1 v0,$f9 * dmtc1 t9,$f20 * dmfc1 s0,$f19 * punpcklbh $f20,$f20,$f2 * * where crap just gets copied back and forth between integer and floating- * point registers ad nauseum. 
* * Instead of trying to workaround the problems from these crap intrinsics, I * just implement the _mm_* intrinsics needed for pixman-mmx.c using inline * assembly. */ #include /* vectors are stored in 64-bit floating-point registers */ typedef double __m64; /* having a 32-bit datatype allows us to use 32-bit loads in places like load8888 */ typedef float __m32; extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_setzero_si64 (void) { return 0.0; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_add_pi16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("paddh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_add_pi32 (__m64 __m1, __m64 __m2) { __m64 ret; asm("paddw %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_adds_pu16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("paddush %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_adds_pu8 (__m64 __m1, __m64 __m2) { __m64 ret; asm("paddusb %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_and_si64 (__m64 __m1, __m64 __m2) { __m64 ret; asm("and %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2) { __m64 ret; asm("pcmpeqw %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_empty (void) { } extern __inline __m64 __attribute__((__gnu_inline__, 
__always_inline__, __artificial__)) _mm_madd_pi16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("pmaddhw %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mulhi_pu16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("pmulhuh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mullo_pi16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("pmullh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_or_si64 (__m64 __m1, __m64 __m2) { __m64 ret; asm("or %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_packs_pu16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("packushb %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_packs_pi32 (__m64 __m1, __m64 __m2) { __m64 ret; asm("packsswh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } #define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_set_pi16 (uint16_t __w3, uint16_t __w2, uint16_t __w1, uint16_t __w0) { if (__builtin_constant_p (__w3) && __builtin_constant_p (__w2) && __builtin_constant_p (__w1) && __builtin_constant_p (__w0)) { uint64_t val = ((uint64_t)__w3 << 48) | ((uint64_t)__w2 << 32) | ((uint64_t)__w1 << 16) | ((uint64_t)__w0 << 0); return *(__m64 *)&val; } else if (__w3 == __w2 && __w2 == __w1 && __w1 == __w0) { /* TODO: handle other cases */ uint64_t val = __w3; uint64_t imm = _MM_SHUFFLE (0, 0, 0, 0); __m64 ret; asm("pshufh %0, %1, 
%2\n\t" : "=f" (ret) : "f" (*(__m64 *)&val), "f" (*(__m64 *)&imm) ); return ret; } else { uint64_t val = ((uint64_t)__w3 << 48) | ((uint64_t)__w2 << 32) | ((uint64_t)__w1 << 16) | ((uint64_t)__w0 << 0); return *(__m64 *)&val; } } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_set_pi32 (unsigned __i1, unsigned __i0) { if (__builtin_constant_p (__i1) && __builtin_constant_p (__i0)) { uint64_t val = ((uint64_t)__i1 << 32) | ((uint64_t)__i0 << 0); return *(__m64 *)&val; } else if (__i1 == __i0) { uint64_t imm = _MM_SHUFFLE (1, 0, 1, 0); __m64 ret; asm("pshufh %0, %1, %2\n\t" : "=f" (ret) : "f" (*(__m32 *)&__i1), "f" (*(__m64 *)&imm) ); return ret; } else { uint64_t val = ((uint64_t)__i1 << 32) | ((uint64_t)__i0 << 0); return *(__m64 *)&val; } } #undef _MM_SHUFFLE extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_shuffle_pi16 (__m64 __m, int64_t __n) { __m64 ret; asm("pshufh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__n) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_slli_pi16 (__m64 __m, int64_t __count) { __m64 ret; asm("psllh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__count) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_slli_si64 (__m64 __m, int64_t __count) { __m64 ret; asm("dsll %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__count) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_srli_pi16 (__m64 __m, int64_t __count) { __m64 ret; asm("psrlh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__count) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_srli_pi32 (__m64 __m, int64_t __count) { __m64 ret; asm("psrlw %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__count) ); 
return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_srli_si64 (__m64 __m, int64_t __count) { __m64 ret; asm("dsrl %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__count) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_sub_pi16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("psubh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) { __m64 ret; asm("punpckhbh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("punpckhhw %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) { __m64 ret; asm("punpcklbh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } /* Since punpcklbh doesn't care about the high 32-bits, we use the __m32 datatype which * allows load8888 to use 32-bit loads */ extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_unpacklo_pi8_f (__m32 __m1, __m64 __m2) { __m64 ret; asm("punpcklbh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2) { __m64 ret; asm("punpcklhw %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_xor_si64 (__m64 __m1, __m64 __m2) { __m64 ret; asm("xor %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2) ); return ret; } extern __inline __m64 
__attribute__((__gnu_inline__, __always_inline__, __artificial__)) loongson_extract_pi16 (__m64 __m, int64_t __pos) { __m64 ret; asm("pextrh %0, %1, %2\n\t" : "=f" (ret) : "f" (__m), "f" (*(__m64 *)&__pos) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) loongson_insert_pi16 (__m64 __m1, __m64 __m2, int64_t __pos) { __m64 ret; asm("pinsrh_%3 %0, %1, %2\n\t" : "=f" (ret) : "f" (__m1), "f" (__m2), "i" (__pos) ); return ret; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/make-srgb.pl0000664000175000017500000000416614712446423016626 0ustar00mattst88mattst88#!/usr/bin/perl -w use strict; sub linear_to_srgb { my ($c) = @_; if ($c < 0.0031308) { return $c * 12.92; } else { return 1.055 * $c ** (1.0/2.4) - 0.055; } } sub srgb_to_linear { my ($c) = @_; if ($c < 0.04045) { return $c / 12.92; } else { return (($c + 0.055) / 1.055) ** 2.4 } } my @linear_to_srgb; for my $linear (0 .. 4095) { my $srgb = int(linear_to_srgb($linear / 4095.0) * 255.0 + 0.5); push @linear_to_srgb, $srgb; } my @srgb_to_linear; for my $srgb (0 .. 255) { my $linear = int(srgb_to_linear($srgb / 255.0) * 65535.0 + 0.5); push @srgb_to_linear, $linear; } # Ensure that we have a lossless sRGB and back conversion loop. # some of the darkest shades need a little bias -- maximum is just # 5 increments out of 16. This gives us useful property with # least amount of error in the sRGB-to-linear table, and keeps the actual # table lookup in the other direction as simple as possible. for my $srgb (0 .. $#srgb_to_linear) { my $add = 0; while (1) { my $linear = $srgb_to_linear[$srgb]; my $srgb_lossy = $linear_to_srgb[$linear >> 4]; last if $srgb == $srgb_lossy; # Add slight bias to this component until it rounds correctly $srgb_to_linear[$srgb] ++; $add ++; } die "Too many adds at $srgb" if $add > 5; } print <<"PROLOG"; /* WARNING: This file is generated by $0. 
* Please edit that file instead of this one. */ #include #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" PROLOG print "const uint8_t linear_to_srgb[" . @linear_to_srgb . "] =\n"; print "{\n"; for my $linear (0 .. $#linear_to_srgb) { if (($linear % 10) == 0) { print "\t"; } print sprintf("%d, ", $linear_to_srgb[$linear]); if (($linear % 10) == 9) { print "\n"; } } print "\n};\n"; print "\n"; print "const uint16_t srgb_to_linear[" . @srgb_to_linear . "] =\n"; print "{\n"; for my $srgb (0 .. $#srgb_to_linear) { if (($srgb % 10) == 0) { print "\t"; } print sprintf("%d, ", $srgb_to_linear[$srgb]); if (($srgb % 10) == 9) { print "\n"; } } print "\n};\n"; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/meson.build0000664000175000017500000001077614712446423016567 0ustar00mattst88mattst88# Copyright Âİ 2018 Intel Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
config_h = configure_file( configuration : config, output : 'pixman-config.h' ) version_h = configure_file( configuration : version_conf, input : 'pixman-version.h.in', output : 'pixman-version.h', install_dir : join_paths(get_option('prefix'), get_option('includedir'), 'pixman-1') ) libpixman_extra_cargs = [] default_library = get_option('default_library') if default_library != 'static' and cc.has_function_attribute('dllexport') libpixman_extra_cargs = ['-DPIXMAN_API=__declspec(dllexport)'] endif pixman_simd_libs = [] simds = [ # the mmx library can be compiled with mmx on x86/x86_64 or loongson # mmi on loongson mips systems. The libraries will all have the same # name, "pixman-mmx", but there is no chance of more than one version # being built in the same build because no system could have mmx and # mmi, and it simplifies the build logic to give them the same name. ['mmx', have_mmx, mmx_flags, []], ['mmx', have_loongson_mmi, loongson_mmi_flags, []], ['sse2', have_sse2, sse2_flags, []], ['ssse3', have_ssse3, ssse3_flags, []], ['vmx', have_vmx, vmx_flags, []], ['arm-simd', have_armv6_simd, [], ['pixman-arm-simd-asm.S', 'pixman-arm-simd-asm-scaled.S']], ['arm-neon', have_neon, [], ['pixman-arm-neon-asm.S', 'pixman-arm-neon-asm-bilinear.S']], ['arm-neon', have_a64neon, [], ['pixman-arma64-neon-asm.S', 'pixman-arma64-neon-asm-bilinear.S']], ['mips-dspr2', have_mips_dspr2, mips_dspr2_flags, ['pixman-mips-dspr2-asm.S', 'pixman-mips-memcpy-asm.S']], ['rvv', have_rvv, rvv_flags, []], ] foreach simd : simds if simd[1] name = 'pixman-' + simd[0] pixman_simd_libs += static_library( name, [name + '.c', config_h, version_h, simd[3]], c_args : simd[2] ) endif endforeach pixman_files = files( 'pixman.c', 'pixman-access.c', 'pixman-access-accessors.c', 'pixman-arm.c', 'pixman-bits-image.c', 'pixman-combine32.c', 'pixman-combine-float.c', 'pixman-conical-gradient.c', 'pixman-edge.c', 'pixman-edge-accessors.c', 'pixman-fast-path.c', 'pixman-filter.c', 'pixman-glyph.c', 
'pixman-general.c', 'pixman-gradient-walker.c', 'pixman-image.c', 'pixman-implementation.c', 'pixman-linear-gradient.c', 'pixman-matrix.c', 'pixman-mips.c', 'pixman-noop.c', 'pixman-ppc.c', 'pixman-radial-gradient.c', 'pixman-region16.c', 'pixman-region32.c', 'pixman-riscv.c', 'pixman-solid-fill.c', 'pixman-timer.c', 'pixman-trap.c', 'pixman-utils.c', 'pixman-x86.c', ) # Android cpu-features cpu_features_path = get_option('cpu-features-path') cpu_features_sources = [] cpu_features_inc = [] if cpu_features_path != '' message('Using cpu-features.[ch] from ' + cpu_features_path) cpu_features_sources = files( cpu_features_path / 'cpu-features.h', cpu_features_path / 'cpu-features.c', ) cpu_features_inc = include_directories(cpu_features_path) endif libpixman = library( 'pixman-1', [pixman_files, config_h, version_h, cpu_features_sources], link_with: pixman_simd_libs, c_args : libpixman_extra_cargs, dependencies : [dep_m, dep_threads], include_directories : cpu_features_inc, version : meson.project_version(), install : true, ) inc_pixman = include_directories('.') idep_pixman = declare_dependency( link_with: libpixman, include_directories : inc_pixman, ) if meson.version().version_compare('>= 0.54.0') meson.override_dependency('pixman-1', idep_pixman) endif install_headers('pixman.h', subdir : 'pixman-1') ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-access-accessors.c0000664000175000017500000000007014712446423021271 0ustar00mattst88mattst88#define PIXMAN_FB_ACCESSORS #include "pixman-access.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-access.c0000664000175000017500000013512014712446423017313 0ustar00mattst88mattst88/* * * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. 
* 2005 Lars Knoll & Zack Rusin, Trolltech * 2008 Aaron Plattner, NVIDIA Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "pixman-accessor.h" #include "pixman-private.h" #define CONVERT_RGB24_TO_Y15(s) \ (((((s) >> 16) & 0xff) * 153 + \ (((s) >> 8) & 0xff) * 301 + \ (((s) ) & 0xff) * 58) >> 2) #define CONVERT_RGB24_TO_RGB15(s) \ ((((s) >> 3) & 0x001f) | \ (((s) >> 6) & 0x03e0) | \ (((s) >> 9) & 0x7c00)) /* Fetch macros */ #ifdef WORDS_BIGENDIAN #define FETCH_1(img,l,o) \ (((READ ((img), ((uint32_t *)(l)) + ((o) >> 5))) >> (0x1f - ((o) & 0x1f))) & 0x1) #else #define FETCH_1(img,l,o) \ ((((READ ((img), ((uint32_t *)(l)) + ((o) >> 5))) >> ((o) & 0x1f))) & 0x1) #endif #define FETCH_8(img,l,o) (READ (img, (((uint8_t *)(l)) + ((o) >> 3)))) #ifdef WORDS_BIGENDIAN #define FETCH_4(img,l,o) \ (((4 * (o)) & 4) ? 
(FETCH_8 (img,l, 4 * (o)) & 0xf) : (FETCH_8 (img,l,(4 * (o))) >> 4)) #else #define FETCH_4(img,l,o) \ (((4 * (o)) & 4) ? (FETCH_8 (img, l, 4 * (o)) >> 4) : (FETCH_8 (img, l, (4 * (o))) & 0xf)) #endif #ifdef WORDS_BIGENDIAN #define FETCH_24(img,l,o) \ ((uint32_t)(READ (img, (((uint8_t *)(l)) + ((o) * 3) + 0)) << 16) | \ (uint32_t)(READ (img, (((uint8_t *)(l)) + ((o) * 3) + 1)) << 8) | \ (uint32_t)(READ (img, (((uint8_t *)(l)) + ((o) * 3) + 2)) << 0)) #else #define FETCH_24(img,l,o) \ ((uint32_t)(READ (img, (((uint8_t *)(l)) + ((o) * 3) + 0)) << 0) | \ (uint32_t)(READ (img, (((uint8_t *)(l)) + ((o) * 3) + 1)) << 8) | \ (uint32_t)(READ (img, (((uint8_t *)(l)) + ((o) * 3) + 2)) << 16)) #endif /* Store macros */ #ifdef WORDS_BIGENDIAN #define STORE_1(img,l,o,v) \ do \ { \ uint32_t *__d = ((uint32_t *)(l)) + ((o) >> 5); \ uint32_t __m, __v; \ \ __m = 1U << (0x1f - ((o) & 0x1f)); \ __v = (v)? __m : 0; \ \ WRITE((img), __d, (READ((img), __d) & ~__m) | __v); \ } \ while (0) #else #define STORE_1(img,l,o,v) \ do \ { \ uint32_t *__d = ((uint32_t *)(l)) + ((o) >> 5); \ uint32_t __m, __v; \ \ __m = 1U << ((o) & 0x1f); \ __v = (v)? __m : 0; \ \ WRITE((img), __d, (READ((img), __d) & ~__m) | __v); \ } \ while (0) #endif #define STORE_8(img,l,o,v) (WRITE (img, (uint8_t *)(l) + ((o) >> 3), (v))) #ifdef WORDS_BIGENDIAN #define STORE_4(img,l,o,v) \ do \ { \ int bo = 4 * (o); \ int v4 = (v) & 0x0f; \ \ STORE_8 (img, l, bo, ( \ bo & 4 ? \ (FETCH_8 (img, l, bo) & 0xf0) | (v4) : \ (FETCH_8 (img, l, bo) & 0x0f) | (v4 << 4))); \ } while (0) #else #define STORE_4(img,l,o,v) \ do \ { \ int bo = 4 * (o); \ int v4 = (v) & 0x0f; \ \ STORE_8 (img, l, bo, ( \ bo & 4 ? 
\ (FETCH_8 (img, l, bo) & 0x0f) | (v4 << 4) : \ (FETCH_8 (img, l, bo) & 0xf0) | (v4))); \ } while (0) #endif #ifdef WORDS_BIGENDIAN #define STORE_24(img,l,o,v) \ do \ { \ uint8_t *__tmp = (l) + 3 * (o); \ \ WRITE ((img), __tmp++, ((v) & 0x00ff0000) >> 16); \ WRITE ((img), __tmp++, ((v) & 0x0000ff00) >> 8); \ WRITE ((img), __tmp++, ((v) & 0x000000ff) >> 0); \ } \ while (0) #else #define STORE_24(img,l,o,v) \ do \ { \ uint8_t *__tmp = (l) + 3 * (o); \ \ WRITE ((img), __tmp++, ((v) & 0x000000ff) >> 0); \ WRITE ((img), __tmp++, ((v) & 0x0000ff00) >> 8); \ WRITE ((img), __tmp++, ((v) & 0x00ff0000) >> 16); \ } \ while (0) #endif /* * YV12 setup and access macros */ #define YV12_SETUP(image) \ bits_image_t *__bits_image = (bits_image_t *)image; \ uint32_t *bits = __bits_image->bits; \ int stride = __bits_image->rowstride; \ int offset0 = stride < 0 ? \ ((-stride) >> 1) * ((__bits_image->height - 1) >> 1) - stride : \ stride * __bits_image->height; \ int offset1 = stride < 0 ? \ offset0 + ((-stride) >> 1) * ((__bits_image->height) >> 1) : \ offset0 + (offset0 >> 2) /* Note no trailing semicolon on the above macro; if it's there, then * the typical usage of YV12_SETUP(image); will have an extra trailing ; * that some compilers will interpret as a statement -- and then any further * variable declarations will cause an error. */ #define YV12_Y(line) \ ((uint8_t *) ((bits) + (stride) * (line))) #define YV12_U(line) \ ((uint8_t *) ((bits) + offset1 + \ ((stride) >> 1) * ((line) >> 1))) #define YV12_V(line) \ ((uint8_t *) ((bits) + offset0 + \ ((stride) >> 1) * ((line) >> 1))) /* Misc. 
helpers */ static force_inline void get_shifts (pixman_format_code_t format, int *a, int *r, int *g, int *b) { switch (PIXMAN_FORMAT_TYPE (format)) { case PIXMAN_TYPE_A: *b = 0; *g = 0; *r = 0; *a = 0; break; case PIXMAN_TYPE_ARGB: case PIXMAN_TYPE_ARGB_SRGB: *b = 0; *g = *b + PIXMAN_FORMAT_B (format); *r = *g + PIXMAN_FORMAT_G (format); *a = *r + PIXMAN_FORMAT_R (format); break; case PIXMAN_TYPE_ABGR: *r = 0; *g = *r + PIXMAN_FORMAT_R (format); *b = *g + PIXMAN_FORMAT_G (format); *a = *b + PIXMAN_FORMAT_B (format); break; case PIXMAN_TYPE_BGRA: /* With BGRA formats we start counting at the high end of the pixel */ *b = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_B (format); *g = *b - PIXMAN_FORMAT_B (format); *r = *g - PIXMAN_FORMAT_G (format); *a = *r - PIXMAN_FORMAT_R (format); break; case PIXMAN_TYPE_RGBA: /* With BGRA formats we start counting at the high end of the pixel */ *r = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_R (format); *g = *r - PIXMAN_FORMAT_R (format); *b = *g - PIXMAN_FORMAT_G (format); *a = *b - PIXMAN_FORMAT_B (format); break; default: assert (0); break; } } static force_inline uint32_t convert_channel (uint32_t pixel, uint32_t def_value, int n_from_bits, int from_shift, int n_to_bits, int to_shift) { uint32_t v; if (n_from_bits && n_to_bits) v = unorm_to_unorm (pixel >> from_shift, n_from_bits, n_to_bits); else if (n_to_bits) v = def_value; else v = 0; return (v & ((1 << n_to_bits) - 1)) << to_shift; } static force_inline uint32_t convert_pixel (pixman_format_code_t from, pixman_format_code_t to, uint32_t pixel) { int a_from_shift, r_from_shift, g_from_shift, b_from_shift; int a_to_shift, r_to_shift, g_to_shift, b_to_shift; uint32_t a, r, g, b; get_shifts (from, &a_from_shift, &r_from_shift, &g_from_shift, &b_from_shift); get_shifts (to, &a_to_shift, &r_to_shift, &g_to_shift, &b_to_shift); a = convert_channel (pixel, ~0, PIXMAN_FORMAT_A (from), a_from_shift, PIXMAN_FORMAT_A (to), a_to_shift); r = convert_channel (pixel, 0, PIXMAN_FORMAT_R 
(from), r_from_shift, PIXMAN_FORMAT_R (to), r_to_shift); g = convert_channel (pixel, 0, PIXMAN_FORMAT_G (from), g_from_shift, PIXMAN_FORMAT_G (to), g_to_shift); b = convert_channel (pixel, 0, PIXMAN_FORMAT_B (from), b_from_shift, PIXMAN_FORMAT_B (to), b_to_shift); return a | r | g | b; } static force_inline uint32_t convert_pixel_to_a8r8g8b8 (bits_image_t *image, pixman_format_code_t format, uint32_t pixel) { if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_GRAY || PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_COLOR) { return image->indexed->rgba[pixel]; } else { return convert_pixel (format, PIXMAN_a8r8g8b8, pixel); } } static force_inline uint32_t convert_pixel_from_a8r8g8b8 (pixman_image_t *image, pixman_format_code_t format, uint32_t pixel) { if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_GRAY) { pixel = CONVERT_RGB24_TO_Y15 (pixel); return image->bits.indexed->ent[pixel & 0x7fff]; } else if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_COLOR) { pixel = convert_pixel (PIXMAN_a8r8g8b8, PIXMAN_x1r5g5b5, pixel); return image->bits.indexed->ent[pixel & 0x7fff]; } else { return convert_pixel (PIXMAN_a8r8g8b8, format, pixel); } } static force_inline uint32_t fetch_and_convert_pixel (bits_image_t * image, const uint8_t * bits, int offset, pixman_format_code_t format) { uint32_t pixel; switch (PIXMAN_FORMAT_BPP (format)) { case 1: pixel = FETCH_1 (image, bits, offset); break; case 4: pixel = FETCH_4 (image, bits, offset); break; case 8: pixel = READ (image, bits + offset); break; case 16: pixel = READ (image, ((uint16_t *)bits + offset)); break; case 24: pixel = FETCH_24 (image, bits, offset); break; case 32: pixel = READ (image, ((uint32_t *)bits + offset)); break; default: pixel = 0xffff00ff; /* As ugly as possible to detect the bug */ break; } return convert_pixel_to_a8r8g8b8 (image, format, pixel); } static force_inline void convert_and_store_pixel (bits_image_t * image, uint8_t * dest, int offset, pixman_format_code_t format, uint32_t pixel) { uint32_t converted = 
convert_pixel_from_a8r8g8b8 ( (pixman_image_t *)image, format, pixel); switch (PIXMAN_FORMAT_BPP (format)) { case 1: STORE_1 (image, dest, offset, converted & 0x01); break; case 4: STORE_4 (image, dest, offset, converted & 0xf); break; case 8: WRITE (image, (dest + offset), converted & 0xff); break; case 16: WRITE (image, ((uint16_t *)dest + offset), converted & 0xffff); break; case 24: STORE_24 (image, dest, offset, converted); break; case 32: WRITE (image, ((uint32_t *)dest + offset), converted); break; default: *dest = 0x0; break; } } #define MAKE_ACCESSORS(format) \ static void \ fetch_scanline_ ## format (bits_image_t *image, \ int x, \ int y, \ int width, \ uint32_t * buffer, \ const uint32_t *mask) \ { \ uint8_t *bits = \ (uint8_t *)(image->bits + y * image->rowstride); \ int i; \ \ for (i = 0; i < width; ++i) \ { \ *buffer++ = \ fetch_and_convert_pixel (image, bits, x + i, PIXMAN_ ## format); \ } \ } \ \ static void \ store_scanline_ ## format (bits_image_t * image, \ int x, \ int y, \ int width, \ const uint32_t *values) \ { \ uint8_t *dest = \ (uint8_t *)(image->bits + y * image->rowstride); \ int i; \ \ for (i = 0; i < width; ++i) \ { \ convert_and_store_pixel ( \ image, dest, i + x, PIXMAN_ ## format, values[i]); \ } \ } \ \ static uint32_t \ fetch_pixel_ ## format (bits_image_t *image, \ int offset, \ int line) \ { \ uint8_t *bits = \ (uint8_t *)(image->bits + line * image->rowstride); \ \ return fetch_and_convert_pixel ( \ image, bits, offset, PIXMAN_ ## format); \ } \ \ static const void *const __dummy__ ## format MAYBE_UNUSED MAKE_ACCESSORS(a8r8g8b8); MAKE_ACCESSORS(x8r8g8b8); MAKE_ACCESSORS(a8b8g8r8); MAKE_ACCESSORS(x8b8g8r8); MAKE_ACCESSORS(x14r6g6b6); MAKE_ACCESSORS(b8g8r8a8); MAKE_ACCESSORS(b8g8r8x8); MAKE_ACCESSORS(r8g8b8x8); MAKE_ACCESSORS(r8g8b8a8); MAKE_ACCESSORS(r8g8b8); MAKE_ACCESSORS(b8g8r8); MAKE_ACCESSORS(r5g6b5); MAKE_ACCESSORS(b5g6r5); MAKE_ACCESSORS(a1r5g5b5); MAKE_ACCESSORS(x1r5g5b5); MAKE_ACCESSORS(a1b5g5r5); 
MAKE_ACCESSORS(x1b5g5r5); MAKE_ACCESSORS(a4r4g4b4); MAKE_ACCESSORS(x4r4g4b4); MAKE_ACCESSORS(a4b4g4r4); MAKE_ACCESSORS(x4b4g4r4); MAKE_ACCESSORS(a8); MAKE_ACCESSORS(c8); MAKE_ACCESSORS(g8); MAKE_ACCESSORS(r3g3b2); MAKE_ACCESSORS(b2g3r3); MAKE_ACCESSORS(a2r2g2b2); MAKE_ACCESSORS(a2b2g2r2); MAKE_ACCESSORS(x4a4); MAKE_ACCESSORS(a4); MAKE_ACCESSORS(g4); MAKE_ACCESSORS(c4); MAKE_ACCESSORS(r1g2b1); MAKE_ACCESSORS(b1g2r1); MAKE_ACCESSORS(a1r1g1b1); MAKE_ACCESSORS(a1b1g1r1); MAKE_ACCESSORS(a1); MAKE_ACCESSORS(g1); /********************************** Fetch ************************************/ /* Table mapping sRGB-encoded 8 bit numbers to linearly encoded * floating point numbers. We assume that single precision * floating point follows the IEEE 754 format. */ static const uint32_t to_linear_u[256] = { 0x00000000, 0x399f22b4, 0x3a1f22b4, 0x3a6eb40e, 0x3a9f22b4, 0x3ac6eb61, 0x3aeeb40e, 0x3b0b3e5d, 0x3b1f22b4, 0x3b33070b, 0x3b46eb61, 0x3b5b518a, 0x3b70f18a, 0x3b83e1c5, 0x3b8fe614, 0x3b9c87fb, 0x3ba9c9b5, 0x3bb7ad6d, 0x3bc63547, 0x3bd5635f, 0x3be539bd, 0x3bf5ba70, 0x3c0373b5, 0x3c0c6152, 0x3c15a703, 0x3c1f45bc, 0x3c293e68, 0x3c3391f4, 0x3c3e4149, 0x3c494d43, 0x3c54b6c7, 0x3c607eb1, 0x3c6ca5df, 0x3c792d22, 0x3c830aa8, 0x3c89af9e, 0x3c9085db, 0x3c978dc5, 0x3c9ec7c0, 0x3ca63432, 0x3cadd37d, 0x3cb5a601, 0x3cbdac20, 0x3cc5e639, 0x3cce54ab, 0x3cd6f7d2, 0x3cdfd00e, 0x3ce8ddb9, 0x3cf2212c, 0x3cfb9ac1, 0x3d02a569, 0x3d0798dc, 0x3d0ca7e4, 0x3d11d2ae, 0x3d171963, 0x3d1c7c2e, 0x3d21fb3a, 0x3d2796af, 0x3d2d4ebb, 0x3d332380, 0x3d39152b, 0x3d3f23e3, 0x3d454fd0, 0x3d4b991c, 0x3d51ffeb, 0x3d588466, 0x3d5f26b7, 0x3d65e6fe, 0x3d6cc564, 0x3d73c210, 0x3d7add25, 0x3d810b65, 0x3d84b793, 0x3d88732e, 0x3d8c3e48, 0x3d9018f4, 0x3d940343, 0x3d97fd48, 0x3d9c0714, 0x3da020b9, 0x3da44a48, 0x3da883d6, 0x3daccd70, 0x3db12728, 0x3db59110, 0x3dba0b38, 0x3dbe95b2, 0x3dc3308f, 0x3dc7dbe0, 0x3dcc97b4, 0x3dd1641c, 0x3dd6412a, 0x3ddb2eec, 0x3de02d75, 0x3de53cd3, 0x3dea5d16, 0x3def8e52, 0x3df4d091, 0x3dfa23e5, 
0x3dff885e, 0x3e027f06, 0x3e05427f, 0x3e080ea2, 0x3e0ae376, 0x3e0dc104, 0x3e10a752, 0x3e139669, 0x3e168e50, 0x3e198f0e, 0x3e1c98ab, 0x3e1fab2e, 0x3e22c6a0, 0x3e25eb08, 0x3e29186a, 0x3e2c4ed0, 0x3e2f8e42, 0x3e32d6c4, 0x3e362861, 0x3e39831e, 0x3e3ce702, 0x3e405416, 0x3e43ca5e, 0x3e4749e4, 0x3e4ad2ae, 0x3e4e64c2, 0x3e520027, 0x3e55a4e6, 0x3e595303, 0x3e5d0a8a, 0x3e60cb7c, 0x3e6495e0, 0x3e6869bf, 0x3e6c4720, 0x3e702e08, 0x3e741e7f, 0x3e78188c, 0x3e7c1c34, 0x3e8014c0, 0x3e822039, 0x3e84308b, 0x3e8645b8, 0x3e885fc3, 0x3e8a7eb0, 0x3e8ca281, 0x3e8ecb3a, 0x3e90f8df, 0x3e932b72, 0x3e9562f6, 0x3e979f6f, 0x3e99e0e0, 0x3e9c274e, 0x3e9e72b8, 0x3ea0c322, 0x3ea31892, 0x3ea57308, 0x3ea7d28a, 0x3eaa3718, 0x3eaca0b7, 0x3eaf0f69, 0x3eb18332, 0x3eb3fc16, 0x3eb67a15, 0x3eb8fd34, 0x3ebb8576, 0x3ebe12de, 0x3ec0a56e, 0x3ec33d2a, 0x3ec5da14, 0x3ec87c30, 0x3ecb2380, 0x3ecdd008, 0x3ed081ca, 0x3ed338c9, 0x3ed5f508, 0x3ed8b68a, 0x3edb7d52, 0x3ede4962, 0x3ee11abe, 0x3ee3f168, 0x3ee6cd64, 0x3ee9aeb6, 0x3eec955d, 0x3eef815d, 0x3ef272ba, 0x3ef56976, 0x3ef86594, 0x3efb6717, 0x3efe6e02, 0x3f00bd2b, 0x3f02460c, 0x3f03d1a5, 0x3f055ff8, 0x3f06f105, 0x3f0884ce, 0x3f0a1b54, 0x3f0bb499, 0x3f0d509f, 0x3f0eef65, 0x3f1090ef, 0x3f12353c, 0x3f13dc50, 0x3f15862a, 0x3f1732cc, 0x3f18e237, 0x3f1a946d, 0x3f1c4970, 0x3f1e013f, 0x3f1fbbde, 0x3f21794c, 0x3f23398c, 0x3f24fca0, 0x3f26c286, 0x3f288b42, 0x3f2a56d3, 0x3f2c253d, 0x3f2df680, 0x3f2fca9d, 0x3f31a195, 0x3f337b6a, 0x3f35581e, 0x3f3737b1, 0x3f391a24, 0x3f3aff7a, 0x3f3ce7b2, 0x3f3ed2d0, 0x3f40c0d2, 0x3f42b1bc, 0x3f44a58e, 0x3f469c49, 0x3f4895ee, 0x3f4a9280, 0x3f4c91ff, 0x3f4e946c, 0x3f5099c8, 0x3f52a216, 0x3f54ad55, 0x3f56bb88, 0x3f58ccae, 0x3f5ae0cb, 0x3f5cf7de, 0x3f5f11ec, 0x3f612ef0, 0x3f634eef, 0x3f6571ea, 0x3f6797e1, 0x3f69c0d6, 0x3f6beccb, 0x3f6e1bc0, 0x3f704db6, 0x3f7282af, 0x3f74baac, 0x3f76f5ae, 0x3f7933b6, 0x3f7b74c6, 0x3f7db8de, 0x3f800000 }; static const float * const to_linear = (const float *)to_linear_u; static uint8_t to_srgb (float f) { uint8_t low 
= 0; uint8_t high = 255; while (high - low > 1) { uint8_t mid = (low + high) / 2; if (to_linear[mid] > f) high = mid; else low = mid; } if (to_linear[high] - f < f - to_linear[low]) return high; else return low; } static void fetch_scanline_a8r8g8b8_sRGB_float (bits_image_t * image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const uint32_t *bits = image->bits + y * image->rowstride; const uint32_t *pixel = bits + x; const uint32_t *end = pixel + width; argb_t *buffer = (argb_t *)b; while (pixel < end) { uint32_t p = READ (image, pixel++); argb_t *argb = buffer; argb->a = pixman_unorm_to_float ((p >> 24) & 0xff, 8); argb->r = to_linear [(p >> 16) & 0xff]; argb->g = to_linear [(p >> 8) & 0xff]; argb->b = to_linear [(p >> 0) & 0xff]; buffer++; } } static void fetch_scanline_r8g8b8_sRGB_float (bits_image_t * image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const uint8_t *bits = (uint8_t *)(image->bits + y * image->rowstride); argb_t *buffer = (argb_t *)b; int i; for (i = x; i < width; ++i) { uint32_t p = FETCH_24 (image, bits, i); argb_t *argb = buffer; argb->a = 1.0f; argb->r = to_linear[(p >> 16) & 0xff]; argb->g = to_linear[(p >> 8) & 0xff]; argb->b = to_linear[(p >> 0) & 0xff]; buffer++; } } /* Expects a float buffer */ static void fetch_scanline_a2r10g10b10_float (bits_image_t * image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const uint32_t *bits = image->bits + y * image->rowstride; const uint32_t *pixel = bits + x; const uint32_t *end = pixel + width; argb_t *buffer = (argb_t *)b; while (pixel < end) { uint32_t p = READ (image, pixel++); uint64_t a = p >> 30; uint64_t r = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t b = p & 0x3ff; buffer->a = pixman_unorm_to_float (a, 2); buffer->r = pixman_unorm_to_float (r, 10); buffer->g = pixman_unorm_to_float (g, 10); buffer->b = pixman_unorm_to_float (b, 10); buffer++; } } /* Expects a float buffer */ #ifndef PIXMAN_FB_ACCESSORS static void 
fetch_scanline_rgbf_float (bits_image_t *image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const float *bits = (float *)image->bits + y * image->rowstride; const float *pixel = bits + x * 3; argb_t *buffer = (argb_t *)b; for (; width--; buffer++) { buffer->r = *pixel++; buffer->g = *pixel++; buffer->b = *pixel++; buffer->a = 1.f; } } static void fetch_scanline_rgbaf_float (bits_image_t *image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const float *bits = (float *)image->bits + y * image->rowstride; const float *pixel = bits + x * 4; argb_t *buffer = (argb_t *)b; for (; width--; buffer++) { buffer->r = *pixel++; buffer->g = *pixel++; buffer->b = *pixel++; buffer->a = *pixel++; } } #endif static void fetch_scanline_x2r10g10b10_float (bits_image_t *image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const uint32_t *bits = image->bits + y * image->rowstride; const uint32_t *pixel = (uint32_t *)bits + x; const uint32_t *end = pixel + width; argb_t *buffer = (argb_t *)b; while (pixel < end) { uint32_t p = READ (image, pixel++); uint64_t r = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t b = p & 0x3ff; buffer->a = 1.0; buffer->r = pixman_unorm_to_float (r, 10); buffer->g = pixman_unorm_to_float (g, 10); buffer->b = pixman_unorm_to_float (b, 10); buffer++; } } /* Expects a float buffer */ static void fetch_scanline_a2b10g10r10_float (bits_image_t *image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const uint32_t *bits = image->bits + y * image->rowstride; const uint32_t *pixel = bits + x; const uint32_t *end = pixel + width; argb_t *buffer = (argb_t *)b; while (pixel < end) { uint32_t p = READ (image, pixel++); uint64_t a = p >> 30; uint64_t b = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t r = p & 0x3ff; buffer->a = pixman_unorm_to_float (a, 2); buffer->r = pixman_unorm_to_float (r, 10); buffer->g = pixman_unorm_to_float (g, 10); buffer->b = pixman_unorm_to_float (b, 
10); buffer++; } } /* Expects a float buffer */ static void fetch_scanline_x2b10g10r10_float (bits_image_t *image, int x, int y, int width, uint32_t * b, const uint32_t *mask) { const uint32_t *bits = image->bits + y * image->rowstride; const uint32_t *pixel = (uint32_t *)bits + x; const uint32_t *end = pixel + width; argb_t *buffer = (argb_t *)b; while (pixel < end) { uint32_t p = READ (image, pixel++); uint64_t b = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t r = p & 0x3ff; buffer->a = 1.0; buffer->r = pixman_unorm_to_float (r, 10); buffer->g = pixman_unorm_to_float (g, 10); buffer->b = pixman_unorm_to_float (b, 10); buffer++; } } static void fetch_scanline_yuy2 (bits_image_t *image, int x, int line, int width, uint32_t * buffer, const uint32_t *mask) { const uint32_t *bits = image->bits + image->rowstride * line; int i; for (i = 0; i < width; i++) { int16_t y, u, v; int32_t r, g, b; y = ((uint8_t *) bits)[(x + i) << 1] - 16; u = ((uint8_t *) bits)[(((x + i) << 1) & - 4) + 1] - 128; v = ((uint8_t *) bits)[(((x + i) << 1) & - 4) + 3] - 128; /* R = 1.164(Y - 16) + 1.596(V - 128) */ r = 0x012b27 * y + 0x019a2e * v; /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */ g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u; /* B = 1.164(Y - 16) + 2.018(U - 128) */ b = 0x012b27 * y + 0x0206a2 * u; *buffer++ = 0xff000000 | (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) | (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) | (b >= 0 ? b < 0x1000000 ? 
(b >> 16) & 0x0000ff : 0x0000ff : 0); } } static void fetch_scanline_yv12 (bits_image_t *image, int x, int line, int width, uint32_t * buffer, const uint32_t *mask) { YV12_SETUP (image); uint8_t *y_line = YV12_Y (line); uint8_t *u_line = YV12_U (line); uint8_t *v_line = YV12_V (line); int i; for (i = 0; i < width; i++) { int16_t y, u, v; int32_t r, g, b; y = y_line[x + i] - 16; u = u_line[(x + i) >> 1] - 128; v = v_line[(x + i) >> 1] - 128; /* R = 1.164(Y - 16) + 1.596(V - 128) */ r = 0x012b27 * y + 0x019a2e * v; /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */ g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u; /* B = 1.164(Y - 16) + 2.018(U - 128) */ b = 0x012b27 * y + 0x0206a2 * u; *buffer++ = 0xff000000 | (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) | (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) | (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0); } } /**************************** Pixel wise fetching *****************************/ #ifndef PIXMAN_FB_ACCESSORS static argb_t fetch_pixel_rgbf_float (bits_image_t *image, int offset, int line) { float *bits = (float *)image->bits + line * image->rowstride; argb_t argb; argb.r = bits[offset * 3]; argb.g = bits[offset * 3 + 1]; argb.b = bits[offset * 3 + 2]; argb.a = 1.f; return argb; } static argb_t fetch_pixel_rgbaf_float (bits_image_t *image, int offset, int line) { float *bits = (float *)image->bits + line * image->rowstride; argb_t argb; argb.r = bits[offset * 4]; argb.g = bits[offset * 4 + 1]; argb.b = bits[offset * 4 + 2]; argb.a = bits[offset * 4 + 3]; return argb; } #endif static argb_t fetch_pixel_x2r10g10b10_float (bits_image_t *image, int offset, int line) { uint32_t *bits = image->bits + line * image->rowstride; uint32_t p = READ (image, bits + offset); uint64_t r = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t b = p & 0x3ff; argb_t argb; argb.a = 1.0; argb.r = pixman_unorm_to_float (r, 10); argb.g = pixman_unorm_to_float (g, 10); argb.b = 
pixman_unorm_to_float (b, 10); return argb; } static argb_t fetch_pixel_a2r10g10b10_float (bits_image_t *image, int offset, int line) { uint32_t *bits = image->bits + line * image->rowstride; uint32_t p = READ (image, bits + offset); uint64_t a = p >> 30; uint64_t r = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t b = p & 0x3ff; argb_t argb; argb.a = pixman_unorm_to_float (a, 2); argb.r = pixman_unorm_to_float (r, 10); argb.g = pixman_unorm_to_float (g, 10); argb.b = pixman_unorm_to_float (b, 10); return argb; } static argb_t fetch_pixel_a2b10g10r10_float (bits_image_t *image, int offset, int line) { uint32_t *bits = image->bits + line * image->rowstride; uint32_t p = READ (image, bits + offset); uint64_t a = p >> 30; uint64_t b = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t r = p & 0x3ff; argb_t argb; argb.a = pixman_unorm_to_float (a, 2); argb.r = pixman_unorm_to_float (r, 10); argb.g = pixman_unorm_to_float (g, 10); argb.b = pixman_unorm_to_float (b, 10); return argb; } static argb_t fetch_pixel_x2b10g10r10_float (bits_image_t *image, int offset, int line) { uint32_t *bits = image->bits + line * image->rowstride; uint32_t p = READ (image, bits + offset); uint64_t b = (p >> 20) & 0x3ff; uint64_t g = (p >> 10) & 0x3ff; uint64_t r = p & 0x3ff; argb_t argb; argb.a = 1.0; argb.r = pixman_unorm_to_float (r, 10); argb.g = pixman_unorm_to_float (g, 10); argb.b = pixman_unorm_to_float (b, 10); return argb; } static argb_t fetch_pixel_a8r8g8b8_sRGB_float (bits_image_t *image, int offset, int line) { uint32_t *bits = image->bits + line * image->rowstride; uint32_t p = READ (image, bits + offset); argb_t argb; argb.a = pixman_unorm_to_float ((p >> 24) & 0xff, 8); argb.r = to_linear [(p >> 16) & 0xff]; argb.g = to_linear [(p >> 8) & 0xff]; argb.b = to_linear [(p >> 0) & 0xff]; return argb; } static argb_t fetch_pixel_r8g8b8_sRGB_float (bits_image_t *image, int offset, int line) { uint8_t *bits = (uint8_t *)(image->bits + line * image->rowstride); 
uint32_t p = FETCH_24 (image, bits, offset); argb_t argb; argb.a = 1.0f; argb.r = to_linear[(p >> 16) & 0xff]; argb.g = to_linear[(p >> 8) & 0xff]; argb.b = to_linear[(p >> 0) & 0xff]; return argb; } static uint32_t fetch_pixel_yuy2 (bits_image_t *image, int offset, int line) { const uint32_t *bits = image->bits + image->rowstride * line; int16_t y, u, v; int32_t r, g, b; y = ((uint8_t *) bits)[offset << 1] - 16; u = ((uint8_t *) bits)[((offset << 1) & - 4) + 1] - 128; v = ((uint8_t *) bits)[((offset << 1) & - 4) + 3] - 128; /* R = 1.164(Y - 16) + 1.596(V - 128) */ r = 0x012b27 * y + 0x019a2e * v; /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */ g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u; /* B = 1.164(Y - 16) + 2.018(U - 128) */ b = 0x012b27 * y + 0x0206a2 * u; return 0xff000000 | (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) | (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) | (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0); } static uint32_t fetch_pixel_yv12 (bits_image_t *image, int offset, int line) { YV12_SETUP (image); int16_t y = YV12_Y (line)[offset] - 16; int16_t u = YV12_U (line)[offset >> 1] - 128; int16_t v = YV12_V (line)[offset >> 1] - 128; int32_t r, g, b; /* R = 1.164(Y - 16) + 1.596(V - 128) */ r = 0x012b27 * y + 0x019a2e * v; /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */ g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u; /* B = 1.164(Y - 16) + 2.018(U - 128) */ b = 0x012b27 * y + 0x0206a2 * u; return 0xff000000 | (r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) | (g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) | (b >= 0 ? b < 0x1000000 ? 
(b >> 16) & 0x0000ff : 0x0000ff : 0); } /*********************************** Store ************************************/ #ifndef PIXMAN_FB_ACCESSORS static void store_scanline_rgbaf_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { float *bits = (float *)image->bits + image->rowstride * y + 4 * x; const argb_t *values = (argb_t *)v; for (; width; width--, values++) { *bits++ = values->r; *bits++ = values->g; *bits++ = values->b; *bits++ = values->a; } } static void store_scanline_rgbf_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { float *bits = (float *)image->bits + image->rowstride * y + 3 * x; const argb_t *values = (argb_t *)v; for (; width; width--, values++) { *bits++ = values->r; *bits++ = values->g; *bits++ = values->b; } } #endif static void store_scanline_a2r10g10b10_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { uint32_t *bits = image->bits + image->rowstride * y; uint32_t *pixel = bits + x; argb_t *values = (argb_t *)v; int i; for (i = 0; i < width; ++i) { uint32_t a, r, g, b; a = pixman_float_to_unorm (values[i].a, 2); r = pixman_float_to_unorm (values[i].r, 10); g = pixman_float_to_unorm (values[i].g, 10); b = pixman_float_to_unorm (values[i].b, 10); WRITE (image, pixel++, (a << 30) | (r << 20) | (g << 10) | b); } } static void store_scanline_x2r10g10b10_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { uint32_t *bits = image->bits + image->rowstride * y; uint32_t *pixel = bits + x; argb_t *values = (argb_t *)v; int i; for (i = 0; i < width; ++i) { uint32_t r, g, b; r = pixman_float_to_unorm (values[i].r, 10); g = pixman_float_to_unorm (values[i].g, 10); b = pixman_float_to_unorm (values[i].b, 10); WRITE (image, pixel++, (r << 20) | (g << 10) | b); } } static void store_scanline_a2b10g10r10_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { uint32_t *bits = image->bits + image->rowstride * y; uint32_t *pixel = bits + x; 
argb_t *values = (argb_t *)v; int i; for (i = 0; i < width; ++i) { uint32_t a, r, g, b; a = pixman_float_to_unorm (values[i].a, 2); r = pixman_float_to_unorm (values[i].r, 10); g = pixman_float_to_unorm (values[i].g, 10); b = pixman_float_to_unorm (values[i].b, 10); WRITE (image, pixel++, (a << 30) | (b << 20) | (g << 10) | r); } } static void store_scanline_x2b10g10r10_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { uint32_t *bits = image->bits + image->rowstride * y; uint32_t *pixel = bits + x; argb_t *values = (argb_t *)v; int i; for (i = 0; i < width; ++i) { uint32_t r, g, b; r = pixman_float_to_unorm (values[i].r, 10); g = pixman_float_to_unorm (values[i].g, 10); b = pixman_float_to_unorm (values[i].b, 10); WRITE (image, pixel++, (b << 20) | (g << 10) | r); } } static void store_scanline_a8r8g8b8_sRGB_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { uint32_t *bits = image->bits + image->rowstride * y; uint32_t *pixel = bits + x; argb_t *values = (argb_t *)v; int i; for (i = 0; i < width; ++i) { uint32_t a, r, g, b; a = pixman_float_to_unorm (values[i].a, 8); r = to_srgb (values[i].r); g = to_srgb (values[i].g); b = to_srgb (values[i].b); WRITE (image, pixel++, (a << 24) | (r << 16) | (g << 8) | b); } } static void store_scanline_r8g8b8_sRGB_float (bits_image_t * image, int x, int y, int width, const uint32_t *v) { uint8_t *bits = (uint8_t *)(image->bits + image->rowstride * y) + 3 * x; argb_t *values = (argb_t *)v; int i; for (i = 0; i < width; ++i) { uint32_t r, g, b, rgb; r = to_srgb (values[i].r); g = to_srgb (values[i].g); b = to_srgb (values[i].b); rgb = (r << 16) | (g << 8) | b; STORE_24 (image, bits, i, rgb); } } /* * Contracts a floating point image to 32bpp and then stores it using a * regular 32-bit store proc. Despite the type, this function expects an * argb_t buffer. 
 */
static void
store_scanline_generic_float (bits_image_t *  image,
			      int	      x,
			      int	      y,
			      int	      width,
			      const uint32_t *values)
{
    uint32_t *argb8_pixels;

    assert (image->common.type == BITS);

    argb8_pixels = pixman_malloc_ab (width, sizeof(uint32_t));
    if (!argb8_pixels)
	return; /* OOM: store is silently dropped (no error channel here) */

    /* Contract the scanline.  We could do this in place if values weren't
     * const.
     */
    pixman_contract_from_float (argb8_pixels, (argb_t *)values, width);

    image->store_scanline_32 (image, x, y, width, argb8_pixels);

    free (argb8_pixels);
}

/* Fetch via the narrow 32-bit path, then expand in place into argb_t
 * floats (the caller's buffer must be argb_t-sized).
 */
static void
fetch_scanline_generic_float (bits_image_t *  image,
			      int	      x,
			      int	      y,
			      int	      width,
			      uint32_t *      buffer,
			      const uint32_t *mask)
{
    image->fetch_scanline_32 (image, x, y, width, buffer, NULL);

    pixman_expand_to_float ((argb_t *)buffer, buffer, image->format, width);
}

/* The 32_sRGB paths should be deleted after narrow processing
 * is no longer invoked for formats that are considered wide.
 * (Also see fetch_pixel_generic_lossy_32) */
static void
fetch_scanline_a8r8g8b8_32_sRGB (bits_image_t *image,
				 int	        x,
				 int	        y,
				 int	        width,
				 uint32_t      *buffer,
				 const uint32_t *mask)
{
    const uint32_t *bits = image->bits + y * image->rowstride;
    const uint32_t *pixel = (uint32_t *)bits + x;
    const uint32_t *end = pixel + width;
    uint32_t tmp;

    while (pixel < end)
    {
	uint32_t a, r, g, b;

	tmp = READ (image, pixel++);

	a = (tmp >> 24) & 0xff;
	r = (tmp >> 16) & 0xff;
	g = (tmp >> 8) & 0xff;
	b = (tmp >> 0) & 0xff;

	/* sRGB decode, rescaled to 0..255 with round-to-nearest (+0.5,
	 * then implicit float->uint truncation). */
	r = to_linear[r] * 255.0f + 0.5f;
	g = to_linear[g] * 255.0f + 0.5f;
	b = to_linear[b] * 255.0f + 0.5f;

	*buffer++ = (a << 24) | (r << 16) | (g << 8) | (b << 0);
    }
}

static void
fetch_scanline_r8g8b8_32_sRGB (bits_image_t *image,
			       int	      x,
			       int	      y,
			       int	      width,
			       uint32_t      *buffer,
			       const uint32_t *mask)
{
    const uint8_t *bits =
	(uint8_t *)(image->bits + y * image->rowstride) + 3 * x;
    uint32_t tmp;
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t a, r, g, b;

	tmp = FETCH_24 (image, bits, i);

	a = 0xff; /* r8g8b8 has no alpha channel: treat as opaque */
	r = (tmp >> 16) & 0xff;
	g = (tmp >> 8) & 0xff;
	b = (tmp >> 0) & 0xff;

	r = to_linear[r] * 255.0f + 0.5f;
	g = to_linear[g]
* 255.0f + 0.5f;
	b = to_linear[b] * 255.0f + 0.5f;

	*buffer++ = (a << 24) | (r << 16) | (g << 8) | (b << 0);
    }
}

/* Single-pixel variant of fetch_scanline_a8r8g8b8_32_sRGB above. */
static uint32_t
fetch_pixel_a8r8g8b8_32_sRGB (bits_image_t *image,
			      int	    offset,
			      int	    line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t tmp = READ (image, bits + offset);
    uint32_t a, r, g, b;

    a = (tmp >> 24) & 0xff;
    r = (tmp >> 16) & 0xff;
    g = (tmp >> 8) & 0xff;
    b = (tmp >> 0) & 0xff;

    r = to_linear[r] * 255.0f + 0.5f;
    g = to_linear[g] * 255.0f + 0.5f;
    b = to_linear[b] * 255.0f + 0.5f;

    return (a << 24) | (r << 16) | (g << 8) | (b << 0);
}

/* Single-pixel variant of fetch_scanline_r8g8b8_32_sRGB above. */
static uint32_t
fetch_pixel_r8g8b8_32_sRGB (bits_image_t *image,
			    int		  offset,
			    int		  line)
{
    uint8_t *bits = (uint8_t *)(image->bits + line * image->rowstride);
    uint32_t tmp = FETCH_24 (image, bits, offset);
    uint32_t a, r, g, b;

    a = 0xff;
    r = (tmp >> 16) & 0xff;
    g = (tmp >> 8) & 0xff;
    b = (tmp >> 0) & 0xff;

    r = to_linear[r] * 255.0f + 0.5f;
    g = to_linear[g] * 255.0f + 0.5f;
    b = to_linear[b] * 255.0f + 0.5f;

    return (a << 24) | (r << 16) | (g << 8) | (b << 0);
}

static void
store_scanline_a8r8g8b8_32_sRGB (bits_image_t *image,
				 int	       x,
				 int	       y,
				 int	       width,
				 const uint32_t *v)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    /* NOTE(review): @v is a 32-bit pixel buffer, yet it is read through
     * a uint64_t pointer below (values[i]/tmp consume 8 bytes per
     * pixel).  That looks wrong for a 32bpp scanline — confirm against
     * upstream whether uint32_t was intended.
     */
    uint64_t *values = (uint64_t *)v;
    uint32_t *pixel = bits + x;
    uint64_t tmp;
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t a, r, g, b;

	tmp = values[i];

	a = (tmp >> 24) & 0xff;
	r = (tmp >> 16) & 0xff;
	g = (tmp >> 8) & 0xff;
	b = (tmp >> 0) & 0xff;

	r = to_srgb (r * (1/255.0f));
	g = to_srgb (g * (1/255.0f));
	b = to_srgb (b * (1/255.0f));

	/* NOTE(review): alpha is OR'd in unshifted, landing in bits 7:0
	 * on top of blue, while the fetch path uses (a << 24).  Looks
	 * like (a << 24) was intended — confirm before changing.
	 */
	WRITE (image, pixel++, a | (r << 16) | (g << 8) | (b << 0));
    }
}

static void
store_scanline_r8g8b8_32_sRGB (bits_image_t *image,
			       int	      x,
			       int	      y,
			       int	      width,
			       const uint32_t *v)
{
    uint8_t *bits =
	(uint8_t *)(image->bits + image->rowstride * y) + 3 * x;
    /* NOTE(review): same suspicious uint64_t view of a 32-bit buffer as
     * in store_scanline_a8r8g8b8_32_sRGB above — verify.
     */
    uint64_t *values = (uint64_t *)v;
    uint64_t tmp;
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t r, g, b;

	tmp = values[i];

	r = (tmp >> 16) & 0xff;
	g = (tmp >> 8) & 0xff;
	b = (tmp >> 0) & 0xff;

	r = to_srgb (r *
(1/255.0f)); g = to_srgb (g * (1/255.0f)); b = to_srgb (b * (1/255.0f)); STORE_24 (image, bits, i, (r << 16) | (g << 8) | (b << 0)); } } static argb_t fetch_pixel_generic_float (bits_image_t *image, int offset, int line) { uint32_t pixel32 = image->fetch_pixel_32 (image, offset, line); argb_t f; pixman_expand_to_float (&f, &pixel32, image->format, 1); return f; } /* * XXX: The transformed fetch path only works at 32-bpp so far. When all * paths have wide versions, this can be removed. * * WARNING: This function loses precision! */ static uint32_t fetch_pixel_generic_lossy_32 (bits_image_t *image, int offset, int line) { argb_t pixel64 = image->fetch_pixel_float (image, offset, line); uint32_t result; pixman_contract_from_float (&result, &pixel64, 1); return result; } typedef struct { pixman_format_code_t format; fetch_scanline_t fetch_scanline_32; fetch_scanline_t fetch_scanline_float; fetch_pixel_32_t fetch_pixel_32; fetch_pixel_float_t fetch_pixel_float; store_scanline_t store_scanline_32; store_scanline_t store_scanline_float; } format_info_t; #define FORMAT_INFO(format) \ { \ PIXMAN_ ## format, \ fetch_scanline_ ## format, \ fetch_scanline_generic_float, \ fetch_pixel_ ## format, \ fetch_pixel_generic_float, \ store_scanline_ ## format, \ store_scanline_generic_float \ } static const format_info_t accessors[] = { /* 32 bpp formats */ FORMAT_INFO (a8r8g8b8), FORMAT_INFO (x8r8g8b8), FORMAT_INFO (a8b8g8r8), FORMAT_INFO (x8b8g8r8), FORMAT_INFO (b8g8r8a8), FORMAT_INFO (b8g8r8x8), FORMAT_INFO (r8g8b8a8), FORMAT_INFO (r8g8b8x8), FORMAT_INFO (x14r6g6b6), /* sRGB formats */ { PIXMAN_a8r8g8b8_sRGB, fetch_scanline_a8r8g8b8_32_sRGB, fetch_scanline_a8r8g8b8_sRGB_float, fetch_pixel_a8r8g8b8_32_sRGB, fetch_pixel_a8r8g8b8_sRGB_float, store_scanline_a8r8g8b8_32_sRGB, store_scanline_a8r8g8b8_sRGB_float, }, { PIXMAN_r8g8b8_sRGB, fetch_scanline_r8g8b8_32_sRGB, fetch_scanline_r8g8b8_sRGB_float, fetch_pixel_r8g8b8_32_sRGB, fetch_pixel_r8g8b8_sRGB_float, 
store_scanline_r8g8b8_32_sRGB, store_scanline_r8g8b8_sRGB_float, }, /* 24bpp formats */ FORMAT_INFO (r8g8b8), FORMAT_INFO (b8g8r8), /* 16bpp formats */ FORMAT_INFO (r5g6b5), FORMAT_INFO (b5g6r5), FORMAT_INFO (a1r5g5b5), FORMAT_INFO (x1r5g5b5), FORMAT_INFO (a1b5g5r5), FORMAT_INFO (x1b5g5r5), FORMAT_INFO (a4r4g4b4), FORMAT_INFO (x4r4g4b4), FORMAT_INFO (a4b4g4r4), FORMAT_INFO (x4b4g4r4), /* 8bpp formats */ FORMAT_INFO (a8), FORMAT_INFO (r3g3b2), FORMAT_INFO (b2g3r3), FORMAT_INFO (a2r2g2b2), FORMAT_INFO (a2b2g2r2), FORMAT_INFO (c8), FORMAT_INFO (g8), #define fetch_scanline_x4c4 fetch_scanline_c8 #define fetch_pixel_x4c4 fetch_pixel_c8 #define store_scanline_x4c4 store_scanline_c8 FORMAT_INFO (x4c4), #define fetch_scanline_x4g4 fetch_scanline_g8 #define fetch_pixel_x4g4 fetch_pixel_g8 #define store_scanline_x4g4 store_scanline_g8 FORMAT_INFO (x4g4), FORMAT_INFO (x4a4), /* 4bpp formats */ FORMAT_INFO (a4), FORMAT_INFO (r1g2b1), FORMAT_INFO (b1g2r1), FORMAT_INFO (a1r1g1b1), FORMAT_INFO (a1b1g1r1), FORMAT_INFO (c4), FORMAT_INFO (g4), /* 1bpp formats */ FORMAT_INFO (a1), FORMAT_INFO (g1), /* Wide formats */ #ifndef PIXMAN_FB_ACCESSORS { PIXMAN_rgba_float, NULL, fetch_scanline_rgbaf_float, fetch_pixel_generic_lossy_32, fetch_pixel_rgbaf_float, NULL, store_scanline_rgbaf_float }, { PIXMAN_rgb_float, NULL, fetch_scanline_rgbf_float, fetch_pixel_generic_lossy_32, fetch_pixel_rgbf_float, NULL, store_scanline_rgbf_float }, #endif { PIXMAN_a2r10g10b10, NULL, fetch_scanline_a2r10g10b10_float, fetch_pixel_generic_lossy_32, fetch_pixel_a2r10g10b10_float, NULL, store_scanline_a2r10g10b10_float }, { PIXMAN_x2r10g10b10, NULL, fetch_scanline_x2r10g10b10_float, fetch_pixel_generic_lossy_32, fetch_pixel_x2r10g10b10_float, NULL, store_scanline_x2r10g10b10_float }, { PIXMAN_a2b10g10r10, NULL, fetch_scanline_a2b10g10r10_float, fetch_pixel_generic_lossy_32, fetch_pixel_a2b10g10r10_float, NULL, store_scanline_a2b10g10r10_float }, { PIXMAN_x2b10g10r10, NULL, fetch_scanline_x2b10g10r10_float, 
fetch_pixel_generic_lossy_32, fetch_pixel_x2b10g10r10_float, NULL, store_scanline_x2b10g10r10_float }, /* YUV formats */ { PIXMAN_yuy2, fetch_scanline_yuy2, fetch_scanline_generic_float, fetch_pixel_yuy2, fetch_pixel_generic_float, NULL, NULL }, { PIXMAN_yv12, fetch_scanline_yv12, fetch_scanline_generic_float, fetch_pixel_yv12, fetch_pixel_generic_float, NULL, NULL }, { PIXMAN_null }, }; static void setup_accessors (bits_image_t *image) { const format_info_t *info = accessors; while (info->format != PIXMAN_null) { if (info->format == image->format) { image->fetch_scanline_32 = info->fetch_scanline_32; image->fetch_scanline_float = info->fetch_scanline_float; image->fetch_pixel_32 = info->fetch_pixel_32; image->fetch_pixel_float = info->fetch_pixel_float; image->store_scanline_32 = info->store_scanline_32; image->store_scanline_float = info->store_scanline_float; return; } info++; } } #ifndef PIXMAN_FB_ACCESSORS void _pixman_bits_image_setup_accessors_accessors (bits_image_t *image); void _pixman_bits_image_setup_accessors (bits_image_t *image) { if (image->read_func || image->write_func) _pixman_bits_image_setup_accessors_accessors (image); else setup_accessors (image); } #else void _pixman_bits_image_setup_accessors_accessors (bits_image_t *image) { setup_accessors (image); } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-accessor.h0000664000175000017500000000120714712446423017657 0ustar00mattst88mattst88#ifdef PIXMAN_FB_ACCESSORS #define READ(img, ptr) \ (((bits_image_t *)(img))->read_func ((ptr), sizeof(*(ptr)))) #define WRITE(img, ptr,val) \ (((bits_image_t *)(img))->write_func ((ptr), (val), sizeof (*(ptr)))) #define MEMSET_WRAPPED(img, dst, val, size) \ do { \ size_t _i; \ uint8_t *_dst = (uint8_t*)(dst); \ for(_i = 0; _i < (size_t) size; _i++) { \ WRITE((img), _dst +_i, (val)); \ } \ } while (0) #else #define READ(img, ptr) (*(ptr)) #define WRITE(img, ptr, val) (*(ptr) = (val)) 
#define MEMSET_WRAPPED(img, dst, val, size) \ memset(dst, val, size) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-asm.h0000664000175000017500000000720014712446423017411 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Mozilla Corporation * Copyright Âİ 2010 Nokia Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Mozilla Corporation not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Mozilla Corporation makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
* * Author: Jeff Muizelaar (jeff@infidigm.net) * */ #ifndef PIXMAN_ARM_ASM_H #define PIXMAN_ARM_ASM_H #include "pixman-config.h" /* * References: * - https://developer.arm.com/documentation/101028/0012/5--Feature-test-macros * - https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst */ #if defined(__ARM_FEATURE_BTI_DEFAULT) && __ARM_FEATURE_BTI_DEFAULT == 1 #define BTI_C hint 34 /* bti c: for calls, IE bl instructions */ #define GNU_PROPERTY_AARCH64_BTI 1 /* bit 0 GNU Notes is for BTI support */ #else #define BTI_C #define GNU_PROPERTY_AARCH64_BTI 0 #endif #if defined(__ARM_FEATURE_PAC_DEFAULT) #if __ARM_FEATURE_PAC_DEFAULT & 1 #define SIGN_LR hint 25 /* paciasp: sign with the A key */ #define VERIFY_LR hint 29 /* autiasp: verify with the b key */ #elif __ARM_FEATURE_PAC_DEFAULT & 2 #define SIGN_LR hint 27 /* pacibsp: sign with the b key */ #define VERIFY_LR hint 31 /* autibsp: verify with the b key */ #endif #define GNU_PROPERTY_AARCH64_POINTER_AUTH 2 /* bit 1 GNU Notes is for PAC support */ #else #define SIGN_LR BTI_C #define VERIFY_LR #define GNU_PROPERTY_AARCH64_POINTER_AUTH 0 #endif /* Add the BTI support to GNU Notes section for ASM files */ #if GNU_PROPERTY_AARCH64_BTI != 0 || GNU_PROPERTY_AARCH64_POINTER_AUTH != 0 .pushsection .note.gnu.property, "a"; /* Start a new allocatable section */ .balign 8; /* align it on a byte boundry */ .long 4; /* size of "GNU\0" */ .long 0x10; /* size of descriptor */ .long 0x5; /* NT_GNU_PROPERTY_TYPE_0 */ .asciz "GNU"; .long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ .long 4; /* Four bytes of data */ .long (GNU_PROPERTY_AARCH64_BTI|GNU_PROPERTY_AARCH64_POINTER_AUTH); /* BTI or PAC is enabled */ .long 0; /* padding for 8 byte alignment */ .popsection; /* end the section */ #endif /* Supplementary macro for setting function attributes */ .macro pixman_asm_function_impl fname #ifdef ASM_HAVE_FUNC_DIRECTIVE .func \fname #endif .global \fname #ifdef __ELF__ .hidden \fname .type \fname, %function #endif 
\fname: SIGN_LR .endm .macro pixman_asm_function fname #ifdef ASM_LEADING_UNDERSCORE pixman_asm_function_impl _\fname #else pixman_asm_function_impl \fname #endif .endm .macro pixman_syntax_unified #ifdef ASM_HAVE_SYNTAX_UNIFIED .syntax unified #endif .endm .macro pixman_end_asm_function #ifdef ASM_HAVE_FUNC_DIRECTIVE .endfunc #endif .endm #endif /* PIXMAN_ARM_ASM_H */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-common.h0000664000175000017500000006762014712446423020135 0ustar00mattst88mattst88/* * Copyright Âİ 2010 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ #ifndef PIXMAN_ARM_COMMON_H #define PIXMAN_ARM_COMMON_H #include "pixman-inlines.h" /* Define some macros which can expand into proxy functions between * ARM assembly optimized functions and the rest of pixman fast path API. 
* * All the low level ARM assembly functions have to use ARM EABI * calling convention and take up to 8 arguments: * width, height, dst, dst_stride, src, src_stride, mask, mask_stride * * The arguments are ordered with the most important coming first (the * first 4 arguments are passed to function in registers, the rest are * on stack). The last arguments are optional, for example if the * function is not using mask, then 'mask' and 'mask_stride' can be * omitted when doing a function call. * * Arguments 'src' and 'mask' contain either a pointer to the top left * pixel of the composited rectangle or a pixel color value depending * on the function type. In the case of just a color value (solid source * or mask), the corresponding stride argument is unused. */ #define SKIP_ZERO_SRC 1 #define SKIP_ZERO_MASK 2 #define PIXMAN_ARM_BIND_FAST_PATH_SRC_DST(cputype, name, \ src_type, src_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_##cputype (int32_t w, \ int32_t h, \ dst_type *dst, \ int32_t dst_stride, \ src_type *src, \ int32_t src_stride); \ \ static void \ cputype##_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line; \ src_type *src_line; \ int32_t dst_stride, src_stride; \ \ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \ src_stride, src_line, src_cnt); \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ \ pixman_composite_##name##_asm_##cputype (width, height, \ dst_line, dst_stride, \ src_line, src_stride); \ } #define PIXMAN_ARM_BIND_FAST_PATH_N_DST(flags, cputype, name, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_##cputype (int32_t w, \ int32_t h, \ dst_type *dst, \ int32_t dst_stride, \ uint32_t src); \ \ static void \ cputype##_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line; \ int32_t 
dst_stride; \ uint32_t src; \ \ src = _pixman_image_get_solid ( \ imp, src_image, dest_image->bits.format); \ \ if ((flags & SKIP_ZERO_SRC) && src == 0) \ return; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ \ pixman_composite_##name##_asm_##cputype (width, height, \ dst_line, dst_stride, \ src); \ } #define PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST(flags, cputype, name, \ mask_type, mask_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_##cputype (int32_t w, \ int32_t h, \ dst_type *dst, \ int32_t dst_stride, \ uint32_t src, \ int32_t unused, \ mask_type *mask, \ int32_t mask_stride); \ \ static void \ cputype##_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line; \ mask_type *mask_line; \ int32_t dst_stride, mask_stride; \ uint32_t src; \ \ src = _pixman_image_get_solid ( \ imp, src_image, dest_image->bits.format); \ \ if ((flags & SKIP_ZERO_SRC) && src == 0) \ return; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \ mask_stride, mask_line, mask_cnt); \ \ pixman_composite_##name##_asm_##cputype (width, height, \ dst_line, dst_stride, \ src, 0, \ mask_line, mask_stride); \ } #define PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST(flags, cputype, name, \ src_type, src_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_##cputype (int32_t w, \ int32_t h, \ dst_type *dst, \ int32_t dst_stride, \ src_type *src, \ int32_t src_stride, \ uint32_t mask); \ \ static void \ cputype##_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line; \ src_type *src_line; \ int32_t dst_stride, src_stride; \ uint32_t mask; \ \ mask = _pixman_image_get_solid ( \ imp, mask_image, dest_image->bits.format); \ \ if ((flags & SKIP_ZERO_MASK) && 
mask == 0) \ return; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \ src_stride, src_line, src_cnt); \ \ pixman_composite_##name##_asm_##cputype (width, height, \ dst_line, dst_stride, \ src_line, src_stride, \ mask); \ } #define PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST(cputype, name, \ src_type, src_cnt, \ mask_type, mask_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_##cputype (int32_t w, \ int32_t h, \ dst_type *dst, \ int32_t dst_stride, \ src_type *src, \ int32_t src_stride, \ mask_type *mask, \ int32_t mask_stride); \ \ static void \ cputype##_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line; \ src_type *src_line; \ mask_type *mask_line; \ int32_t dst_stride, src_stride, mask_stride; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \ src_stride, src_line, src_cnt); \ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \ mask_stride, mask_line, mask_cnt); \ \ pixman_composite_##name##_asm_##cputype (width, height, \ dst_line, dst_stride, \ src_line, src_stride, \ mask_line, mask_stride); \ } #define PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST(cputype, name, op, \ src_type, dst_type) \ void \ pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype ( \ int32_t w, \ dst_type * dst, \ const src_type * src, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx); \ \ static force_inline void \ scaled_nearest_scanline_##cputype##_##name##_##op (dst_type * pd, \ const src_type * ps, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype (w, pd, ps, \ vx, unit_x, \ max_vx); \ } \ \ 
FAST_NEAREST_MAINLOOP (cputype##_##name##_cover_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op, \ src_type, dst_type, COVER) \ FAST_NEAREST_MAINLOOP (cputype##_##name##_none_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op, \ src_type, dst_type, NONE) \ FAST_NEAREST_MAINLOOP (cputype##_##name##_pad_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op, \ src_type, dst_type, PAD) \ FAST_NEAREST_MAINLOOP (cputype##_##name##_normal_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op, \ src_type, dst_type, NORMAL) #define PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST(flags, cputype, name, op, \ src_type, dst_type) \ void \ pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype ( \ int32_t w, \ dst_type * dst, \ const src_type * src, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ const uint8_t * mask); \ \ static force_inline void \ scaled_nearest_scanline_##cputype##_##name##_##op (const uint8_t * mask, \ dst_type * pd, \ const src_type * ps, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ if ((flags & SKIP_ZERO_SRC) && zero_src) \ return; \ pixman_scaled_nearest_scanline_##name##_##op##_asm_##cputype (w, pd, ps, \ vx, unit_x, \ max_vx, \ mask); \ } \ \ FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_cover_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op,\ src_type, uint8_t, dst_type, COVER, TRUE, FALSE)\ FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_none_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op,\ src_type, uint8_t, dst_type, NONE, TRUE, FALSE) \ FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_pad_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op,\ src_type, uint8_t, dst_type, PAD, TRUE, FALSE) \ FAST_NEAREST_MAINLOOP_COMMON (cputype##_##name##_normal_##op, \ scaled_nearest_scanline_##cputype##_##name##_##op,\ src_type, uint8_t, dst_type, NORMAL, TRUE, FALSE) /* Provide entries for the fast path table */ 
#define PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH(op,s,d,func) \ SIMPLE_NEAREST_A8_MASK_FAST_PATH (op,s,d,func), \ SIMPLE_NEAREST_A8_MASK_FAST_PATH_NORMAL (op,s,d,func) /*****************************************************************************/ #define PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST(flags, cputype, name, op, \ src_type, dst_type) \ void \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_##cputype ( \ dst_type * dst, \ const src_type * top, \ const src_type * bottom, \ int wt, \ int wb, \ pixman_fixed_t x, \ pixman_fixed_t ux, \ int width); \ \ static force_inline void \ scaled_bilinear_scanline_##cputype##_##name##_##op ( \ dst_type * dst, \ const uint32_t * mask, \ const src_type * src_top, \ const src_type * src_bottom, \ int32_t w, \ int wt, \ int wb, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ if ((flags & SKIP_ZERO_SRC) && zero_src) \ return; \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_##cputype ( \ dst, src_top, src_bottom, wt, wb, vx, unit_x, w); \ } \ \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_cover_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint32_t, dst_type, COVER, FLAG_NONE) \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_none_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint32_t, dst_type, NONE, FLAG_NONE) \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_pad_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint32_t, dst_type, PAD, FLAG_NONE) \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_normal_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint32_t, dst_type, NORMAL, \ FLAG_NONE) #define PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST(flags, cputype, name, op, \ src_type, dst_type) \ void \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_##cputype ( \ dst_type * dst, \ const uint8_t * mask, \ const src_type * top, \ const src_type * bottom, \ 
int wt, \ int wb, \ pixman_fixed_t x, \ pixman_fixed_t ux, \ int width); \ \ static force_inline void \ scaled_bilinear_scanline_##cputype##_##name##_##op ( \ dst_type * dst, \ const uint8_t * mask, \ const src_type * src_top, \ const src_type * src_bottom, \ int32_t w, \ int wt, \ int wb, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ if ((flags & SKIP_ZERO_SRC) && zero_src) \ return; \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_##cputype ( \ dst, mask, src_top, src_bottom, wt, wb, vx, unit_x, w); \ } \ \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_cover_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint8_t, dst_type, COVER, \ FLAG_HAVE_NON_SOLID_MASK) \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_none_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint8_t, dst_type, NONE, \ FLAG_HAVE_NON_SOLID_MASK) \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_pad_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint8_t, dst_type, PAD, \ FLAG_HAVE_NON_SOLID_MASK) \ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_normal_##op, \ scaled_bilinear_scanline_##cputype##_##name##_##op, \ src_type, uint8_t, dst_type, NORMAL, \ FLAG_HAVE_NON_SOLID_MASK) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-detect-win32.asm0000664000175000017500000000072114712446423021373 0ustar00mattst88mattst88 area pixman_msvc, code, readonly export pixman_msvc_try_arm_simd_op pixman_msvc_try_arm_simd_op ;; I don't think the msvc arm asm knows how to do SIMD insns ;; uqadd8 r3,r3,r3 dcd 0xe6633f93 mov pc,lr endp export pixman_msvc_try_arm_neon_op pixman_msvc_try_arm_neon_op ;; I don't think the msvc arm asm knows how to do NEON insns ;; veor d0,d0,d0 dcd 0xf3000110 mov pc,lr endp end ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 
mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-neon-asm-bilinear.S0000664000175000017500000013105514712446423022112 0ustar00mattst88mattst88/* * Copyright Âİ 2011 SCore Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) * Author: Taekyun Kim (tkq.kim@samsung.com) */ /* * This file contains scaled bilinear scanline functions implemented * using older siarhei's bilinear macro template. * * << General scanline function procedures >> * 1. bilinear interpolate source pixels * 2. load mask pixels * 3. load destination pixels * 4. duplicate mask to fill whole register * 5. interleave source & destination pixels * 6. apply mask to source pixels * 7. combine source & destination pixels * 8, Deinterleave final result * 9. store destination pixels * * All registers with single number (i.e. src0, tmp0) are 64-bits registers. 
* Registers with double numbers(src01, dst01) are 128-bits registers. * All temp registers can be used freely outside the code block. * Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks. * * Remarks * There can be lots of pipeline stalls inside code block and between code blocks. * Further optimizations will be done by new macro templates using head/tail_head/tail scheme. */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined (__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .fpu neon .arch armv7a .object_arch armv4 .eabi_attribute 10, 0 .eabi_attribute 12, 0 .arm .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arm-neon-asm.h" pixman_syntax_unified /* * Bilinear macros from pixman-arm-neon-asm.S */ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. 
*/ .macro bilinear_load_8888 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vld1.32 {\reg1}, [TMP1], STRIDE vld1.32 {\reg2}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 vld1.32 {\reg2[0]}, [TMP1], STRIDE vld1.32 {\reg2[1]}, [TMP1] convert_four_0565_to_x888_packed \reg2, \reg1, \reg2, \tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 \reg1, \reg2, \tmp1 vmull.u8 \acc1, \reg1, d28 vmlal.u8 \acc1, \reg2, d29 bilinear_load_8888 \reg3, \reg4, \tmp2 vmull.u8 \acc2, \reg3, d28 vmlal.u8 \acc2, \reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \xacc1, \xacc2, \xreg1, \xreg2, \xreg3, \xreg4, \xacc2lo, \xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \yacc1, \yacc2, \yreg1, \yreg2, \yreg3, \yreg4, \yacc2lo, \yacc2hi .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {\acc2lo[0]}, [TMP1], STRIDE vld1.32 {\acc2hi[0]}, [TMP2], STRIDE vld1.32 {\acc2lo[1]}, [TMP1] vld1.32 {\acc2hi[1]}, [TMP2] convert_0565_to_x888 \acc2, \reg3, \reg2, \reg1 vzip.u8 \reg1, \reg3 vzip.u8 \reg2, \reg4 vzip.u8 \reg3, \reg4 vzip.u8 \reg1, \reg2 vmull.u8 \acc1, \reg1, d28 vmlal.u8 \acc1, \reg2, d29 vmull.u8 \acc2, \reg3, d28 vmlal.u8 \acc2, \reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX 
add TMP2, TOP, TMP2, asl #1 vld1.32 {\xacc2lo[0]}, [TMP1], STRIDE vld1.32 {\xacc2hi[0]}, [TMP2], STRIDE vld1.32 {\xacc2lo[1]}, [TMP1] vld1.32 {\xacc2hi[1]}, [TMP2] convert_0565_to_x888 \xacc2, \xreg3, \xreg2, \xreg1 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {\yacc2lo[0]}, [TMP1], STRIDE vzip.u8 \xreg1, \xreg3 vld1.32 {\yacc2hi[0]}, [TMP2], STRIDE vzip.u8 \xreg2, \xreg4 vld1.32 {\yacc2lo[1]}, [TMP1] vzip.u8 \xreg3, \xreg4 vld1.32 {\yacc2hi[1]}, [TMP2] vzip.u8 \xreg1, \xreg2 convert_0565_to_x888 \yacc2, \yreg3, \yreg2, \yreg1 vmull.u8 \xacc1, \xreg1, d28 vzip.u8 \yreg1, \yreg3 vmlal.u8 \xacc1, \xreg2, d29 vzip.u8 \yreg2, \yreg4 vmull.u8 \xacc2, \xreg3, d28 vzip.u8 \yreg3, \yreg4 vmlal.u8 \xacc2, \xreg4, d29 vzip.u8 \yreg1, \yreg2 vmull.u8 \yacc1, \yreg1, d28 vmlal.u8 \yacc1, \yreg2, d29 vmull.u8 \yacc2, \yreg3, d28 vmlal.u8 \yacc2, \yreg4, d29 .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if \numpix == 4 vst1.32 {d0, d1}, [OUT]! .elseif \numpix == 2 vst1.32 {d0}, [OUT]! .elseif \numpix == 1 vst1.32 {d0[0]}, [OUT, :32]! .else .error bilinear_store_8888 numpix is unsupported .endif .endm .macro bilinear_store_0565 numpix, tmp1, tmp2 vuzp.u8 d0, d1 vuzp.u8 d2, d3 vuzp.u8 d1, d3 vuzp.u8 d0, d2 convert_8888_to_0565 d2, d1, d0, q1, \tmp1, \tmp2 .if \numpix == 4 vst1.16 {d2}, [OUT]! .elseif \numpix == 2 vst1.32 {d2[0]}, [OUT]! .elseif \numpix == 1 vst1.16 {d2[0]}, [OUT]! .else .error bilinear_store_0565 numpix is unsupported .endif .endm /* * Macros for loading mask pixels into register 'mask'. * vdup must be done in somewhere else. */ .macro bilinear_load_mask_x numpix, mask .endm .macro bilinear_load_mask_8 numpix, mask .if \numpix == 4 vld1.32 {\mask[0]}, [MASK]! .elseif \numpix == 2 vld1.16 {\mask[0]}, [MASK]! .elseif \numpix == 1 vld1.8 {\mask[0]}, [MASK]! 
.else .error bilinear_load_mask_8 \numpix is unsupported .endif pld [MASK, #prefetch_offset] .endm .macro bilinear_load_mask mask_fmt, numpix, mask bilinear_load_mask_\()\mask_fmt \numpix, \mask .endm /* * Macros for loading destination pixels into register 'dst0' and 'dst1'. * Interleave should be done somewhere else. */ .macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .if \numpix == 4 vld1.32 {\dst0, \dst1}, [OUT] .elseif \numpix == 2 vld1.32 {\dst0}, [OUT] .elseif \numpix == 1 vld1.32 {\dst0[0]}, [OUT] .else .error bilinear_load_dst_8888 \numpix is unsupported .endif pld [OUT, #(prefetch_offset * 4)] .endm .macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01 bilinear_load_dst_8888 \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01 bilinear_load_dst_8888 \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01 bilinear_load_dst_\()\dst_fmt\()_\()\op \numpix, \dst0, \dst1, \dst01 .endm /* * Macros for duplicating partially loaded mask to fill entire register. * We will apply mask to interleaved source pixels, that is * (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3) * (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3) * So, we need to duplicate loaded mask into whole register. * * For two pixel case * (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * We can do some optimizations for this including last pixel cases. 
*/ .macro bilinear_duplicate_mask_x numpix, mask .endm .macro bilinear_duplicate_mask_8 numpix, mask .if \numpix == 4 vdup.32 \mask, \mask[0] .elseif \numpix == 2 vdup.16 \mask, \mask[0] .elseif \numpix == 1 vdup.8 \mask, \mask[0] .else .error bilinear_duplicate_mask_8 is unsupported .endif .endm .macro bilinear_duplicate_mask mask_fmt, numpix, mask bilinear_duplicate_mask_\()\mask_fmt \numpix, \mask .endm /* * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form. * Interleave should be done when maks is enabled or operator is 'over'. */ .macro bilinear_interleave src0, src1, dst0, dst1 vuzp.8 \src0, \src1 vuzp.8 \dst0, \dst1 vuzp.8 \src0, \src1 vuzp.8 \dst0, \dst1 .endm .macro bilinear_interleave_src_dst_x_src \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_x_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \dst0, \dst1 .endm .macro bilinear_interleave_src_dst_x_add \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_8_src \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \dst0, \dst1 .endm .macro bilinear_interleave_src_dst_8_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \dst0, \dst1 .endm .macro bilinear_interleave_src_dst_8_add \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \dst0, \dst1 .endm .macro bilinear_interleave_src_dst \ mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave_src_dst_\()\mask_fmt\()_\()\op \ \numpix, \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm /* * Macros for applying masks to src pixels. (see combine_mask_u() function) * src, dst should be in interleaved form. * mask register should be in form (m0, m1, m2, m3). 
*/ .macro bilinear_apply_mask_to_src_x \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 .endm .macro bilinear_apply_mask_to_src_8 \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 vmull.u8 \tmp01, \src0, \mask vmull.u8 \tmp23, \src1, \mask /* bubbles */ vrshr.u16 \tmp45, \tmp01, #8 vrshr.u16 \tmp67, \tmp23, #8 /* bubbles */ vraddhn.u16 \src0, \tmp45, \tmp01 vraddhn.u16 \src1, \tmp67, \tmp23 .endm .macro bilinear_apply_mask_to_src \ mask_fmt, numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 bilinear_apply_mask_to_src_\()\mask_fmt \ \numpix, \src0, \src1, \src01, \mask, \ \tmp01, \tmp23, \tmp45, \tmp67 .endm /* * Macros for combining src and destination pixels. * Interleave or not is depending on operator 'op'. */ .macro bilinear_combine_src \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 .endm .macro bilinear_combine_over \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 vdup.32 \tmp8, \src1[1] /* bubbles */ vmvn.8 \tmp8, \tmp8 /* bubbles */ vmull.u8 \tmp01, \dst0, \tmp8 /* bubbles */ vmull.u8 \tmp23, \dst1, \tmp8 /* bubbles */ vrshr.u16 \tmp45, \tmp01, #8 vrshr.u16 \tmp67, \tmp23, #8 /* bubbles */ vraddhn.u16 \dst0, \tmp45, \tmp01 vraddhn.u16 \dst1, \tmp67, \tmp23 /* bubbles */ vqadd.u8 \src01, \dst01, \src01 .endm .macro bilinear_combine_add \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 vqadd.u8 \src01, \dst01, \src01 .endm .macro bilinear_combine \ op, numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 bilinear_combine_\()\op \ \numpix, \src0, \src1, \src01, \dst0, \dst1, \dst01, \ \tmp01, \tmp23, \tmp45, \tmp67, \tmp8 .endm /* * Macros for final deinterleaving of destination pixels if needed. 
*/ .macro bilinear_deinterleave numpix, dst0, dst1, dst01 vuzp.8 \dst0, \dst1 /* bubbles */ vuzp.8 \dst0, \dst1 .endm .macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01 bilinear_deinterleave_dst_\()\mask_fmt\()_\()\op \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op bilinear_load_\()\src_fmt d0, d1, d2 bilinear_load_mask \mask_fmt, 1, d4 bilinear_load_dst \dst_fmt, \op, 1, d18, d19, q9 vmull.u8 q1, d0, d28 vmlal.u8 q1, d1, d29 /* 5 cycles bubble */ vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 /* 5 cycles bubble */ bilinear_duplicate_mask \mask_fmt, 1, d4 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ vmovn.u16 d0, q0 /* 1 cycle bubble */ bilinear_interleave_src_dst \ \mask_fmt, \op, 1, d0, d1, q0, d18, d19, q9 bilinear_apply_mask_to_src \ \mask_fmt, 1, d0, d1, q0, d4, \ q3, q8, q10, q11 bilinear_combine \ \op, 1, d0, d1, q0, d18, d19, q9, \ q3, q8, q10, q11, d5 bilinear_deinterleave_dst \mask_fmt, \op, 1, d0, d1, q0 bilinear_store_\()\dst_fmt 1, q2, q3 .endm .macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_two_\()\src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 bilinear_load_mask \mask_fmt, 2, d4 bilinear_load_dst \dst_fmt, \op, 2, 
d18, d19, q9 vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_duplicate_mask \mask_fmt, 2, d4 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vmovn.u16 d0, q0 bilinear_interleave_src_dst \ \mask_fmt, \op, 2, d0, d1, q0, d18, d19, q9 bilinear_apply_mask_to_src \ \mask_fmt, 2, d0, d1, q0, d4, \ q3, q8, q10, q11 bilinear_combine \ \op, 2, d0, d1, q0, d18, d19, q9, \ q3, q8, q10, q11, d5 bilinear_deinterleave_dst \mask_fmt, \op, 2, d0, d1, q0 bilinear_store_\()\dst_fmt 2, q2, q3 .endm .macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_four_\()\src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 \ q3, q9, d4, d5, d16, d17, d18, d19 pld [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d6, d30 vmlal.u16 q2, d7, d30 vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS bilinear_load_mask \mask_fmt, 4, d22 bilinear_load_dst \dst_fmt, \op, 4, d2, d3, q1 pld [TMP1, PF_OFFS] vmlsl.u16 q8, d18, d31 vmlal.u16 q8, d19, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_duplicate_mask \mask_fmt, 4, d22 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vmovn.u16 d0, q0 vmovn.u16 d1, q2 vadd.u16 q12, q12, q13 bilinear_interleave_src_dst \ \mask_fmt, \op, 4, d0, d1, q0, 
d2, d3, q1 bilinear_apply_mask_to_src \ \mask_fmt, 4, d0, d1, q0, d22, \ q3, q8, q9, q10 bilinear_combine \ \op, 4, d0, d1, q0, d2, d3, q1, \ q3, q8, q9, q10, d23 bilinear_deinterleave_dst \mask_fmt, \op, 4, d0, d1, q0 bilinear_store_\()\dst_fmt 4, q2, q3 .endm .set BILINEAR_FLAG_USE_MASK, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline functions. * * Bilinear scanline generator macro take folling arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * src/dst_bpp_shift - (1 << bpp_shift) is the size of src/dst pixel in bytes * process_last_pixel - code block that interpolate one pixel and does not * update horizontal weight * process_two_pixels - code block that interpolate two pixels and update * horizontal weight * process_four_pixels - code block that interpolate four pixels and update * horizontal weight * process_pixblock_head - head part of middle loop * process_pixblock_tail - tail part of middle loop * process_pixblock_tail_head - tail_head of middle loop * pixblock_size - number of pixels processed in a single middle loop * prefetch_distance - prefetch in the source image by that many pixels ahead */ .macro generate_bilinear_scanline_func \ fname, \ src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \ bilinear_process_last_pixel, \ bilinear_process_two_pixels, \ bilinear_process_four_pixels, \ bilinear_process_pixblock_head, \ bilinear_process_pixblock_tail, \ bilinear_process_pixblock_tail_head, \ pixblock_size, \ prefetch_distance, \ flags pixman_asm_function \fname .if \pixblock_size == 8 .elseif \pixblock_size == 4 .else .error unsupported pixblock size .endif .if ((\flags) & BILINEAR_FLAG_USE_MASK) == 0 OUT .req r0 TOP .req r1 BOTTOM .req r2 WT .req r3 WB .req r4 X .req r5 UX .req r6 WIDTH .req ip TMP1 .req r3 TMP2 .req r4 PF_OFFS .req r7 TMP3 .req r8 TMP4 .req r9 STRIDE .req r2 mov ip, sp 
push {r4, r5, r6, r7, r8, r9} mov PF_OFFS, #\prefetch_distance ldmia ip, {WB, X, UX, WIDTH} .else OUT .req r0 MASK .req r1 TOP .req r2 BOTTOM .req r3 WT .req r4 WB .req r5 X .req r6 UX .req r7 WIDTH .req ip TMP1 .req r4 TMP2 .req r5 PF_OFFS .req r8 TMP3 .req r9 TMP4 .req r10 STRIDE .req r3 .set prefetch_offset, \prefetch_distance mov ip, sp push {r4, r5, r6, r7, r8, r9, r10, ip} mov PF_OFFS, #\prefetch_distance ldmia ip, {WT, WB, X, UX, WIDTH} .endif mul PF_OFFS, PF_OFFS, UX .if ((\flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpush {d8-d15} .endif sub STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 3f vdup.u16 q12, X vdup.u16 q13, UX vdup.u8 d28, WT vdup.u8 d29, WB vadd.u16 d25, d25, d26 /* ensure good destination alignment */ cmp WIDTH, #1 blt 0f tst OUT, #(1 << \dst_bpp_shift) beq 0f vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 \bilinear_process_last_pixel sub WIDTH, WIDTH, #1 0: vadd.u16 q13, q13, q13 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 cmp WIDTH, #2 blt 0f tst OUT, #(1 << (\dst_bpp_shift + 1)) beq 0f \bilinear_process_two_pixels sub WIDTH, WIDTH, #2 0: .if \pixblock_size == 8 cmp WIDTH, #4 blt 0f tst OUT, #(1 << (\dst_bpp_shift + 2)) beq 0f \bilinear_process_four_pixels sub WIDTH, WIDTH, #4 0: .endif subs WIDTH, WIDTH, #\pixblock_size blt 1f mov PF_OFFS, PF_OFFS, asr #(16 - \src_bpp_shift) \bilinear_process_pixblock_head subs WIDTH, WIDTH, #\pixblock_size blt 5f 0: \bilinear_process_pixblock_tail_head subs WIDTH, WIDTH, #\pixblock_size bge 0b 5: \bilinear_process_pixblock_tail 1: .if \pixblock_size == 8 tst WIDTH, #4 beq 2f \bilinear_process_four_pixels 2: .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 2f \bilinear_process_two_pixels 2: tst WIDTH, #1 beq 3f \bilinear_process_last_pixel 3: .if ((\flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpop {d8-d15} .endif .if ((\flags) & BILINEAR_FLAG_USE_MASK) == 0 pop {r4, r5, r6, r7, r8, r9} .else pop {r4, r5, r6, r7, 
r8, r9, r10, ip} .endif bx lr .unreq OUT .unreq TOP .unreq WT .unreq WB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE .if ((\flags) & BILINEAR_FLAG_USE_MASK) != 0 .unreq MASK .endif pixman_end_asm_function .endm /* src_8888_8_8888 */ .macro bilinear_src_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_pixblock_head bilinear_src_8888_8_8888_process_four_pixels .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail_head bilinear_src_8888_8_8888_process_pixblock_tail bilinear_src_8888_8_8888_process_pixblock_head .endm /* src_8888_8_0565 */ .macro bilinear_src_8888_8_0565_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_pixblock_head bilinear_src_8888_8_0565_process_four_pixels .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail_head bilinear_src_8888_8_0565_process_pixblock_tail bilinear_src_8888_8_0565_process_pixblock_head .endm /* src_0565_8_x888 */ .macro bilinear_src_0565_8_x888_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 8888, src .endm .macro 
bilinear_src_0565_8_x888_process_pixblock_head bilinear_src_0565_8_x888_process_four_pixels .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail_head bilinear_src_0565_8_x888_process_pixblock_tail bilinear_src_0565_8_x888_process_pixblock_head .endm /* src_0565_8_0565 */ .macro bilinear_src_0565_8_0565_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_pixblock_head bilinear_src_0565_8_0565_process_four_pixels .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail_head bilinear_src_0565_8_0565_process_pixblock_tail bilinear_src_0565_8_0565_process_pixblock_head .endm /* over_8888_8888 */ .macro bilinear_over_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_pixblock_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d22}, [TMP1], STRIDE vld1.32 {d23}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vmull.u8 q8, d22, d28 vmlal.u8 q8, d23, d29 vld1.32 {d22}, [TMP2], STRIDE vld1.32 {d23}, [TMP2] mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmull.u8 q9, d22, d28 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 
vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 .endm .macro bilinear_over_8888_8888_process_pixblock_tail vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d2, d3}, [OUT, :128] pld [OUT, #(prefetch_offset * 4)] vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d6, q0 vmovn.u16 d7, q2 vuzp.8 d6, d7 vuzp.8 d2, d3 vuzp.8 d6, d7 vuzp.8 d2, d3 vdup.32 d4, d7[1] vmvn.8 d4, d4 vmull.u8 q11, d2, d4 vmull.u8 q2, d3, d4 vrshr.u16 q1, q11, #8 vrshr.u16 q10, q2, #8 vraddhn.u16 d2, q1, q11 vraddhn.u16 d3, q10, q2 vqadd.u8 q3, q1, q3 vuzp.8 d6, d7 vuzp.8 d6, d7 vadd.u16 q12, q12, q13 vst1.32 {d6, d7}, [OUT, :128]! 
.endm .macro bilinear_over_8888_8888_process_pixblock_tail_head vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vmlsl.u16 q2, d20, d30 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d2, d3}, [OUT, :128] pld [OUT, PF_OFFS] vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d6, q0 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vmovn.u16 d7, q2 vld1.32 {d22}, [TMP3], STRIDE vuzp.8 d6, d7 vuzp.8 d2, d3 vuzp.8 d6, d7 vuzp.8 d2, d3 vdup.32 d4, d7[1] vld1.32 {d23}, [TMP3] vmvn.8 d4, d4 vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmull.u8 q11, d2, d4 vmull.u8 q2, d3, d4 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vrshr.u16 q1, q11, #8 vmlal.u16 q0, d17, d30 vrshr.u16 q8, q2, #8 vraddhn.u16 d2, q1, q11 vraddhn.u16 d3, q8, q2 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vqadd.u8 q3, q1, q3 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vuzp.8 d6, d7 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vuzp.8 d6, d7 vmlsl.u16 q1, d18, d31 vadd.u16 q12, q12, q13 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vst1.32 {d6, d7}, [OUT, :128]! 
.endm /* over_8888_8_8888 */ .macro bilinear_over_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_pixblock_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vld1.32 {d0}, [TMP1], STRIDE mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d1}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vld1.32 {d2}, [TMP2], STRIDE mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vld1.32 {d3}, [TMP2] vmull.u8 q2, d0, d28 vmull.u8 q3, d2, d28 vmlal.u8 q2, d1, d29 vmlal.u8 q3, d3, d29 vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d4, d30 vmlsl.u16 q1, d6, d31 vmlal.u16 q0, d5, d30 vmlal.u16 q1, d7, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d2}, [TMP3], STRIDE vld1.32 {d3}, [TMP3] pld [TMP4, PF_OFFS] vld1.32 {d4}, [TMP4], STRIDE vld1.32 {d5}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q3, d2, d28 vmlal.u8 q3, d3, d29 vmull.u8 q1, d4, d28 vmlal.u8 q1, d5, d29 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d22[0]}, [MASK]! 
pld [MASK, #prefetch_offset] vadd.u16 q12, q12, q13 vmovn.u16 d16, q0 .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q9, d6, d30 vmlsl.u16 q10, d2, d31 vmlal.u16 q9, d7, d30 vmlal.u16 q10, d3, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vdup.32 d22, d22[0] vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d17, q9 vld1.32 {d18, d19}, [OUT, :128] pld [OUT, PF_OFFS] vuzp.8 d16, d17 vuzp.8 d18, d19 vuzp.8 d16, d17 vuzp.8 d18, d19 vmull.u8 q10, d16, d22 vmull.u8 q11, d17, d22 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 vrshrn.u16 d16, q10, #8 vrshrn.u16 d17, q11, #8 vdup.32 d22, d17[1] vmvn.8 d22, d22 vmull.u8 q10, d18, d22 vmull.u8 q11, d19, d22 vrshr.u16 q9, q10, #8 vrshr.u16 q0, q11, #8 vraddhn.u16 d18, q9, q10 vraddhn.u16 d19, q0, q11 vqadd.u8 q9, q8, q9 vuzp.8 d18, d19 vuzp.8 d18, d19 vst1.32 {d18, d19}, [OUT, :128]! 
.endm .macro bilinear_over_8888_8_8888_process_pixblock_tail_head vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS vld1.32 {d0}, [TMP1], STRIDE mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlsl.u16 q9, d6, d30 vmlsl.u16 q10, d2, d31 vld1.32 {d1}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vmlal.u16 q9, d7, d30 vmlal.u16 q10, d3, d31 vld1.32 {d2}, [TMP2], STRIDE mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d3}, [TMP2] vdup.32 d22, d22[0] vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vmull.u8 q2, d0, d28 vmull.u8 q3, d2, d28 vmovn.u16 d17, q9 vld1.32 {d18, d19}, [OUT, :128] pld [OUT, #(prefetch_offset * 4)] vmlal.u8 q2, d1, d29 vmlal.u8 q3, d3, d29 vuzp.8 d16, d17 vuzp.8 d18, d19 vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS vuzp.8 d16, d17 vuzp.8 d18, d19 vmlsl.u16 q0, d4, d30 vmlsl.u16 q1, d6, d31 vmull.u8 q10, d16, d22 vmull.u8 q11, d17, d22 vmlal.u16 q0, d5, d30 vmlal.u16 q1, d7, d31 vrsra.u16 q10, q10, #8 vrsra.u16 q11, q11, #8 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vrshrn.u16 d16, q10, #8 vrshrn.u16 d17, q11, #8 vld1.32 {d2}, [TMP3], STRIDE vdup.32 d22, d17[1] vld1.32 {d3}, [TMP3] vmvn.8 d22, d22 pld [TMP4, PF_OFFS] vld1.32 {d4}, [TMP4], STRIDE vmull.u8 q10, d18, d22 vmull.u8 q11, d19, d22 vld1.32 {d5}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q3, d2, d28 vrshr.u16 q9, q10, #8 vrshr.u16 q15, q11, #8 vmlal.u8 q3, d3, d29 vmull.u8 q1, d4, d28 vraddhn.u16 d18, q9, q10 vraddhn.u16 d19, q15, q11 vmlal.u8 q1, d5, d29 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vqadd.u8 q9, q8, q9 vld1.32 {d22[0]}, [MASK]! 
vuzp.8 d18, d19 vadd.u16 q12, q12, q13 vuzp.8 d18, d19 vmovn.u16 d16, q0 vst1.32 {d18, d19}, [OUT, :128]! .endm /* add_8888_8888 */ .macro bilinear_add_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_pixblock_head bilinear_add_8888_8888_process_four_pixels .endm .macro bilinear_add_8888_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8888_process_pixblock_tail_head bilinear_add_8888_8888_process_pixblock_tail bilinear_add_8888_8888_process_pixblock_head .endm /* add_8888_8_8888 */ .macro bilinear_add_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_pixblock_head bilinear_add_8888_8_8888_process_four_pixels .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail_head bilinear_add_8888_8_8888_process_pixblock_tail bilinear_add_8888_8_8888_process_pixblock_head .endm /* Bilinear scanline functions */ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_src_8888_8_8888_process_last_pixel, \ bilinear_src_8888_8_8888_process_two_pixels, \ bilinear_src_8888_8_8888_process_four_pixels, \ bilinear_src_8888_8_8888_process_pixblock_head, \ bilinear_src_8888_8_8888_process_pixblock_tail, \ bilinear_src_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ 
pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \ 8888, 0565, 2, 1, \ bilinear_src_8888_8_0565_process_last_pixel, \ bilinear_src_8888_8_0565_process_two_pixels, \ bilinear_src_8888_8_0565_process_four_pixels, \ bilinear_src_8888_8_0565_process_pixblock_head, \ bilinear_src_8888_8_0565_process_pixblock_tail, \ bilinear_src_8888_8_0565_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \ 0565, 8888, 1, 2, \ bilinear_src_0565_8_x888_process_last_pixel, \ bilinear_src_0565_8_x888_process_two_pixels, \ bilinear_src_0565_8_x888_process_four_pixels, \ bilinear_src_0565_8_x888_process_pixblock_head, \ bilinear_src_0565_8_x888_process_pixblock_tail, \ bilinear_src_0565_8_x888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \ 0565, 0565, 1, 1, \ bilinear_src_0565_8_0565_process_last_pixel, \ bilinear_src_0565_8_0565_process_two_pixels, \ bilinear_src_0565_8_0565_process_four_pixels, \ bilinear_src_0565_8_0565_process_pixblock_head, \ bilinear_src_0565_8_0565_process_pixblock_tail, \ bilinear_src_0565_8_0565_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_over_8888_8888_process_last_pixel, \ bilinear_over_8888_8888_process_two_pixels, \ bilinear_over_8888_8888_process_four_pixels, \ bilinear_over_8888_8888_process_pixblock_head, \ bilinear_over_8888_8888_process_pixblock_tail, \ bilinear_over_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_over_8888_8_8888_process_last_pixel, \ bilinear_over_8888_8_8888_process_two_pixels, \ bilinear_over_8888_8_8888_process_four_pixels, \ 
bilinear_over_8888_8_8888_process_pixblock_head, \ bilinear_over_8888_8_8888_process_pixblock_tail, \ bilinear_over_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8888_process_last_pixel, \ bilinear_add_8888_8888_process_two_pixels, \ bilinear_add_8888_8888_process_four_pixels, \ bilinear_add_8888_8888_process_pixblock_head, \ bilinear_add_8888_8888_process_pixblock_tail, \ bilinear_add_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8_8888_process_last_pixel, \ bilinear_add_8888_8_8888_process_two_pixels, \ bilinear_add_8888_8_8888_process_four_pixels, \ bilinear_add_8888_8_8888_process_pixblock_head, \ bilinear_add_8888_8_8888_process_pixblock_tail, \ bilinear_add_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-neon-asm.S0000664000175000017500000037442114712446423020335 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ /* * This file contains implementations of NEON optimized pixel processing * functions. There is no full and detailed tutorial, but some functions * (those which are exposing some new or interesting features) are * extensively commented and can be used as examples. * * You may want to have a look at the comments for following functions: * - pixman_composite_over_8888_0565_asm_neon * - pixman_composite_over_n_8_0565_asm_neon */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .fpu neon .arch armv7a .object_arch armv4 .eabi_attribute 10, 0 /* suppress Tag_FP_arch */ .eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */ .arm .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arm-neon-asm.h" pixman_syntax_unified /* Global configuration options and preferences */ /* * The code can optionally make use of unaligned memory accesses to improve * performance of handling leading/trailing pixels for each scanline. * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for * example in linux if unaligned memory accesses are not configured to * generate.exceptions. */ .set RESPECT_STRICT_ALIGNMENT, 1 /* * Set default prefetch type. 
There is a choice between the following options: * * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work * as NOP to workaround some HW bugs or for whatever other reason) * * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where * advanced prefetch intruduces heavy overhead) * * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8 * which can run ARM and NEON instructions simultaneously so that extra ARM * instructions do not add (many) extra cycles, but improve prefetch efficiency) * * Note: some types of function can't support advanced prefetch and fallback * to simple one (those which handle 24bpp pixels) */ .set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED /* Prefetch distance in pixels for simple prefetch */ .set PREFETCH_DISTANCE_SIMPLE, 64 /* * Implementation of pixman_composite_over_8888_0565_asm_neon * * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and * performs OVER compositing operation. Function fast_composite_over_8888_0565 * from pixman-fast-path.c does the same in C and can be used as a reference. * * First we need to have some NEON assembly code which can do the actual * operation on the pixels and provide it to the template macro. * * Template macro quite conveniently takes care of emitting all the necessary * code for memory reading and writing (including quite tricky cases of * handling unaligned leading/trailing pixels), so we only need to deal with * the data in NEON registers. 
* * NEON registers allocation in general is recommented to be the following: * d0, d1, d2, d3 - contain loaded source pixel data * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed) * d24, d25, d26, d27 - contain loading mask pixel data (if mask is used) * d28, d29, d30, d31 - place for storing the result (destination pixels) * * As can be seen above, four 64-bit NEON registers are used for keeping * intermediate pixel data and up to 8 pixels can be processed in one step * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp). * * This particular function uses the following registers allocation: * d0, d1, d2, d3 - contain loaded source pixel data * d4, d5 - contain loaded destination pixels (they are needed) * d28, d29 - place for storing the result (destination pixels) */ /* * Step one. We need to have some code to do some arithmetics on pixel data. * This is implemented as a pair of macros: '*_head' and '*_tail'. When used * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5}, * perform all the needed calculations and write the result to {d28, d29}. * The rationale for having two macros and not just one will be explained * later. In practice, any single monolitic function which does the work can * be split into two parts in any arbitrary way without affecting correctness. * * There is one special trick here too. Common template macro can optionally * make our life a bit easier by doing R, G, B, A color components * deinterleaving for 32bpp pixel formats (and this feature is used in * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that * instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we * actually use d0 register for blue channel (a vector of eight 8-bit * values), d1 register for green, d2 for red and d3 for alpha. 
This * simple conversion can be also done with a few NEON instructions: * * Packed to planar conversion: * vuzp.8 d0, d1 * vuzp.8 d2, d3 * vuzp.8 d1, d3 * vuzp.8 d0, d2 * * Planar to packed conversion: * vzip.8 d0, d2 * vzip.8 d1, d3 * vzip.8 d2, d3 * vzip.8 d0, d1 * * But pixel can be loaded directly in planar format using VLD4.8 NEON * instruction. It is 1 cycle slower than VLD1.32, so this is not always * desirable, that's why deinterleaving is optional. * * But anyway, here is the code: */ .macro pixman_composite_over_8888_0565_process_pixblock_head /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format and put data into d6 - red, d7 - green, d30 - blue */ vshrn.u16 d6, q2, #8 vshrn.u16 d7, q2, #3 vsli.u16 q2, q2, #5 vsri.u8 d6, d6, #5 vmvn.8 d3, d3 /* invert source alpha */ vsri.u8 d7, d7, #6 vshrn.u16 d30, q2, #2 /* now do alpha blending, storing results in 8-bit planar format into d16 - red, d19 - green, d18 - blue */ vmull.u8 q10, d3, d6 vmull.u8 q11, d3, d7 vmull.u8 q12, d3, d30 vrshr.u16 q13, q10, #8 vrshr.u16 q3, q11, #8 vrshr.u16 q15, q12, #8 vraddhn.u16 d20, q10, q13 vraddhn.u16 d23, q11, q3 vraddhn.u16 d22, q12, q15 .endm .macro pixman_composite_over_8888_0565_process_pixblock_tail /* ... continue alpha blending */ vqadd.u8 d16, d2, d20 vqadd.u8 q9, q0, q11 /* convert the result to r5g6b5 and store it into {d28, d29} */ vshll.u8 q14, d16, #8 vshll.u8 q8, d19, #8 vshll.u8 q9, d18, #8 vsri.u16 q14, q8, #5 vsri.u16 q14, q9, #11 .endm /* * OK, now we got almost everything that we need. Using the above two * macros, the work can be done right. But now we want to optimize * it a bit. ARM Cortex-A8 is an in-order core, and benefits really * a lot from good code scheduling and software pipelining. * * Let's construct some code, which will run in the core main loop. * Some pseudo-code of the main loop will look like this: * head * while (...) 
{ * tail * head * } * tail * * It may look a bit weird, but this setup allows to hide instruction * latencies better and also utilize dual-issue capability more * efficiently (make pairs of load-store and ALU instructions). * * So what we need now is a '*_tail_head' macro, which will be used * in the core main loop. A trivial straightforward implementation * of this macro would look like this: * * pixman_composite_over_8888_0565_process_pixblock_tail * vst1.16 {d28, d29}, [DST_W, :128]! * vld1.16 {d4, d5}, [DST_R, :128]! * vld4.32 {d0, d1, d2, d3}, [SRC]! * pixman_composite_over_8888_0565_process_pixblock_head * cache_preload 8, 8 * * Now it also got some VLD/VST instructions. We simply can't move from * processing one block of pixels to the other one with just arithmetics. * The previously processed data needs to be written to memory and new * data needs to be fetched. Fortunately, this main loop does not deal * with partial leading/trailing pixels and can load/store a full block * of pixels in a bulk. Additionally, destination buffer is already * 16 bytes aligned here (which is good for performance). * * New things here are DST_R, DST_W, SRC and MASK identifiers. These * are the aliases for ARM registers which are used as pointers for * accessing data. We maintain separate pointers for reading and writing * destination buffer (DST_R and DST_W). * * Another new thing is 'cache_preload' macro. It is used for prefetching * data into CPU L2 cache and improve performance when dealing with large * images which are far larger than cache size. It uses one argument * (actually two, but they need to be the same here) - number of pixels * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some * details about this macro. Moreover, if good performance is needed * the code from this macro needs to be copied into '*_tail_head' macro * and mixed with the rest of code for optimal instructions scheduling. * We are actually doing it below. 
* * Now after all the explanations, here is the optimized code. * Different instruction streams (originaling from '*_head', '*_tail' * and 'cache_preload' macro) use different indentation levels for * better readability. Actually taking the code from one of these * indentation levels and ignoring a few VLD/VST instructions would * result in exactly the code from '*_head', '*_tail' or 'cache_preload' * macro! */ #if 1 .macro pixman_composite_over_8888_0565_process_pixblock_tail_head vqadd.u8 d16, d2, d20 vld1.16 {d4, d5}, [DST_R, :128]! vqadd.u8 q9, q0, q11 vshrn.u16 d6, q2, #8 fetch_src_pixblock vshrn.u16 d7, q2, #3 vsli.u16 q2, q2, #5 vshll.u8 q14, d16, #8 PF add, PF_X, PF_X, #8 vshll.u8 q8, d19, #8 PF tst, PF_CTL, #0xF vsri.u8 d6, d6, #5 PF addne, PF_X, PF_X, #8 vmvn.8 d3, d3 PF subne, PF_CTL, PF_CTL, #1 vsri.u8 d7, d7, #6 vshrn.u16 d30, q2, #2 vmull.u8 q10, d3, d6 PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] vmull.u8 q11, d3, d7 vmull.u8 q12, d3, d30 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] vsri.u16 q14, q8, #5 PF cmp, PF_X, ORIG_W vshll.u8 q9, d18, #8 vrshr.u16 q13, q10, #8 PF subge, PF_X, PF_X, ORIG_W vrshr.u16 q3, q11, #8 vrshr.u16 q15, q12, #8 PF subsge, PF_CTL, PF_CTL, #0x10 vsri.u16 q14, q9, #11 PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! vraddhn.u16 d20, q10, q13 vraddhn.u16 d23, q11, q3 PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! vraddhn.u16 d22, q12, q15 vst1.16 {d28, d29}, [DST_W, :128]! .endm #else /* If we did not care much about the performance, we would just use this... */ .macro pixman_composite_over_8888_0565_process_pixblock_tail_head pixman_composite_over_8888_0565_process_pixblock_tail vst1.16 {d28, d29}, [DST_W, :128]! vld1.16 {d4, d5}, [DST_R, :128]! fetch_src_pixblock pixman_composite_over_8888_0565_process_pixblock_head cache_preload 8, 8 .endm #endif /* * And now the final part. We are using 'generate_composite_function' macro * to put all the stuff together. 
We are specifying the name of the function * which we want to get, number of bits per pixel for the source, mask and * destination (0 if unused, like mask in this case). Next come some bit * flags: * FLAG_DST_READWRITE - tells that the destination buffer is both read * and written, for write-only buffer we would use * FLAG_DST_WRITEONLY flag instead * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data * and separate color channels for 32bpp format. * The next things are: * - the number of pixels processed per iteration (8 in this case, because * that's the maximum what can fit into four 64-bit NEON registers). * - prefetch distance, measured in pixel blocks. In this case it is 5 times * by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal * prefetch distance can be selected by running some benchmarks. * * After that we specify some macros, these are 'default_init', * 'default_cleanup' here which are empty (but it is possible to have custom * init/cleanup macros to be able to save/restore some extra NEON registers * like d8-d15 or do anything else) followed by * 'pixman_composite_over_8888_0565_process_pixblock_head', * 'pixman_composite_over_8888_0565_process_pixblock_tail' and * 'pixman_composite_over_8888_0565_process_pixblock_tail_head' * which we got implemented above. * * The last part is the NEON registers allocation scheme. 
 */
generate_composite_function \
    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_0565_process_pixblock_head, \
    pixman_composite_over_8888_0565_process_pixblock_tail, \
    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    0,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/*
 * Solid-source ('n') variant of OVER to r5g6b5. The init macro below
 * preloads the solid color once, so head/tail only do the blend math.
 */
.macro pixman_composite_over_n_0565_process_pixblock_head
    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
       and put data into d6 - red, d7 - green, d30 - blue */
    vshrn.u16   d6, q2, #8
    vshrn.u16   d7, q2, #3
    vsli.u16    q2, q2, #5
    vsri.u8     d6, d6, #5
    vsri.u8     d7, d7, #6
    vshrn.u16   d30, q2, #2
    /* now do alpha blending, storing results in 8-bit planar format
       into d16 - red, d19 - green, d18 - blue */
    vmull.u8    q10, d3, d6
    vmull.u8    q11, d3, d7
    vmull.u8    q12, d3, d30
    vrshr.u16   q13, q10, #8
    vrshr.u16   q3, q11, #8
    vrshr.u16   q15, q12, #8
    vraddhn.u16 d20, q10, q13
    vraddhn.u16 d23, q11, q3
    vraddhn.u16 d22, q12, q15
.endm

.macro pixman_composite_over_n_0565_process_pixblock_tail
    /* ... continue alpha blending */
    vqadd.u8    d16, d2, d20
    vqadd.u8    q9, q0, q11
    /* convert the result to r5g6b5 and store it into {d28, d29} */
    vshll.u8    q14, d16, #8
    vshll.u8    q8, d19, #8
    vshll.u8    q9, d18, #8
    vsri.u16    q14, q8, #5
    vsri.u16    q14, q9, #11
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_0565_process_pixblock_tail_head
    pixman_composite_over_n_0565_process_pixblock_tail
    vld1.16     {d4, d5}, [DST_R, :128]!
    vst1.16     {d28, d29}, [DST_W, :128]!
    pixman_composite_over_n_0565_process_pixblock_head
    cache_preload 8, 8
.endm

/* Load solid source color from the stack argument area and splat the four
   8-bit components into d0-d3; alpha (d3) is inverted for the blend. */
.macro pixman_composite_over_n_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d3[0]}, [DUMMY]
    vdup.8      d0, d3[0]
    vdup.8      d1, d3[1]
    vdup.8      d2, d3[2]
    vdup.8      d3, d3[3]
    vmvn.8      d3, d3  /* invert source alpha */
.endm

generate_composite_function \
    pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_0565_init, \
    default_cleanup, \
    pixman_composite_over_n_0565_process_pixblock_head, \
    pixman_composite_over_n_0565_process_pixblock_tail, \
    pixman_composite_over_n_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    0,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/* Pack planar a8r8g8b8 source (d0-d2) down to r5g6b5 in q14. */
.macro pixman_composite_src_8888_0565_process_pixblock_head
    vshll.u8    q8, d1, #8
    vshll.u8    q14, d2, #8
    vshll.u8    q9, d0, #8
.endm

.macro pixman_composite_src_8888_0565_process_pixblock_tail
    vsri.u16    q14, q8, #5
    vsri.u16    q14, q9, #11
.endm

.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
        vsri.u16    q14, q8, #5
                        PF add, PF_X, PF_X, #8
                        PF tst, PF_CTL, #0xF
    fetch_src_pixblock
                        PF addne, PF_X, PF_X, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vsri.u16    q14, q9, #11
                        PF cmp, PF_X, ORIG_W
                        PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
        vshll.u8    q8, d1, #8
    vst1.16     {d28, d29}, [DST_W, :128]!
                        PF subge, PF_X, PF_X, ORIG_W
                        PF subsge, PF_CTL, PF_CTL, #0x10
        vshll.u8    q14, d2, #8
                        PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
        vshll.u8    q9, d0, #8
.endm

generate_composite_function \
    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_8888_0565_process_pixblock_head, \
    pixman_composite_src_8888_0565_process_pixblock_tail, \
    pixman_composite_src_8888_0565_process_pixblock_tail_head

/******************************************************************************/

/* Expand r5g6b5 source (q0) to planar 8888 in d28-d31 (alpha forced to 255). */
.macro pixman_composite_src_0565_8888_process_pixblock_head
    vshrn.u16   d30, q0, #8
    vshrn.u16   d29, q0, #3
    vsli.u16    q0, q0, #5
    vmov.u8     d31, #255
    vsri.u8     d30, d30, #5
    vsri.u8     d29, d29, #6
    vshrn.u16   d28, q0, #2
.endm

.macro pixman_composite_src_0565_8888_process_pixblock_tail
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_src_0565_8888_process_pixblock_tail_head
    pixman_composite_src_0565_8888_process_pixblock_tail
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    fetch_src_pixblock
    pixman_composite_src_0565_8888_process_pixblock_head
    cache_preload 8, 8
.endm

generate_composite_function \
    pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_8888_process_pixblock_head, \
    pixman_composite_src_0565_8888_process_pixblock_tail, \
    pixman_composite_src_0565_8888_process_pixblock_tail_head

/******************************************************************************/

/* Saturating byte-wise ADD of source (q0/q1) and destination (q2/q3). */
.macro pixman_composite_add_8_8_process_pixblock_head
    vqadd.u8    q14, q0, q2
    vqadd.u8    q15, q1, q3
.endm

.macro pixman_composite_add_8_8_process_pixblock_tail
.endm

.macro pixman_composite_add_8_8_process_pixblock_tail_head
    fetch_src_pixblock
                        PF add, PF_X, PF_X, #32
                        PF tst, PF_CTL, #0xF
    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
                        PF addne, PF_X, PF_X, #32
                        PF subne, PF_CTL, PF_CTL, #1
    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
                        PF cmp, PF_X, ORIG_W
                        PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
                        PF subge, PF_X, PF_X, ORIG_W
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vqadd.u8    q14, q0, q2
                        PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vqadd.u8    q15, q1, q3
.endm

generate_composite_function \
    pixman_composite_add_8_8_asm_neon, 8, 0, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8_8_process_pixblock_tail_head

/******************************************************************************/

/* 32bpp ADD: same head/tail as add_8_8 (pure vqadd), only this tail_head
   differs — 8-pixel blocks with 32-bit element loads/stores. */
.macro pixman_composite_add_8888_8888_process_pixblock_tail_head
    fetch_src_pixblock
                        PF add, PF_X, PF_X, #8
                        PF tst, PF_CTL, #0xF
    vld1.32     {d4, d5, d6, d7}, [DST_R, :128]!
                        PF addne, PF_X, PF_X, #8
                        PF subne, PF_CTL, PF_CTL, #1
    vst1.32     {d28, d29, d30, d31}, [DST_W, :128]!
                        PF cmp, PF_X, ORIG_W
                        PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
                        PF subge, PF_X, PF_X, ORIG_W
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vqadd.u8    q14, q0, q2
                        PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vqadd.u8    q15, q1, q3
.endm

generate_composite_function \
    pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8888_8888_process_pixblock_tail_head

generate_composite_function_single_scanline \
    pixman_composite_scanline_add_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_process_pixblock_tail, \
    pixman_composite_add_8888_8888_process_pixblock_tail_head

/******************************************************************************/

/* OUT_REVERSE: multiply destination (d4-d7) by inverted source alpha. */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
    vmvn.8      d24, d3  /* get inverted alpha */
    /* do alpha blending */
    vmull.u8    q8, d24, d4
    vmull.u8    q9, d24, d5
    vmull.u8    q10, d24, d6
    vmull.u8    q11, d24, d7
.endm

.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q12, q10, #8
    vrshr.u16   q13, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
.endm

.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
        vrshr.u16   q14, q8, #8
                        PF add, PF_X, PF_X, #8
                        PF tst, PF_CTL, #0xF
        vrshr.u16   q15, q9, #8
        vrshr.u16   q12, q10, #8
        vrshr.u16   q13, q11, #8
                        PF addne, PF_X, PF_X, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vraddhn.u16 d28, q14, q8
        vraddhn.u16 d29, q15, q9
                        PF cmp, PF_X, ORIG_W
        vraddhn.u16 d30, q12, q10
        vraddhn.u16 d31, q13, q11
    fetch_src_pixblock
                        PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    vmvn.8      d22, d3
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q8, d22, d4
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d22, d5
                        PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    vmull.u8    q10, d22, d6
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vmull.u8    q11, d22, d7
.endm

generate_composite_function_single_scanline \
    pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_head, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head

/******************************************************************************/

/* OVER = OUT_REVERSE followed by a saturating add of the source. */
.macro pixman_composite_over_8888_8888_process_pixblock_head
    pixman_composite_out_reverse_8888_8888_process_pixblock_head
.endm

.macro pixman_composite_over_8888_8888_process_pixblock_tail
    pixman_composite_out_reverse_8888_8888_process_pixblock_tail
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
.endm

.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
        vrshr.u16   q14, q8, #8
                        PF add, PF_X, PF_X, #8
                        PF tst, PF_CTL, #0xF
        vrshr.u16   q15, q9, #8
        vrshr.u16   q12, q10, #8
        vrshr.u16   q13, q11, #8
                        PF addne, PF_X, PF_X, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vraddhn.u16 d28, q14, q8
        vraddhn.u16 d29, q15, q9
                        PF cmp, PF_X, ORIG_W
        vraddhn.u16 d30, q12, q10
        vraddhn.u16 d31, q13, q11
        vqadd.u8    q14, q0, q14
        vqadd.u8    q15, q1, q15
    fetch_src_pixblock
                        PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    vmvn.8      d22, d3
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q8, d22, d4
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d22, d5
                        PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
    vmull.u8    q10, d22, d6
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vmull.u8    q11, d22, d7
.endm

generate_composite_function \
    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_process_pixblock_tail_head

generate_composite_function_single_scanline \
    pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_process_pixblock_tail_head

/******************************************************************************/

/* Solid-source OVER to a8r8g8b8; the solid color and its inverted alpha
   are prepared once by the init macro below. */
.macro pixman_composite_over_n_8888_process_pixblock_head
    /* deinterleaved source pixels in {d0, d1, d2, d3} */
    /* inverted alpha in {d24} */
    /* destination pixels in {d4, d5, d6, d7} */
    vmull.u8    q8, d24, d4
    vmull.u8    q9, d24, d5
    vmull.u8    q10, d24, d6
    vmull.u8    q11, d24, d7
.endm

.macro pixman_composite_over_n_8888_process_pixblock_tail
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q2, q10, #8
    vrshr.u16   q3, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q2, q10
    vraddhn.u16 d31, q3, q11
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
.endm

.macro pixman_composite_over_n_8888_process_pixblock_tail_head
        vrshr.u16   q14, q8, #8
        vrshr.u16   q15, q9, #8
        vrshr.u16   q2, q10, #8
        vrshr.u16   q3, q11, #8
        vraddhn.u16 d28, q14, q8
        vraddhn.u16 d29, q15, q9
        vraddhn.u16 d30, q2, q10
        vraddhn.u16 d31, q3, q11
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
        vqadd.u8    q14, q0, q14
                        PF add, PF_X, PF_X, #8
                        PF tst, PF_CTL, #0x0F
                        PF addne, PF_X, PF_X, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vqadd.u8    q15, q1, q15
                        PF cmp, PF_X, ORIG_W
    vmull.u8    q8, d24, d4
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vmull.u8    q9, d24, d5
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q10, d24, d6
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q11, d24, d7
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
.endm

/* Load the solid source from the stack argument area, splat components into
   d0-d3 and keep inverted alpha in d24 for the blend. */
.macro pixman_composite_over_n_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d3[0]}, [DUMMY]
    vdup.8      d0, d3[0]
    vdup.8      d1, d3[1]
    vdup.8      d2, d3[2]
    vdup.8      d3, d3[3]
    vmvn.8      d24, d3  /* get inverted alpha */
.endm

/* Note: reuses the generic over_8888_8888 head/tail; only the tail_head
   differs (no per-block source fetch is needed for a solid source). */
generate_composite_function \
    pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_n_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head
        vrshr.u16   q14, q8, #8
                        PF add, PF_X, PF_X, #8
                        PF tst, PF_CTL, #0xF
        vrshr.u16   q15, q9, #8
        vrshr.u16   q12, q10, #8
        vrshr.u16   q13, q11, #8
                        PF addne, PF_X, PF_X, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vraddhn.u16 d28, q14, q8
        vraddhn.u16 d29, q15, q9
                        PF cmp, PF_X, ORIG_W
        vraddhn.u16 d30, q12, q10
        vraddhn.u16 d31, q13, q11
        vqadd.u8    q14, q0, q14
        vqadd.u8    q15, q1, q15
    vld4.8      {d0, d1, d2, d3}, [DST_R, :128]!
    vmvn.8      d22, d3
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q8, d22, d4
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d22, d5
    vmull.u8    q10, d22, d6
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
    vmull.u8    q11, d22, d7
.endm

/* OVER_REVERSE with solid source: the solid color is splatted into d4-d7
   (the 'destination' slots of the shared over_8888_8888 head/tail), while
   the actual destination is fetched into d0-d3 by the tail_head above. */
.macro pixman_composite_over_reverse_n_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d7[0]}, [DUMMY]
    vdup.8      d4, d7[0]
    vdup.8      d5, d7[1]
    vdup.8      d6, d7[2]
    vdup.8      d7, d7[3]
.endm

generate_composite_function \
    pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_reverse_n_8888_init, \
    default_cleanup, \
    pixman_composite_over_8888_8888_process_pixblock_head, \
    pixman_composite_over_8888_8888_process_pixblock_tail, \
    pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0,  /* dst_r_basereg */ \
    4,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/* a8r8g8b8 source with a8 mask OVER r5g6b5 destination: first apply the
   mask (IN) to the source, convert DST_R to planar 8-bit, then blend. */
.macro pixman_composite_over_8888_8_0565_process_pixblock_head
    vmull.u8    q0,  d24, d8    /* IN for SRC pixels (part1) */
    vmull.u8    q1,  d24, d9
    vmull.u8    q6,  d24, d10
    vmull.u8    q7,  d24, d11
        vshrn.u16   d6,  q2, #8 /* convert DST_R data to 32-bpp (part1) */
        vshrn.u16   d7,  q2, #3
        vsli.u16    q2,  q2, #5
    vrshr.u16   q8,  q0, #8     /* IN for SRC pixels (part2) */
    vrshr.u16   q9,  q1, #8
    vrshr.u16   q10, q6, #8
    vrshr.u16   q11, q7, #8
    vraddhn.u16 d0,  q0, q8
    vraddhn.u16 d1,  q1, q9
    vraddhn.u16 d2,  q6, q10
    vraddhn.u16 d3,  q7, q11
        vsri.u8     d6,  d6, #5 /* convert DST_R data to 32-bpp (part2) */
        vsri.u8     d7,  d7, #6
        vmvn.8      d3,  d3
        vshrn.u16   d30, q2, #2
    vmull.u8    q8,  d3, d6     /* now do alpha blending */
    vmull.u8    q9,  d3, d7
    vmull.u8    q10, d3, d30
.endm

.macro pixman_composite_over_8888_8_0565_process_pixblock_tail
    /* 3 cycle bubble (after vmull.u8) */
    vrshr.u16   q13, q8,  #8
    vrshr.u16   q11, q9,  #8
    vrshr.u16   q15, q10, #8
    vraddhn.u16 d16, q8,  q13
    vraddhn.u16 d27, q9,  q11
    vraddhn.u16 d26, q10, q15
    vqadd.u8    d16, d2,  d16
    /* 1 cycle bubble */
    vqadd.u8    q9,  q0,  q13
    vshll.u8    q14, d16, #8    /* convert to 16bpp */
    vshll.u8    q8,  d19, #8
    vshll.u8    q9,  d18, #8
    vsri.u16    q14, q8,  #5
    /* 1 cycle bubble
     */
    vsri.u16    q14, q9, #11
.endm

.macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head
    vld1.16     {d4, d5}, [DST_R, :128]!
        vshrn.u16   d6,  q2,  #8
    fetch_mask_pixblock
        vshrn.u16   d7,  q2,  #3
    fetch_src_pixblock
            vmull.u8    q6,  d24, d10
        vrshr.u16   q13, q8,  #8
        vrshr.u16   q11, q9,  #8
        vrshr.u16   q15, q10, #8
        vraddhn.u16 d16, q8,  q13
        vraddhn.u16 d27, q9,  q11
        vraddhn.u16 d26, q10, q15
        vqadd.u8    d16, d2,  d16
            vmull.u8    q1,  d24, d9
        vqadd.u8    q9,  q0,  q13
        vshll.u8    q14, d16, #8
            vmull.u8    q0,  d24, d8
        vshll.u8    q8,  d19, #8
        vshll.u8    q9,  d18, #8
        vsri.u16    q14, q8,  #5
            vmull.u8    q7,  d24, d11
        vsri.u16    q14, q9,  #11
    cache_preload 8, 8
            vsli.u16    q2,  q2,  #5
            vrshr.u16   q8,  q0,  #8
            vrshr.u16   q9,  q1,  #8
            vrshr.u16   q10, q6,  #8
            vrshr.u16   q11, q7,  #8
            vraddhn.u16 d0,  q0,  q8
            vraddhn.u16 d1,  q1,  q9
            vraddhn.u16 d2,  q6,  q10
            vraddhn.u16 d3,  q7,  q11
            vsri.u8     d6,  d6,  #5
            vsri.u8     d7,  d7,  #6
            vmvn.8      d3,  d3
            vshrn.u16   d30, q2,  #2
    vst1.16     {d28, d29}, [DST_W, :128]!
            vmull.u8    q8,  d3,  d6
            vmull.u8    q9,  d3,  d7
            vmull.u8    q10, d3,  d30
.endm

generate_composite_function \
    pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    8,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/*
 * This function needs a special initialization of solid mask.
 * Solid source pixel data is fetched from stack at ARGS_STACK_OFFSET
 * offset, split into color components and replicated in d8-d11
 * registers. Additionally, this function needs all the NEON registers,
 * so it has to save d8-d15 registers which are callee saved according
 * to ABI. These registers are restored from 'cleanup' macro. All the
 * other NEON registers are caller saved, so can be clobbered freely
 * without introducing any problems.
 */
.macro pixman_composite_over_n_8_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vpush       {d8-d15}
    vld1.32     {d11[0]}, [DUMMY]
    vdup.8      d8, d11[0]
    vdup.8      d9, d11[1]
    vdup.8      d10, d11[2]
    vdup.8      d11, d11[3]
.endm

.macro pixman_composite_over_n_8_0565_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_0565_init, \
    pixman_composite_over_n_8_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head

/******************************************************************************/

/* Solid-mask variant: loads the second stack argument (offset +8 — presumably
   the solid mask; verify against the caller's argument layout) and splats its
   alpha byte into d24. */
.macro pixman_composite_over_8888_n_0565_init
    add         DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
    vpush       {d8-d15}
    vld1.32     {d24[0]}, [DUMMY]
    vdup.8      d24, d24[3]
.endm

.macro pixman_composite_over_8888_n_0565_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_8888_n_0565_init, \
    pixman_composite_over_8888_n_0565_cleanup, \
    pixman_composite_over_8888_8_0565_process_pixblock_head, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    8,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/* Plain 16bpp copy: no per-pixel processing, store/fetch only. */
.macro pixman_composite_src_0565_0565_process_pixblock_head
.endm

.macro pixman_composite_src_0565_0565_process_pixblock_tail
.endm

.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
    vst1.16     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    cache_preload 16, 16
.endm

generate_composite_function \
    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0565_0565_process_pixblock_head, \
    pixman_composite_src_0565_0565_process_pixblock_tail, \
    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

/* Solid 8bpp fill: the byte is replicated across q0/q1 once in init. */
.macro pixman_composite_src_n_8_process_pixblock_head
.endm

.macro pixman_composite_src_n_8_process_pixblock_tail
.endm

.macro pixman_composite_src_n_8_process_pixblock_tail_head
    vst1.8      {d0, d1, d2, d3}, [DST_W, :128]!
.endm

.macro pixman_composite_src_n_8_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d0[0]}, [DUMMY]
    vsli.u64    d0, d0, #8   /* replicate the byte across the register */
    vsli.u64    d0, d0, #16
    vsli.u64    d0, d0, #32
    vorr        d1, d0, d0
    vorr        q1, q0, q0
.endm

.macro pixman_composite_src_n_8_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
    FLAG_DST_WRITEONLY, \
    32, /* number of pixels, processed in a single block */ \
    0,  /* prefetch distance */ \
    pixman_composite_src_n_8_init, \
    pixman_composite_src_n_8_cleanup, \
    pixman_composite_src_n_8_process_pixblock_head, \
    pixman_composite_src_n_8_process_pixblock_tail, \
    pixman_composite_src_n_8_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

/* Solid 16bpp fill. */
.macro pixman_composite_src_n_0565_process_pixblock_head
.endm

.macro pixman_composite_src_n_0565_process_pixblock_tail
.endm

.macro pixman_composite_src_n_0565_process_pixblock_tail_head
    vst1.16     {d0, d1, d2, d3}, [DST_W, :128]!
.endm

.macro pixman_composite_src_n_0565_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d0[0]}, [DUMMY]
    vsli.u64    d0, d0, #16  /* replicate the halfword across the register */
    vsli.u64    d0, d0, #32
    vorr        d1, d0, d0
    vorr        q1, q0, q0
.endm

.macro pixman_composite_src_n_0565_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_WRITEONLY, \
    16, /* number of pixels, processed in a single block */ \
    0,  /* prefetch distance */ \
    pixman_composite_src_n_0565_init, \
    pixman_composite_src_n_0565_cleanup, \
    pixman_composite_src_n_0565_process_pixblock_head, \
    pixman_composite_src_n_0565_process_pixblock_tail, \
    pixman_composite_src_n_0565_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

/* Solid 32bpp fill. */
.macro pixman_composite_src_n_8888_process_pixblock_head
.endm

.macro pixman_composite_src_n_8888_process_pixblock_tail
.endm

.macro pixman_composite_src_n_8888_process_pixblock_tail_head
    vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
.endm

.macro pixman_composite_src_n_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d0[0]}, [DUMMY]
    vsli.u64    d0, d0, #32  /* replicate the word across the register */
    vorr        d1, d0, d0
    vorr        q1, q0, q0
.endm

.macro pixman_composite_src_n_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    0, /* prefetch distance */ \
    pixman_composite_src_n_8888_init, \
    pixman_composite_src_n_8888_cleanup, \
    pixman_composite_src_n_8888_process_pixblock_head, \
    pixman_composite_src_n_8888_process_pixblock_tail, \
    pixman_composite_src_n_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

/* Plain 32bpp copy. */
.macro pixman_composite_src_8888_8888_process_pixblock_head
.endm

.macro pixman_composite_src_8888_8888_process_pixblock_tail
.endm

.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
    vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    cache_preload 8, 8
.endm

generate_composite_function \
    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_8888_8888_process_pixblock_head, \
    pixman_composite_src_8888_8888_process_pixblock_tail, \
    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

/* x8r8g8b8 -> a8r8g8b8 copy: OR in the 0xFF000000 alpha mask prepared by
   the init macro (q2). */
.macro pixman_composite_src_x888_8888_process_pixblock_head
    vorr        q0, q0, q2
    vorr        q1, q1, q2
.endm

.macro pixman_composite_src_x888_8888_process_pixblock_tail
.endm

.macro pixman_composite_src_x888_8888_process_pixblock_tail_head
    vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
    fetch_src_pixblock
    vorr        q0, q0, q2
    vorr        q1, q1, q2
    cache_preload 8, 8
.endm

.macro pixman_composite_src_x888_8888_init
    vmov.u8     q2, #0xFF
    vshl.u32    q2, q2, #24  /* q2 = 0xFF000000 per 32-bit lane */
.endm

generate_composite_function \
    pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    pixman_composite_src_x888_8888_init, \
    default_cleanup, \
    pixman_composite_src_x888_8888_process_pixblock_head, \
    pixman_composite_src_x888_8888_process_pixblock_tail, \
    pixman_composite_src_x888_8888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg   */ \
    0  /* mask_basereg  */

/******************************************************************************/

.macro pixman_composite_src_n_8_8888_process_pixblock_head
    /* expecting solid source in {d0, d1, d2, d3} */
    /* mask is in d24 (d25, d26, d27 are unused) */
    /* in */
    vmull.u8    q8, d24, d0
    vmull.u8    q9, d24, d1
    vmull.u8    q10, d24, d2
    vmull.u8    q11, d24, d3
    vrsra.u16   q8, q8, #8
    vrsra.u16   q9, q9, #8
    vrsra.u16   q10, q10, #8
    vrsra.u16   q11, q11, #8
.endm

.macro pixman_composite_src_n_8_8888_process_pixblock_tail
    vrshrn.u16  d28, q8, #8
    vrshrn.u16  d29, q9, #8
    vrshrn.u16  d30, q10, #8
    vrshrn.u16  d31, q11, #8
.endm

.macro pixman_composite_src_n_8_8888_process_pixblock_tail_head
    fetch_mask_pixblock
                        PF add, PF_X, PF_X, #8
        vrshrn.u16  d28, q8, #8
                        PF tst, PF_CTL, #0x0F
        vrshrn.u16  d29, q9, #8
                        PF addne, PF_X, PF_X, #8
        vrshrn.u16  d30, q10, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vrshrn.u16  d31, q11, #8
                        PF cmp, PF_X, ORIG_W
    vmull.u8    q8, d24, d0
                        PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
    vmull.u8    q9, d24, d1
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q10, d24, d2
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q11, d24, d3
                        PF ldrbge, DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    vrsra.u16   q8, q8, #8
    vrsra.u16   q9, q9, #8
    vrsra.u16   q10, q10, #8
    vrsra.u16   q11, q11, #8
.endm

.macro pixman_composite_src_n_8_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d3[0]}, [DUMMY]
    vdup.8      d0, d3[0]
    vdup.8      d1, d3[1]
    vdup.8      d2, d3[2]
    vdup.8      d3, d3[3]
.endm

.macro pixman_composite_src_n_8_8888_cleanup
.endm

/* NOTE(review): the trailing '\' after the last argument continues this
   invocation onto the following (blank) line — appears unintentional but
   harmless; confirm against the macro's optional-argument handling. */
generate_composite_function \
    pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_src_n_8_8888_init, \
    pixman_composite_src_n_8_8888_cleanup, \
    pixman_composite_src_n_8_8888_process_pixblock_head, \
    pixman_composite_src_n_8_8888_process_pixblock_tail, \
    pixman_composite_src_n_8_8888_process_pixblock_tail_head, \

/******************************************************************************/

/* Solid source with a8 mask to a8 destination: multiply mask bytes
   (d24-d27) by the splatted source alpha (d16). */
.macro pixman_composite_src_n_8_8_process_pixblock_head
    vmull.u8    q0, d24, d16
    vmull.u8    q1, d25, d16
    vmull.u8    q2, d26, d16
    vmull.u8    q3, d27, d16
    vrsra.u16   q0, q0, #8
    vrsra.u16   q1, q1, #8
    vrsra.u16   q2, q2, #8
    vrsra.u16   q3, q3, #8
.endm

.macro pixman_composite_src_n_8_8_process_pixblock_tail
    vrshrn.u16  d28, q0, #8
    vrshrn.u16  d29, q1, #8
    vrshrn.u16  d30, q2, #8
    vrshrn.u16  d31, q3, #8
.endm

.macro pixman_composite_src_n_8_8_process_pixblock_tail_head
    fetch_mask_pixblock
                        PF add, PF_X, PF_X, #8
        vrshrn.u16  d28, q0, #8
                        PF tst, PF_CTL, #0x0F
        vrshrn.u16  d29, q1, #8
                        PF addne, PF_X, PF_X, #8
        vrshrn.u16  d30, q2, #8
                        PF subne, PF_CTL, PF_CTL, #1
        vrshrn.u16  d31, q3, #8
                        PF cmp, PF_X, ORIG_W
    vmull.u8    q0, d24, d16
                        PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
    vmull.u8    q1, d25, d16
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q2, d26, d16
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q3, d27, d16
                        PF ldrbge, DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
    vrsra.u16   q0, q0, #8
    vrsra.u16   q1, q1, #8
    vrsra.u16   q2, q2, #8
    vrsra.u16   q3, q3, #8
.endm

/* Load the solid source and keep only its alpha byte, splatted into d16. */
.macro pixman_composite_src_n_8_8_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32     {d16[0]}, [DUMMY]
    vdup.8      d16, d16[3]
.endm

.macro pixman_composite_src_n_8_8_cleanup
.endm

generate_composite_function \
    pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \
    FLAG_DST_WRITEONLY, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_src_n_8_8_init, \
    pixman_composite_src_n_8_8_cleanup, \
    pixman_composite_src_n_8_8_process_pixblock_head, \
    pixman_composite_src_n_8_8_process_pixblock_tail, \
    pixman_composite_src_n_8_8_process_pixblock_tail_head

/******************************************************************************/

/* Solid source with a8 mask OVER a8r8g8b8: apply the mask to the solid
   color (IN), then blend into the destination. */
.macro pixman_composite_over_n_8_8888_process_pixblock_head
    /* expecting deinterleaved source data in {d8, d9, d10, d11} */
    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
    /* and destination data in {d4, d5, d6, d7} */
    /* mask is in d24 (d25, d26, d27 are unused) */
    /* in */
    vmull.u8    q6, d24, d8
    vmull.u8    q7, d24, d9
    vmull.u8    q8, d24, d10
    vmull.u8    q9, d24, d11
    vrshr.u16   q10, q6, #8
    vrshr.u16   q11, q7, #8
    vrshr.u16   q12, q8, #8
    vrshr.u16   q13, q9, #8
    vraddhn.u16 d0, q6, q10
    vraddhn.u16 d1, q7, q11
    vraddhn.u16 d2, q8, q12
    vraddhn.u16 d3, q9, q13
    vmvn.8      d25, d3  /* get inverted alpha */
    /* source:      d0 - blue, d1 - green, d2 - red, d3 - alpha */
    /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
    /* now do alpha blending */
    vmull.u8    q8, d25, d4
    vmull.u8    q9, d25, d5
    vmull.u8    q10, d25, d6
    vmull.u8    q11, d25, d7
.endm

.macro pixman_composite_over_n_8_8888_process_pixblock_tail
    vrshr.u16   q14, q8, #8
    vrshr.u16   q15, q9, #8
    vrshr.u16   q6, q10, #8
    vrshr.u16   q7, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q6, q10
    vraddhn.u16 d31, q7, q11
    vqadd.u8    q14, q0, q14
    vqadd.u8    q15, q1, q15
.endm

.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
        vrshr.u16   q14, q8, #8
    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
        vrshr.u16   q15, q9, #8
    fetch_mask_pixblock
        vrshr.u16   q6, q10, #8
                        PF add, PF_X, PF_X, #8
        vrshr.u16   q7, q11, #8
                        PF tst, PF_CTL, #0x0F
        vraddhn.u16 d28, q14, q8
                        PF addne, PF_X, PF_X, #8
        vraddhn.u16 d29, q15, q9
                        PF subne, PF_CTL, PF_CTL, #1
        vraddhn.u16 d30, q6, q10
                        PF cmp, PF_X, ORIG_W
        vraddhn.u16 d31, q7, q11
                        PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
    vmull.u8    q6, d24, d8
                        PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
    vmull.u8    q7, d24, d9
                        PF subge, PF_X, PF_X, ORIG_W
    vmull.u8    q8, d24, d10
                        PF subsge, PF_CTL, PF_CTL, #0x10
    vmull.u8    q9, d24, d11
                        PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
        vqadd.u8    q14, q0, q14
                        PF ldrbge, DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
        vqadd.u8    q15, q1, q15
    vrshr.u16   q10, q6, #8
    vrshr.u16   q11, q7, #8
    vrshr.u16   q12, q8, #8
    vrshr.u16   q13, q9, #8
    vraddhn.u16 d0, q6, q10
    vraddhn.u16 d1, q7, q11
    vraddhn.u16 d2, q8, q12
    vraddhn.u16 d3, q9, q13
    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
    vmvn.8      d25, d3
    vmull.u8    q8, d25, d4
    vmull.u8    q9, d25, d5
    vmull.u8    q10, d25, d6
    vmull.u8    q11, d25, d7
.endm

/* Needs d8-d15 (callee-saved) for the splatted solid color, so they are
   pushed here and popped in cleanup. */
.macro pixman_composite_over_n_8_8888_init
    add         DUMMY, sp, #ARGS_STACK_OFFSET
    vpush       {d8-d15}
    vld1.32     {d11[0]}, [DUMMY]
    vdup.8      d8, d11[0]
    vdup.8      d9, d11[1]
    vdup.8      d10, d11[2]
    vdup.8      d11, d11[3]
.endm

.macro pixman_composite_over_n_8_8888_cleanup
    vpop        {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_8888_init, \
    pixman_composite_over_n_8_8888_cleanup, \
    pixman_composite_over_n_8_8888_process_pixblock_head, \
    pixman_composite_over_n_8_8888_process_pixblock_tail, \
    pixman_composite_over_n_8_8888_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_n_8_8_process_pixblock_head
    vmull.u8    q0,  d24, d8
    vmull.u8    q1,  d25, d8
    vmull.u8    q6,  d26, d8
    vmull.u8 q7, d27, d8
    vrshr.u16 q10, q0, #8
    vrshr.u16 q11, q1, #8
    vrshr.u16 q12, q6, #8
    vrshr.u16 q13, q7, #8
    vraddhn.u16 d0, q0, q10
    vraddhn.u16 d1, q1, q11
    vraddhn.u16 d2, q6, q12
    vraddhn.u16 d3, q7, q13
    /* invert the masked source to get the per-pixel "1 - alpha" factors */
    vmvn.8 q12, q0
    vmvn.8 q13, q1
    vmull.u8 q8, d24, d4
    vmull.u8 q9, d25, d5
    vmull.u8 q10, d26, d6
    vmull.u8 q11, d27, d7
.endm

.macro pixman_composite_over_n_8_8_process_pixblock_tail
    vrshr.u16 q14, q8, #8
    vrshr.u16 q15, q9, #8
    vrshr.u16 q12, q10, #8
    vrshr.u16 q13, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
    vqadd.u8 q14, q0, q14
    vqadd.u8 q15, q1, q15
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_8_8_process_pixblock_tail_head
    vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
    pixman_composite_over_n_8_8_process_pixblock_tail
    fetch_mask_pixblock
    cache_preload 32, 32
    vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
    pixman_composite_over_n_8_8_process_pixblock_head
.endm

/* Load the solid source pixel and replicate its alpha byte into d8. */
.macro pixman_composite_over_n_8_8_init
    add DUMMY, sp, #ARGS_STACK_OFFSET
    vpush {d8-d15}
    vld1.32 {d8[0]}, [DUMMY]
    vdup.8 d8, d8[3]
.endm

.macro pixman_composite_over_n_8_8_cleanup
    vpop {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_8_init, \
    pixman_composite_over_n_8_8_cleanup, \
    pixman_composite_over_n_8_8_process_pixblock_head, \
    pixman_composite_over_n_8_8_process_pixblock_tail, \
    pixman_composite_over_n_8_8_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head
    /*
     * 'combine_mask_ca' replacement
     *
     * input:  solid src (n) in {d8,  d9,  d10, d11}
     *         dest in          {d4,  d5,  d6,  d7 }
     *         mask in          {d24, d25, d26, d27}
     * output: updated src in   {d0,  d1,  d2,  d3 }
     *         updated mask in  {d24, d25, d26, d3 }
     */
    vmull.u8 q0, d24, d8
    vmull.u8 q1, d25, d9
    vmull.u8 q6, d26, d10
    vmull.u8 q7, d27, d11
    vmull.u8 q9, d11, d25
    vmull.u8 q12, d11, d24
    vmull.u8 q13, d11, d26
    vrshr.u16 q8, q0, #8
    vrshr.u16 q10, q1, #8
    vrshr.u16 q11, q6, #8
    vraddhn.u16 d0, q0, q8
    vraddhn.u16 d1, q1, q10
    vraddhn.u16 d2, q6, q11
    vrshr.u16 q11, q12, #8
    vrshr.u16 q8, q9, #8
    vrshr.u16 q6, q13, #8
    vrshr.u16 q10, q7, #8
    vraddhn.u16 d24, q12, q11
    vraddhn.u16 d25, q9, q8
    vraddhn.u16 d26, q13, q6
    vraddhn.u16 d3, q7, q10
    /*
     * 'combine_over_ca' replacement
     *
     * output: updated dest in {d28, d29, d30, d31}
     */
    vmvn.8 q12, q12
    vmvn.8 d26, d26
    vmull.u8 q8, d24, d4
    vmull.u8 q9, d25, d5
    vmvn.8 d27, d3
    vmull.u8 q10, d26, d6
    vmull.u8 q11, d27, d7
.endm

.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail
    /* ... continue 'combine_over_ca' replacement */
    vrshr.u16 q14, q8, #8
    vrshr.u16 q15, q9, #8
    vrshr.u16 q6, q10, #8
    vrshr.u16 q7, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q6, q10
    vraddhn.u16 d31, q7, q11
    vqadd.u8 q14, q0, q14
    vqadd.u8 q15, q1, q15
.endm

/* Pipelined tail+head; the head is re-invoked as a macro rather than
 * hand-interleaved here. */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
    vrshr.u16 q14, q8, #8
    vrshr.u16 q15, q9, #8
    vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
    vrshr.u16 q6, q10, #8
    vrshr.u16 q7, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q6, q10
    vraddhn.u16 d31, q7, q11
    fetch_mask_pixblock
    vqadd.u8 q14, q0, q14
    vqadd.u8 q15, q1, q15
    cache_preload 8, 8
    pixman_composite_over_n_8888_8888_ca_process_pixblock_head
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm

/* Load the solid source pixel and deinterleave its bytes into d8..d11. */
.macro pixman_composite_over_n_8888_8888_ca_init
    add DUMMY, sp, #ARGS_STACK_OFFSET
    vpush {d8-d15}
    vld1.32 {d11[0]}, [DUMMY]
    vdup.8 d8, d11[0]
    vdup.8 d9, d11[1]
    vdup.8 d10, d11[2]
    vdup.8 d11, d11[3]
.endm

.macro pixman_composite_over_n_8888_8888_ca_cleanup
    vpop {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_8888_ca_init, \
    pixman_composite_over_n_8888_8888_ca_cleanup, \
    pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \
    pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \
    pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head

/******************************************************************************/

.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head
    /*
     * 'combine_mask_ca' replacement
     *
     * input:  solid src (n) in {d8, d9, d10, d11}  [B, G, R, A]
     *         mask in          {d24, d25, d26}     [B, G, R]
     * output: updated src in   {d0, d1, d2 }       [B, G, R]
     *         updated mask in  {d24, d25, d26}     [B, G, R]
     */
    vmull.u8 q0, d24, d8
    vmull.u8 q1, d25, d9
    vmull.u8 q6, d26, d10
    vmull.u8 q9, d11, d25
    vmull.u8 q12, d11, d24
    vmull.u8 q13, d11, d26
    vrshr.u16 q8, q0, #8
    vrshr.u16 q10, q1, #8
    vrshr.u16 q11, q6, #8
    vraddhn.u16 d0, q0, q8
    vraddhn.u16 d1, q1, q10
    vraddhn.u16 d2, q6, q11
    vrshr.u16 q11, q12, #8
    vrshr.u16 q8, q9, #8
    vrshr.u16 q6, q13, #8
    vraddhn.u16 d24, q12, q11
    vraddhn.u16 d25, q9, q8
    /*
     * convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
     * and put data into d16 - blue, d17 - green, d18 - red
     */
    vshrn.u16 d17, q2, #3
    vshrn.u16 d18, q2, #8
    vraddhn.u16 d26, q13, q6
    vsli.u16 q2, q2, #5
    vsri.u8 d18, d18, #5
    vsri.u8 d17, d17, #6
    /*
     * 'combine_over_ca' replacement
     *
     * output: updated dest in d16 - blue, d17 - green, d18 - red
     */
    vmvn.8 q12, q12
    vshrn.u16 d16, q2, #2
    vmvn.8 d26, d26
    vmull.u8 q6, d16, d24
    vmull.u8 q7, d17, d25
    vmull.u8 q11, d18, d26
.endm

.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail
    /* ... continue 'combine_over_ca' replacement */
    vrshr.u16 q10, q6, #8
    vrshr.u16 q14, q7, #8
    vrshr.u16 q15, q11, #8
    vraddhn.u16 d16, q10, q6
    vraddhn.u16 d17, q14, q7
    vraddhn.u16 d18, q15, q11
    vqadd.u8 q8, q0, q8
    vqadd.u8 d18, d2, d18
    /*
     * convert the results in d16, d17, d18 to r5g6b5 and store
     * them into {d28, d29}
     */
    vshll.u8 q14, d18, #8
    vshll.u8 q10, d17, #8
    vshll.u8 q15, d16, #8
    vsri.u16 q14, q10, #5
    vsri.u16 q14, q15, #11
.endm

/* Fully hand-interleaved tail+head for the r5g6b5 component-alpha case;
 * the inline comments below mark which phase each cluster belongs to. */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
    fetch_mask_pixblock
    vrshr.u16 q10, q6, #8
    vrshr.u16 q14, q7, #8
    vld1.16 {d4, d5}, [DST_R, :128]!
    vrshr.u16 q15, q11, #8
    vraddhn.u16 d16, q10, q6
    vraddhn.u16 d17, q14, q7
    vraddhn.u16 d22, q15, q11
        /* process_pixblock_head */
    /*
     * 'combine_mask_ca' replacement
     *
     * input:  solid src (n) in {d8, d9, d10, d11}  [B, G, R, A]
     *         mask in          {d24, d25, d26}     [B, G, R]
     * output: updated src in   {d0, d1, d2 }       [B, G, R]
     *         updated mask in  {d24, d25, d26}     [B, G, R]
     */
    vmull.u8 q6, d26, d10
    vqadd.u8 q8, q0, q8
    vmull.u8 q0, d24, d8
    vqadd.u8 d22, d2, d22
    vmull.u8 q1, d25, d9
    /*
     * convert the result in d16, d17, d22 to r5g6b5 and store
     * it into {d28, d29}
     */
    vshll.u8 q14, d22, #8
    vshll.u8 q10, d17, #8
    vshll.u8 q15, d16, #8
    vmull.u8 q9, d11, d25
    vsri.u16 q14, q10, #5
    vmull.u8 q12, d11, d24
    vmull.u8 q13, d11, d26
    vsri.u16 q14, q15, #11
    cache_preload 8, 8
    vrshr.u16 q8, q0, #8
    vrshr.u16 q10, q1, #8
    vrshr.u16 q11, q6, #8
    vraddhn.u16 d0, q0, q8
    vraddhn.u16 d1, q1, q10
    vraddhn.u16 d2, q6, q11
    vrshr.u16 q11, q12, #8
    vrshr.u16 q8, q9, #8
    vrshr.u16 q6, q13, #8
    vraddhn.u16 d24, q12, q11
    vraddhn.u16 d25, q9, q8
    /*
     * convert 8 r5g6b5 pixel data from {d4, d5} to planar
     * 8-bit format and put data into d16 - blue, d17 - green,
     * d18 - red
     */
    vshrn.u16 d17, q2, #3
    vshrn.u16 d18, q2, #8
    vraddhn.u16 d26, q13, q6
    vsli.u16 q2, q2, #5
    vsri.u8 d17, d17, #6
    vsri.u8 d18, d18, #5
    /*
     * 'combine_over_ca' replacement
     *
     * output: updated dest in d16 - blue, d17 - green, d18 - red
     */
    vmvn.8 q12, q12
    vshrn.u16 d16, q2, #2
    vmvn.8 d26, d26
    vmull.u8 q7, d17, d25
    vmull.u8 q6, d16, d24
    vmull.u8 q11, d18, d26
    vst1.16 {d28, d29}, [DST_W, :128]!
.endm

/* Load the solid source pixel and deinterleave its bytes into d8..d11. */
.macro pixman_composite_over_n_8888_0565_ca_init
    add DUMMY, sp, #ARGS_STACK_OFFSET
    vpush {d8-d15}
    vld1.32 {d11[0]}, [DUMMY]
    vdup.8 d8, d11[0]
    vdup.8 d9, d11[1]
    vdup.8 d10, d11[2]
    vdup.8 d11, d11[3]
.endm

.macro pixman_composite_over_n_8888_0565_ca_cleanup
    vpop {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_0565_ca_init, \
    pixman_composite_over_n_8888_0565_ca_cleanup, \
    pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \
    pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \
    pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head

/******************************************************************************/

/* in_n_8: destination multiplied by the replicated source alpha (d3). */
.macro pixman_composite_in_n_8_process_pixblock_head
    /* expecting source data in {d0, d1, d2, d3} */
    /* and destination data in {d4, d5, d6, d7} */
    vmull.u8 q8, d4, d3
    vmull.u8 q9, d5, d3
    vmull.u8 q10, d6, d3
    vmull.u8 q11, d7, d3
.endm

.macro pixman_composite_in_n_8_process_pixblock_tail
    vrshr.u16 q14, q8, #8
    vrshr.u16 q15, q9, #8
    vrshr.u16 q12, q10, #8
    vrshr.u16 q13, q11, #8
    vraddhn.u16 d28, q8, q14
    vraddhn.u16 d29, q9, q15
    vraddhn.u16 d30, q10, q12
    vraddhn.u16 d31, q11, q13
.endm

.macro pixman_composite_in_n_8_process_pixblock_tail_head
    pixman_composite_in_n_8_process_pixblock_tail
    vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
    cache_preload 32, 32
    pixman_composite_in_n_8_process_pixblock_head
    vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm

/* Load the solid source pixel and replicate its alpha byte into d3. */
.macro pixman_composite_in_n_8_init
    add DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32 {d3[0]}, [DUMMY]
    vdup.8 d3, d3[3]
.endm

.macro pixman_composite_in_n_8_cleanup
.endm

generate_composite_function \
    pixman_composite_in_n_8_asm_neon, 0, 0, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_in_n_8_init, \
    pixman_composite_in_n_8_cleanup, \
    pixman_composite_in_n_8_process_pixblock_head, \
    pixman_composite_in_n_8_process_pixblock_tail, \
    pixman_composite_in_n_8_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    24 /* mask_basereg */

/* add_n_8_8: mask * source-alpha, saturating-added to the destination. */
.macro pixman_composite_add_n_8_8_process_pixblock_head
    /* expecting source data in {d8, d9, d10, d11} */
    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
    /* and destination data in {d4, d5, d6, d7} */
    /* mask is in d24, d25, d26, d27 */
    vmull.u8 q0, d24, d11
    vmull.u8 q1, d25, d11
    vmull.u8 q6, d26, d11
    vmull.u8 q7, d27, d11
    vrshr.u16 q10, q0, #8
    vrshr.u16 q11, q1, #8
    vrshr.u16 q12, q6, #8
    vrshr.u16 q13, q7, #8
    vraddhn.u16 d0, q0, q10
    vraddhn.u16 d1, q1, q11
    vraddhn.u16 d2, q6, q12
    vraddhn.u16 d3, q7, q13
    vqadd.u8 q14, q0, q2
    vqadd.u8 q15, q1, q3
.endm

.macro pixman_composite_add_n_8_8_process_pixblock_tail
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
    pixman_composite_add_n_8_8_process_pixblock_tail
    vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
    vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
    fetch_mask_pixblock
    cache_preload 32, 32
    pixman_composite_add_n_8_8_process_pixblock_head
.endm

/* Load the solid source pixel and replicate its alpha byte into d11. */
.macro pixman_composite_add_n_8_8_init
    add DUMMY, sp, #ARGS_STACK_OFFSET
    vpush {d8-d15}
    vld1.32 {d11[0]}, [DUMMY]
    vdup.8 d11, d11[3]
.endm

.macro pixman_composite_add_n_8_8_cleanup
    vpop {d8-d15}
.endm

generate_composite_function \
    pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_n_8_8_init, \
    pixman_composite_add_n_8_8_cleanup, \
    pixman_composite_add_n_8_8_process_pixblock_head, \
    pixman_composite_add_n_8_8_process_pixblock_tail, \
    pixman_composite_add_n_8_8_process_pixblock_tail_head

/******************************************************************************/

/* add_8_8_8: a8 source * a8 mask, saturating-added to the a8 destination. */
.macro pixman_composite_add_8_8_8_process_pixblock_head
    /* expecting source data in {d0, d1, d2, d3} */
    /* destination data in {d4, d5, d6, d7} */
    /* mask in {d24, d25, d26, d27} */
    vmull.u8 q8, d24, d0
    vmull.u8 q9, d25, d1
    vmull.u8 q10, d26, d2
    vmull.u8 q11, d27, d3
    vrshr.u16 q0, q8, #8
    vrshr.u16 q1, q9, #8
    vrshr.u16 q12, q10, #8
    vrshr.u16 q13, q11, #8
    vraddhn.u16 d0, q0, q8
    vraddhn.u16 d1, q1, q9
    vraddhn.u16 d2, q12, q10
    vraddhn.u16 d3, q13, q11
    vqadd.u8 q14, q0, q2
    vqadd.u8 q15, q1, q3
.endm

.macro pixman_composite_add_8_8_8_process_pixblock_tail
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
    pixman_composite_add_8_8_8_process_pixblock_tail
    vst1.8 {d28, d29, d30, d31}, [DST_W, :128]!
    vld1.8 {d4, d5, d6, d7}, [DST_R, :128]!
    fetch_mask_pixblock
    fetch_src_pixblock
    cache_preload 32, 32
    pixman_composite_add_8_8_8_process_pixblock_head
.endm

.macro pixman_composite_add_8_8_8_init
.endm

.macro pixman_composite_add_8_8_8_cleanup
.endm

generate_composite_function \
    pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_8_8_8_init, \
    pixman_composite_add_8_8_8_cleanup, \
    pixman_composite_add_8_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_8_process_pixblock_tail, \
    pixman_composite_add_8_8_8_process_pixblock_tail_head

/******************************************************************************/

/* add_8888_8888_8888: source * mask alpha (d27), saturating-added to the
 * destination.  The "bubble" comments record pipeline stalls observed
 * when this code was scheduled. */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_head
    /* expecting source data in {d0, d1, d2, d3} */
    /* destination data in {d4, d5, d6, d7} */
    /* mask in {d24, d25, d26, d27} */
    vmull.u8 q8, d27, d0
    vmull.u8 q9, d27, d1
    vmull.u8 q10, d27, d2
    vmull.u8 q11, d27, d3
    /* 1 cycle bubble */
    vrsra.u16 q8, q8, #8
    vrsra.u16 q9, q9, #8
    vrsra.u16 q10, q10, #8
    vrsra.u16 q11, q11, #8
.endm

.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail
    /* 2 cycle bubble */
    vrshrn.u16 d28, q8, #8
    vrshrn.u16 d29, q9, #8
    vrshrn.u16 d30, q10, #8
    vrshrn.u16 d31, q11, #8
    vqadd.u8 q14, q2, q14
    /* 1 cycle bubble */
    vqadd.u8 q15, q3, q15
.endm

.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
    fetch_src_pixblock
    vrshrn.u16 d28, q8, #8
    fetch_mask_pixblock
    vrshrn.u16 d29, q9, #8
    vmull.u8 q8, d27, d0
    vrshrn.u16 d30, q10, #8
    vmull.u8 q9, d27, d1
    vrshrn.u16 d31, q11, #8
    vmull.u8 q10, d27, d2
    vqadd.u8 q14, q2, q14
    vmull.u8 q11, d27, d3
    vqadd.u8 q15, q3, q15
    vrsra.u16 q8, q8, #8
    vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
    vrsra.u16 q9, q9, #8
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
    vrsra.u16 q10, q10, #8
    cache_preload 8, 8
    vrsra.u16 q11, q11, #8
.endm

generate_composite_function \
    pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head

/* Single-scanline variant reusing the same pixblock macros. */
generate_composite_function_single_scanline \
    pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head

/******************************************************************************/

/* add_8888_8_8888 reuses the 8888_8888_8888 pixblock macros; the a8 mask
 * byte lands in d27 via mask_basereg = 27. */
generate_composite_function \
    pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    27 /* mask_basereg */

/******************************************************************************/

/* Load the solid source pixel and deinterleave its bytes into d0..d3. */
.macro pixman_composite_add_n_8_8888_init
    add DUMMY, sp, #ARGS_STACK_OFFSET
    vld1.32 {d3[0]}, [DUMMY]
    vdup.8 d0, d3[0]
    vdup.8 d1, d3[1]
    vdup.8 d2, d3[2]
    vdup.8 d3, d3[3]
.endm

.macro pixman_composite_add_n_8_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_n_8_8888_init, \
    pixman_composite_add_n_8_8888_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    27 /* mask_basereg */

/******************************************************************************/

/* Load the solid mask (second stacked argument, hence the +8 offset) and
 * replicate its alpha byte into d27. */
.macro pixman_composite_add_8888_n_8888_init
    add DUMMY, sp, #(ARGS_STACK_OFFSET + 8)
    vld1.32 {d27[0]}, [DUMMY]
    vdup.8 d27, d27[3]
.endm

.macro pixman_composite_add_8888_n_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_8888_n_8888_init, \
    pixman_composite_add_8888_n_8888_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    27 /* mask_basereg */

/******************************************************************************/

.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
    /* expecting source data in {d0, d1, d2, d3} */
    /* destination data in {d4, d5, d6, d7} */
    /* solid mask is in d15 */

    /* 'in' */
    vmull.u8 q8, d15, d3
    vmull.u8 q6, d15, d2
    vmull.u8 q5, d15, d1
    vmull.u8 q4, d15, d0
    vrshr.u16 q13, q8, #8
    vrshr.u16 q12, q6, #8
    vrshr.u16 q11, q5, #8
    vrshr.u16 q10, q4, #8
    vraddhn.u16 d3, q8, q13
    vraddhn.u16 d2, q6, q12
    vraddhn.u16 d1, q5, q11
    vraddhn.u16 d0, q4, q10
    vmvn.8 d24, d3  /* get inverted alpha */
    /* now do alpha blending */
    vmull.u8 q8, d24, d4
    vmull.u8 q9, d24, d5
    vmull.u8 q10, d24, d6
    vmull.u8 q11, d24, d7
.endm

.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
    vrshr.u16 q14, q8, #8
    vrshr.u16 q15, q9, #8
    vrshr.u16 q12, q10, #8
    vrshr.u16 q13, q11, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q12, q10
    vraddhn.u16 d31, q13, q11
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head
    vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    fetch_mask_pixblock
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm

generate_composite_function_single_scanline \
    pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \
    pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

/******************************************************************************/

/* over_8888_n_8888: OVER is OUT_REVERSE plus a saturating add of the
 * masked source, so the head/tail delegate to the out_reverse macros. */
.macro pixman_composite_over_8888_n_8888_process_pixblock_head
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
.endm

.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
    vqadd.u8 q14, q0, q14
    vqadd.u8 q15, q1, q15
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
    vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
    pixman_composite_over_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    pixman_composite_over_8888_n_8888_process_pixblock_head
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm

/* Load the solid mask from the stack (fixed #48 offset here) and
 * replicate its alpha byte into d15. */
.macro pixman_composite_over_8888_n_8888_init
    add DUMMY, sp, #48
    vpush {d8-d15}
    vld1.32 {d15[0]}, [DUMMY]
    vdup.8 d15, d15[3]
.endm

.macro pixman_composite_over_8888_n_8888_cleanup
    vpop {d8-d15}
.endm

generate_composite_function \
    pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_8888_n_8888_init, \
    pixman_composite_over_8888_n_8888_cleanup, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail_head

/******************************************************************************/

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head
    vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
    pixman_composite_over_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    fetch_mask_pixblock
    pixman_composite_over_8888_n_8888_process_pixblock_head
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm

generate_composite_function \
    pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

/* Single-scanline variant reusing the same pixblock macros. */
generate_composite_function_single_scanline \
    pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

/******************************************************************************/

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head
    vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
    pixman_composite_over_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    fetch_mask_pixblock
    pixman_composite_over_8888_n_8888_process_pixblock_head
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm

generate_composite_function \
    pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8_8888_process_pixblock_tail_head \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    15 /* mask_basereg */

/******************************************************************************/

/* src_0888_0888: plain 24bpp copy; all work happens in the vld3/vst3
 * of the tail_head. */
.macro pixman_composite_src_0888_0888_process_pixblock_head
.endm

.macro pixman_composite_src_0888_0888_process_pixblock_tail
.endm

.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
    vst3.8 {d0, d1, d2}, [DST_W]!
    fetch_src_pixblock
    cache_preload 8, 8
.endm

generate_composite_function \
    pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0888_0888_process_pixblock_head, \
    pixman_composite_src_0888_0888_process_pixblock_tail, \
    pixman_composite_src_0888_0888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_0888_8888_rev: 24bpp to 32bpp with R/B swap; alpha (d3) is zeroed
 * once in init. */
.macro pixman_composite_src_0888_8888_rev_process_pixblock_head
    vswp d0, d2
.endm

.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail
.endm

.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head
    vst4.8 {d0, d1, d2, d3}, [DST_W]!
    fetch_src_pixblock
    vswp d0, d2
    cache_preload 8, 8
.endm

.macro pixman_composite_src_0888_8888_rev_init
    veor d3, d3, d3
.endm

generate_composite_function \
    pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    pixman_composite_src_0888_8888_rev_init, \
    default_cleanup, \
    pixman_composite_src_0888_8888_rev_process_pixblock_head, \
    pixman_composite_src_0888_8888_rev_process_pixblock_tail, \
    pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_0888_0565_rev: 24bpp to r5g6b5 with channel reversal; the packing
 * is done by widening each channel (vshll) and bit-inserting (vsri). */
.macro pixman_composite_src_0888_0565_rev_process_pixblock_head
    vshll.u8 q8, d1, #8
    vshll.u8 q9, d2, #8
.endm

.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail
    vshll.u8 q14, d0, #8
    vsri.u16 q14, q8, #5
    vsri.u16 q14, q9, #11
.endm

.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head
    vshll.u8 q14, d0, #8
    fetch_src_pixblock
    vsri.u16 q14, q8, #5
    vsri.u16 q14, q9, #11
    vshll.u8 q8, d1, #8
    vst1.16 {d28, d29}, [DST_W, :128]!
    vshll.u8 q9, d2, #8
.endm

generate_composite_function \
    pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0888_0565_rev_process_pixblock_head, \
    pixman_composite_src_0888_0565_rev_process_pixblock_tail, \
    pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_pixbuf_8888: multiply colour channels (d0..d2) by alpha (d3);
 * the tail swaps d3/d31 so channels come out reversed in d28..d31. */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_head
    vmull.u8 q8, d3, d0
    vmull.u8 q9, d3, d1
    vmull.u8 q10, d3, d2
.endm

.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail
    vrshr.u16 q11, q8, #8
    vswp d3, d31
    vrshr.u16 q12, q9, #8
    vrshr.u16 q13, q10, #8
    vraddhn.u16 d30, q11, q8
    vraddhn.u16 d29, q12, q9
    vraddhn.u16 d28, q13, q10
.endm

/* Pipelined tail+head with PF source prefetching interleaved. */
.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head
    vrshr.u16 q11, q8, #8
    vswp d3, d31
    vrshr.u16 q12, q9, #8
    vrshr.u16 q13, q10, #8
    fetch_src_pixblock
    vraddhn.u16 d30, q11, q8
    PF add, PF_X, PF_X, #8
    PF tst, PF_CTL, #0xF
    PF addne, PF_X, PF_X, #8
    PF subne, PF_CTL, PF_CTL, #1
    vraddhn.u16 d29, q12, q9
    vraddhn.u16 d28, q13, q10
    vmull.u8 q8, d3, d0
    vmull.u8 q9, d3, d1
    vmull.u8 q10, d3, d2
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
    PF cmp, PF_X, ORIG_W
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    PF subge, PF_X, PF_X, ORIG_W
    PF subsge, PF_CTL, PF_CTL, #0x10
    PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
.endm

generate_composite_function \
    pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_pixbuf_8888_process_pixblock_head, \
    pixman_composite_src_pixbuf_8888_process_pixblock_tail, \
    pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_rpixbuf_8888: same multiply-by-alpha as src_pixbuf_8888, but the
 * tail writes the channels in the opposite order (d28..d30). */
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_head
    vmull.u8 q8, d3, d0
    vmull.u8 q9, d3, d1
    vmull.u8 q10, d3, d2
.endm

.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail
    vrshr.u16 q11, q8, #8
    vswp d3, d31
    vrshr.u16 q12, q9, #8
    vrshr.u16 q13, q10, #8
    vraddhn.u16 d28, q11, q8
    vraddhn.u16 d29, q12, q9
    vraddhn.u16 d30, q13, q10
.endm

/* Pipelined tail+head with PF source prefetching interleaved. */
.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head
    vrshr.u16 q11, q8, #8
    vswp d3, d31
    vrshr.u16 q12, q9, #8
    vrshr.u16 q13, q10, #8
    fetch_src_pixblock
    vraddhn.u16 d28, q11, q8
    PF add, PF_X, PF_X, #8
    PF tst, PF_CTL, #0xF
    PF addne, PF_X, PF_X, #8
    PF subne, PF_CTL, PF_CTL, #1
    vraddhn.u16 d29, q12, q9
    vraddhn.u16 d30, q13, q10
    vmull.u8 q8, d3, d0
    vmull.u8 q9, d3, d1
    vmull.u8 q10, d3, d2
    vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
    PF cmp, PF_X, ORIG_W
    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
    PF subge, PF_X, PF_X, ORIG_W
    PF subsge, PF_CTL, PF_CTL, #0x10
    PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
.endm

generate_composite_function \
    pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_rpixbuf_8888_process_pixblock_head, \
    pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \
    pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* over_0565_8_0565: r5g6b5 OVER r5g6b5 through an a8 mask; both source
 * and destination are first expanded to planar 8-bit channels. */
.macro pixman_composite_over_0565_8_0565_process_pixblock_head
    /* mask is in d15 */
    convert_0565_to_x888 q4, d2, d1, d0
    convert_0565_to_x888 q5, d6, d5, d4
    /* source pixel data is in      {d0, d1, d2, XX} */
    /* destination pixel data is in {d4, d5, d6, XX} */
    vmvn.8 d7, d15
    vmull.u8 q6, d15, d2
    vmull.u8 q5, d15, d1
    vmull.u8 q4, d15, d0
    vmull.u8 q8, d7, d4
    vmull.u8 q9, d7, d5
    vmull.u8 q13, d7, d6
    vrshr.u16 q12, q6, #8
    vrshr.u16 q11, q5, #8
    vrshr.u16 q10, q4, #8
    vraddhn.u16 d2, q6, q12
    vraddhn.u16 d1, q5, q11
    vraddhn.u16 d0, q4, q10
.endm

.macro pixman_composite_over_0565_8_0565_process_pixblock_tail
    vrshr.u16 q14, q8, #8
    vrshr.u16 q15, q9, #8
    vrshr.u16 q12, q13, #8
    vraddhn.u16 d28, q14, q8
    vraddhn.u16 d29, q15, q9
    vraddhn.u16 d30, q12, q13
    vqadd.u8 q0, q0, q14
    vqadd.u8 q1, q1, q15
    /* 32bpp result is in {d0, d1, d2, XX} */
    convert_8888_to_0565 d2, d1, d0, q14, q15, q3
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head
    fetch_mask_pixblock
    pixman_composite_over_0565_8_0565_process_pixblock_tail
    fetch_src_pixblock
    vld1.16 {d10, d11}, [DST_R, :128]!
    cache_preload 8, 8
    pixman_composite_over_0565_8_0565_process_pixblock_head
    vst1.16 {d28, d29}, [DST_W, :128]!
.endm generate_composite_function \ pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_n_0565_init add DUMMY, sp, #(ARGS_STACK_OFFSET + 8) vpush {d8-d15} vld1.32 {d15[0]}, [DUMMY] vdup.8 d15, d15[3] .endm .macro pixman_composite_over_0565_n_0565_cleanup vpop {d8-d15} .endm generate_composite_function \ pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_0565_n_0565_init, \ pixman_composite_over_0565_n_0565_cleanup, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_0565_8_0565_process_pixblock_head /* mask is in d15 */ convert_0565_to_x888 q4, d2, d1, d0 convert_0565_to_x888 q5, d6, d5, d4 /* source pixel data is in {d0, d1, d2, XX} */ /* destination pixel data is in {d4, d5, d6, XX} */ vmull.u8 q6, d15, d2 vmull.u8 q5, d15, d1 vmull.u8 q4, d15, d0 vrshr.u16 q12, q6, #8 vrshr.u16 q11, q5, #8 vrshr.u16 q10, q4, #8 vraddhn.u16 d2, q6, q12 vraddhn.u16 d1, q5, q11 vraddhn.u16 d0, q4, q10 .endm .macro 
pixman_composite_add_0565_8_0565_process_pixblock_tail vqadd.u8 q0, q0, q2 vqadd.u8 q1, q1, q3 /* 32bpp result is in {d0, d1, d2, XX} */ convert_8888_to_0565 d2, d1, d0, q14, q15, q3 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_add_0565_8_0565_process_pixblock_tail fetch_src_pixblock vld1.16 {d10, d11}, [DST_R, :128]! cache_preload 8, 8 pixman_composite_add_0565_8_0565_process_pixblock_head vst1.16 {d28, d29}, [DST_W, :128]! .endm generate_composite_function \ pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_add_0565_8_0565_process_pixblock_head, \ pixman_composite_add_0565_8_0565_process_pixblock_tail, \ pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_0565_process_pixblock_head /* mask is in d15 */ convert_0565_to_x888 q5, d6, d5, d4 /* destination pixel data is in {d4, d5, d6, xx} */ vmvn.8 d24, d15 /* get inverted alpha */ /* now do alpha blending */ vmull.u8 q8, d24, d4 vmull.u8 q9, d24, d5 vmull.u8 q10, d24, d6 .endm .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vraddhn.u16 d0, q14, q8 vraddhn.u16 d1, q15, q9 vraddhn.u16 d2, q12, q10 /* 32bpp result is in {d0, d1, d2, XX} */ convert_8888_to_0565 d2, d1, d0, q14, q15, q3 .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head fetch_src_pixblock 
pixman_composite_out_reverse_8_0565_process_pixblock_tail vld1.16 {d10, d11}, [DST_R, :128]! cache_preload 8, 8 pixman_composite_out_reverse_8_0565_process_pixblock_head vst1.16 {d28, d29}, [DST_W, :128]! .endm generate_composite_function \ pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_out_reverse_8_0565_process_pixblock_head, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 15, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_8888_process_pixblock_head /* src is in d0 */ /* destination pixel data is in {d4, d5, d6, d7} */ vmvn.8 d1, d0 /* get inverted alpha */ /* now do alpha blending */ vmull.u8 q8, d1, d4 vmull.u8 q9, d1, d5 vmull.u8 q10, d1, d6 vmull.u8 q11, d1, d7 .endm .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail vrshr.u16 q14, q8, #8 vrshr.u16 q15, q9, #8 vrshr.u16 q12, q10, #8 vrshr.u16 q13, q11, #8 vraddhn.u16 d28, q14, q8 vraddhn.u16 d29, q15, q9 vraddhn.u16 d30, q12, q10 vraddhn.u16 d31, q13, q11 /* 32bpp result is in {d28, d29, d30, d31} */ .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_8888_process_pixblock_tail vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! cache_preload 8, 8 pixman_composite_out_reverse_8_8888_process_pixblock_head vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! 
.endm generate_composite_function \ pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8_8888_process_pixblock_head, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_0565_process_pixblock_head, \ pixman_composite_over_8888_0565_process_pixblock_tail, \ pixman_composite_over_8888_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ 
pixman_composite_src_8888_0565_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. 
*/ .macro bilinear_load_8888 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 vld1.32 {\reg1}, [TMP1], STRIDE vld1.32 {\reg2}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 vld1.32 {\reg2[0]}, [TMP1], STRIDE vld1.32 {\reg2[1]}, [TMP1] convert_four_0565_to_x888_packed \reg2, \reg1, \reg2, \tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 \reg1, \reg2, \tmp1 vmull.u8 \acc1, \reg1, d28 vmlal.u8 \acc1, \reg2, d29 bilinear_load_8888 \reg3, \reg4, \tmp2 vmull.u8 \acc2, \reg3, d28 vmlal.u8 \acc2, \reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \xacc1, \xacc2, \xreg1, \xreg2, \xreg3, \xreg4, \xacc2lo, \xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \yacc1, \yacc2, \yreg1, \yreg2, \yreg3, \yreg4, \yacc2lo, \yacc2hi .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {\acc2lo[0]}, [TMP1], STRIDE vld1.32 {\acc2hi[0]}, [TMP2], STRIDE vld1.32 {\acc2lo[1]}, [TMP1] vld1.32 {\acc2hi[1]}, [TMP2] convert_0565_to_x888 \acc2, \reg3, \reg2, \reg1 vzip.u8 \reg1, \reg3 vzip.u8 \reg2, \reg4 vzip.u8 \reg3, \reg4 vzip.u8 \reg1, \reg2 vmull.u8 \acc1, \reg1, d28 vmlal.u8 \acc1, \reg2, d29 vmull.u8 \acc2, \reg3, d28 vmlal.u8 \acc2, \reg4, d29 .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX 
add TMP2, TOP, TMP2, asl #1 vld1.32 {\xacc2lo[0]}, [TMP1], STRIDE vld1.32 {\xacc2hi[0]}, [TMP2], STRIDE vld1.32 {\xacc2lo[1]}, [TMP1] vld1.32 {\xacc2hi[1]}, [TMP2] convert_0565_to_x888 \xacc2, \xreg3, \xreg2, \xreg1 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #1 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #1 vld1.32 {\yacc2lo[0]}, [TMP1], STRIDE vzip.u8 \xreg1, \xreg3 vld1.32 {\yacc2hi[0]}, [TMP2], STRIDE vzip.u8 \xreg2, \xreg4 vld1.32 {\yacc2lo[1]}, [TMP1] vzip.u8 \xreg3, \xreg4 vld1.32 {\yacc2hi[1]}, [TMP2] vzip.u8 \xreg1, \xreg2 convert_0565_to_x888 \yacc2, \yreg3, \yreg2, \yreg1 vmull.u8 \xacc1, \xreg1, d28 vzip.u8 \yreg1, \yreg3 vmlal.u8 \xacc1, \xreg2, d29 vzip.u8 \yreg2, \yreg4 vmull.u8 \xacc2, \xreg3, d28 vzip.u8 \yreg3, \yreg4 vmlal.u8 \xacc2, \xreg4, d29 vzip.u8 \yreg1, \yreg2 vmull.u8 \yacc1, \yreg1, d28 vmlal.u8 \yacc1, \yreg2, d29 vmull.u8 \yacc2, \yreg3, d28 vmlal.u8 \yacc2, \yreg4, d29 .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if \numpix == 4 vst1.32 {d0, d1}, [OUT, :128]! .elseif \numpix == 2 vst1.32 {d0}, [OUT, :64]! .elseif \numpix == 1 vst1.32 {d0[0]}, [OUT, :32]! .else .error bilinear_store_8888 \numpix is unsupported .endif .endm .macro bilinear_store_0565 numpix, tmp1, tmp2 vuzp.u8 d0, d1 vuzp.u8 d2, d3 vuzp.u8 d1, d3 vuzp.u8 d0, d2 convert_8888_to_0565 d2, d1, d0, q1, \tmp1, \tmp2 .if \numpix == 4 vst1.16 {d2}, [OUT, :64]! .elseif \numpix == 2 vst1.32 {d2[0]}, [OUT, :32]! .elseif \numpix == 1 vst1.16 {d2[0]}, [OUT, :16]! 
.else .error bilinear_store_0565 \numpix is unsupported .endif .endm .macro bilinear_interpolate_last_pixel src_fmt, dst_fmt bilinear_load_\()\src_fmt d0, d1, d2 vmull.u8 q1, d0, d28 vmlal.u8 q1, d1, d29 /* 5 cycles bubble */ vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 /* 5 cycles bubble */ vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ vmovn.u16 d0, q0 /* 1 cycle bubble */ bilinear_store_\()\dst_fmt 1, q2, q3 .endm .macro bilinear_interpolate_two_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_two_\()\src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vmovn.u16 d0, q0 bilinear_store_\()\dst_fmt 2, q2, q3 .endm .macro bilinear_interpolate_four_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_four_\()\src_fmt \ q1, q11, d0, d1, d20, d21, d22, d23 \ q3, q9, d4, d5, d16, d17, d18, d19 pld [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d2, d30 vmlal.u16 q0, d3, d30 vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q10, d22, d31 vmlal.u16 q10, d23, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d6, d30 vmlal.u16 q2, d7, d30 vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS pld [TMP2, PF_OFFS] vmlsl.u16 q8, d18, d31 vmlal.u16 q8, d19, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS) 
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vmovn.u16 d0, q0 vmovn.u16 d1, q2 vadd.u16 q12, q12, q13 bilinear_store_\()\dst_fmt 4, q2, q3 .endm .macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt\()_head .else bilinear_interpolate_four_pixels \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail .endif .endm .macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail_head .else bilinear_interpolate_four_pixels \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt\()_head .else bilinear_interpolate_four_pixels_head \src_fmt, \dst_fmt bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail .else bilinear_interpolate_four_pixels_tail \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail_head .else bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt .endif .endm .set BILINEAR_FLAG_UNROLL_4, 0 .set BILINEAR_FLAG_UNROLL_8, 1 .set 
BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline * functions. * * Bilinear scanline scaler macro template uses the following arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * bpp_shift - (1 << bpp_shift) is the size of source pixel in bytes * prefetch_distance - prefetch in the source image by that many * pixels ahead */ .macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \ src_bpp_shift, dst_bpp_shift, \ prefetch_distance, flags pixman_asm_function \fname OUT .req r0 TOP .req r1 BOTTOM .req r2 WT .req r3 WB .req r4 X .req r5 UX .req r6 WIDTH .req ip TMP1 .req r3 TMP2 .req r4 PF_OFFS .req r7 TMP3 .req r8 TMP4 .req r9 STRIDE .req r2 mov ip, sp push {r4, r5, r6, r7, r8, r9} mov PF_OFFS, #\prefetch_distance ldmia ip, {WB, X, UX, WIDTH} mul PF_OFFS, PF_OFFS, UX .if ((\flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpush {d8-d15} .endif sub STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 3f vdup.u16 q12, X vdup.u16 q13, UX vdup.u8 d28, WT vdup.u8 d29, WB vadd.u16 d25, d25, d26 /* ensure good destination alignment */ cmp WIDTH, #1 blt 0f tst OUT, #(1 << \dst_bpp_shift) beq 0f vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 bilinear_interpolate_last_pixel \src_fmt, \dst_fmt sub WIDTH, WIDTH, #1 0: vadd.u16 q13, q13, q13 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 cmp WIDTH, #2 blt 0f tst OUT, #(1 << (\dst_bpp_shift + 1)) beq 0f bilinear_interpolate_two_pixels \src_fmt, \dst_fmt sub WIDTH, WIDTH, #2 0: .if ((\flags) & BILINEAR_FLAG_UNROLL_8) != 0 /*********** 8 pixels per iteration *****************/ cmp WIDTH, #4 blt 0f tst OUT, #(1 << (\dst_bpp_shift + 2)) beq 0f bilinear_interpolate_four_pixels \src_fmt, \dst_fmt sub WIDTH, WIDTH, #4 0: subs WIDTH, WIDTH, #8 blt 1f mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift) 
bilinear_interpolate_eight_pixels_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #8 blt 5f 0: bilinear_interpolate_eight_pixels_tail_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #8 bge 0b 5: bilinear_interpolate_eight_pixels_tail \src_fmt, \dst_fmt 1: tst WIDTH, #4 beq 2f bilinear_interpolate_four_pixels \src_fmt, \dst_fmt 2: .else /*********** 4 pixels per iteration *****************/ subs WIDTH, WIDTH, #4 blt 1f mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift) bilinear_interpolate_four_pixels_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #4 blt 5f 0: bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #4 bge 0b 5: bilinear_interpolate_four_pixels_tail \src_fmt, \dst_fmt 1: /****************************************************/ .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 2f bilinear_interpolate_two_pixels \src_fmt, \dst_fmt 2: tst WIDTH, #1 beq 3f bilinear_interpolate_last_pixel \src_fmt, \dst_fmt 3: .if ((\flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0 vpop {d8-d15} .endif pop {r4, r5, r6, r7, r8, r9} bx lr .unreq OUT .unreq TOP .unreq WT .unreq WB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE pixman_end_asm_function .endm /*****************************************************************************/ .set have_bilinear_interpolate_four_pixels_8888_8888, 1 .macro bilinear_interpolate_four_pixels_8888_8888_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d22}, [TMP1], STRIDE vld1.32 {d23}, [TMP1] mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 vmull.u8 q8, d22, d28 vmlal.u8 q8, d23, d29 vld1.32 {d22}, [TMP2], STRIDE vld1.32 {d23}, [TMP2] mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmull.u8 q9, d22, d28 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vshll.u16 q0, d16, 
#BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d6, q0 vmovn.u16 d7, q2 vadd.u16 q12, q12, q13 vst1.32 {d6, d7}, [OUT, :128]! .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 
vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d6, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d7, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vst1.32 {d6, d7}, [OUT, :128]! vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 .endm /*****************************************************************************/ .set have_bilinear_interpolate_eight_pixels_8888_0565, 1 .macro bilinear_interpolate_eight_pixels_8888_0565_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vld1.32 {d20}, [TMP1], STRIDE vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vld1.32 {d22}, [TMP2], STRIDE vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, 
d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d8, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d9, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 .endm .macro bilinear_interpolate_eight_pixels_8888_0565_tail vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vadd.u16 q12, q12, q13 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vmovn.u16 d10, q0 vmovn.u16 d11, q2 vadd.u16 q12, q12, q13 vuzp.u8 d8, d9 vuzp.u8 d10, d11 vuzp.u8 d9, d11 vuzp.u8 d8, d10 vshll.u8 q6, d9, #8 vshll.u8 q5, d10, #8 vshll.u8 q7, d8, #8 vsri.u16 q5, q6, #5 vsri.u16 q5, q7, #11 vst1.32 {d10, d11}, [OUT, :128]! 
.endm .macro bilinear_interpolate_eight_pixels_8888_0565_tail_head mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vuzp.u8 d8, d9 vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d10, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d11, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vuzp.u8 d10, d11 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vmlsl.u16 q1, d18, d31 mov TMP1, X, asr #16 add X, X, UX add TMP1, TOP, TMP1, asl #2 mov TMP2, X, asr #16 add X, X, UX add TMP2, TOP, TMP2, asl #2 vmlal.u16 q1, d19, d31 vuzp.u8 d9, d11 vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS vuzp.u8 d8, d10 vmlsl.u16 q2, d20, d30 vmlal.u16 q2, d21, d30 vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS vld1.32 {d20}, [TMP1], STRIDE vmlsl.u16 q3, d22, d31 vmlal.u16 q3, 
d23, d31 vld1.32 {d21}, [TMP1] vmull.u8 q8, d20, d28 vmlal.u8 q8, d21, d29 vshll.u8 q6, d9, #8 vshll.u8 q5, d10, #8 vshll.u8 q7, d8, #8 vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS) vsri.u16 q5, q6, #5 vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS) vsri.u16 q5, q7, #11 vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS) vld1.32 {d22}, [TMP2], STRIDE vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS) vadd.u16 q12, q12, q13 vld1.32 {d23}, [TMP2] vmull.u8 q9, d22, d28 mov TMP3, X, asr #16 add X, X, UX add TMP3, TOP, TMP3, asl #2 mov TMP4, X, asr #16 add X, X, UX add TMP4, TOP, TMP4, asl #2 vmlal.u8 q9, d23, d29 vld1.32 {d22}, [TMP3], STRIDE vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS) vld1.32 {d23}, [TMP3] vmull.u8 q10, d22, d28 vmlal.u8 q10, d23, d29 vmovn.u16 d8, q0 vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS vmovn.u16 d9, q2 vmlsl.u16 q0, d16, d30 vmlal.u16 q0, d17, d30 pld [TMP4, PF_OFFS] vld1.32 {d16}, [TMP4], STRIDE vadd.u16 q12, q12, q13 vld1.32 {d17}, [TMP4] pld [TMP4, PF_OFFS] vmull.u8 q11, d16, d28 vmlal.u8 q11, d17, d29 vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS vst1.32 {d10, d11}, [OUT, :128]! 
vmlsl.u16 q1, d18, d31 .endm /*****************************************************************************/ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \ 2, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \ 2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \ 1, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \ 1, 1, 28, BILINEAR_FLAG_UNROLL_4 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-neon-asm.h0000664000175000017500000011705114712446423020354 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ /* * This file contains a macro ('generate_composite_function') which can * construct 2D image processing functions, based on a common template. * Any combinations of source, destination and mask images with 8bpp, * 16bpp, 24bpp, 32bpp color formats are supported. * * This macro takes care of: * - handling of leading and trailing unaligned pixels * - doing most of the work related to L2 cache preload * - encourages the use of software pipelining for better instructions * scheduling * * The user of this macro has to provide some configuration parameters * (bit depths for the images, prefetch distance, etc.) and a set of * macros, which should implement basic code chunks responsible for * pixels processing. See 'pixman-arm-neon-asm.S' file for the usage * examples. * * TODO: * - try overlapped pixel method (from Ian Rickards) when processing * exactly two blocks of pixels * - maybe add an option to do reverse scanline processing */ /* * Bit flags for 'generate_composite_function' macro which are used * to tune generated functions behavior. */ .set FLAG_DST_WRITEONLY, 0 .set FLAG_DST_READWRITE, 1 .set FLAG_DEINTERLEAVE_32BPP, 2 /* * Offset in stack where mask and source pointer/stride can be accessed * from 'init' macro. This is useful for doing special handling for solid mask. */ .set ARGS_STACK_OFFSET, 40 /* * Constants for selecting preferable prefetch type. 
*/ .set PREFETCH_TYPE_NONE, 0 /* No prefetch at all */ .set PREFETCH_TYPE_SIMPLE, 1 /* A simple, fixed-distance-ahead prefetch */ .set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */ /* * Definitions of supplementary pixld/pixst macros (for partial load/store of * pixel data). */ .macro pixldst1 op, elem_size, reg1, mem_operand, abits .if \abits > 0 \op\().\()\elem_size {d\()\reg1}, [\()\mem_operand\(), :\()\abits\()]! .else \op\().\()\elem_size {d\()\reg1}, [\()\mem_operand\()]! .endif .endm .macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits .if \abits > 0 \op\().\()\elem_size {d\()\reg1, d\()\reg2}, [\()\mem_operand\(), :\()\abits\()]! .else \op\().\()\elem_size {d\()\reg1, d\()\reg2}, [\()\mem_operand\()]! .endif .endm .macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits .if \abits > 0 \op\().\()\elem_size {d\()\reg1, d\()\reg2, d\()\reg3, d\()\reg4}, [\()\mem_operand\(), :\()\abits\()]! .else \op\().\()\elem_size {d\()\reg1, d\()\reg2, d\()\reg3, d\()\reg4}, [\()\mem_operand\()]! .endif .endm .macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits \op\().\()\elem_size {d\()\reg1[\idx]}, [\()\mem_operand\()]! .endm .macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand \op\().\()\elem_size {d\()\reg1, d\()\reg2, d\()\reg3}, [\()\mem_operand\()]! .endm .macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand \op\().\()\elem_size {d\()\reg1[\idx], d\()\reg2[\idx], d\()\reg3[\idx]}, [\()\mem_operand\()]! 
.endm .macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits .if \numbytes == 32 pixldst4 \op, \elem_size, %(\basereg+4), %(\basereg+5), \ %(\basereg+6), %(\basereg+7), \mem_operand, \abits .elseif \numbytes == 16 pixldst2 \op, \elem_size, %(\basereg+2), %(\basereg+3), \mem_operand, \abits .elseif \numbytes == 8 pixldst1 \op, \elem_size, %(\basereg+1), \mem_operand, \abits .elseif \numbytes == 4 .if !RESPECT_STRICT_ALIGNMENT || (\elem_size == 32) pixldst0 \op, 32, %(\basereg+0), 1, \mem_operand, \abits .elseif \elem_size == 16 pixldst0 \op, 16, %(\basereg+0), 2, \mem_operand, \abits pixldst0 \op, 16, %(\basereg+0), 3, \mem_operand, \abits .else pixldst0 \op, 8, %(\basereg+0), 4, \mem_operand, \abits pixldst0 \op, 8, %(\basereg+0), 5, \mem_operand, \abits pixldst0 \op, 8, %(\basereg+0), 6, \mem_operand, \abits pixldst0 \op, 8, %(\basereg+0), 7, \mem_operand, \abits .endif .elseif \numbytes == 2 .if !RESPECT_STRICT_ALIGNMENT || (\elem_size == 16) pixldst0 \op, 16, %(\basereg+0), 1, \mem_operand, \abits .else pixldst0 \op, 8, %(\basereg+0), 2, \mem_operand, \abits pixldst0 \op, 8, %(\basereg+0), 3, \mem_operand, \abits .endif .elseif \numbytes == 1 pixldst0 \op, 8, %(\basereg+0), 1, \mem_operand, \abits .else .error "unsupported size: \numbytes" .endif .endm .macro pixld numpix, bpp, basereg, mem_operand, abits=0 .if \bpp > 0 .if (\bpp == 32) && (\numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0) pixldst4 vld4, 8, %(\basereg+4), %(\basereg+5), \ %(\basereg+6), %(\basereg+7), \mem_operand, \abits .elseif (\bpp == 24) && (\numpix == 8) pixldst3 vld3, 8, %(\basereg+3), %(\basereg+4), %(\basereg+5), \mem_operand .elseif (\bpp == 24) && (\numpix == 4) pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 4, \mem_operand pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 5, \mem_operand pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 6, \mem_operand pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 7, 
\mem_operand .elseif (\bpp == 24) && (\numpix == 2) pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 2, \mem_operand pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 3, \mem_operand .elseif (\bpp == 24) && (\numpix == 1) pixldst30 vld3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 1, \mem_operand .else pixldst %(\numpix * \bpp / 8), vld1, %(\bpp), \basereg, \mem_operand, \abits .endif .endif .endm .macro pixst numpix, bpp, basereg, mem_operand, abits=0 .if \bpp > 0 .if (\bpp == 32) && (\numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0) pixldst4 vst4, 8, %(\basereg+4), %(\basereg+5), \ %(\basereg+6), %(\basereg+7), \mem_operand, \abits .elseif (\bpp == 24) && (\numpix == 8) pixldst3 vst3, 8, %(\basereg+3), %(\basereg+4), %(\basereg+5), \mem_operand .elseif (\bpp == 24) && (\numpix == 4) pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 4, \mem_operand pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 5, \mem_operand pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 6, \mem_operand pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 7, \mem_operand .elseif (\bpp == 24) && (\numpix == 2) pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 2, \mem_operand pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 3, \mem_operand .elseif (\bpp == 24) && (\numpix == 1) pixldst30 vst3, 8, %(\basereg+0), %(\basereg+1), %(\basereg+2), 1, \mem_operand .else pixldst %(\numpix * \bpp / 8), vst1, %(\bpp), \basereg, \mem_operand, \abits .endif .endif .endm .macro pixld_a numpix, bpp, basereg, mem_operand .if (\bpp * \numpix) <= 128 pixld \numpix, \bpp, \basereg, \mem_operand, %(\bpp * \numpix) .else pixld \numpix, \bpp, \basereg, \mem_operand, 128 .endif .endm .macro pixst_a numpix, bpp, basereg, mem_operand .if (\bpp * \numpix) <= 128 pixst \numpix, \bpp, \basereg, \mem_operand, %(\bpp * \numpix) .else pixst \numpix, \bpp, \basereg, \mem_operand, 128 .endif .endm /* * Pixel 
fetcher for nearest scaling (needs TMP1, TMP2, VX, UNIT_X register * aliases to be defined) */ .macro pixld1_s elem_size, reg1, mem_operand .if \elem_size == 16 mov TMP1, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP1, \mem_operand, TMP1, asl #1 mov TMP2, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP2, \mem_operand, TMP2, asl #1 vld1.16 {d\()\reg1\()[0]}, [TMP1, :16] mov TMP1, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP1, \mem_operand, TMP1, asl #1 vld1.16 {d\()\reg1\()[1]}, [TMP2, :16] mov TMP2, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP2, \mem_operand, TMP2, asl #1 vld1.16 {d\()\reg1\()[2]}, [TMP1, :16] vld1.16 {d\()\reg1\()[3]}, [TMP2, :16] .elseif \elem_size == 32 mov TMP1, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP1, \mem_operand, TMP1, asl #2 mov TMP2, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP2, \mem_operand, TMP2, asl #2 vld1.32 {d\()\reg1\()[0]}, [TMP1, :32] vld1.32 {d\()\reg1\()[1]}, [TMP2, :32] .else .error "unsupported" .endif .endm .macro pixld2_s elem_size, reg1, reg2, mem_operand .if 0 /* elem_size == 32 */ mov TMP1, VX, asr #16 add VX, VX, UNIT_X, asl #1 add TMP1, \mem_operand, TMP1, asl #2 mov TMP2, VX, asr #16 sub VX, VX, UNIT_X add TMP2, \mem_operand, TMP2, asl #2 vld1.32 {d\()\reg1\()[0]}, [TMP1, :32] mov TMP1, VX, asr #16 add VX, VX, UNIT_X, asl #1 add TMP1, \mem_operand, TMP1, asl #2 vld1.32 {d\()\reg2\()[0]}, [TMP2, :32] mov TMP2, VX, asr #16 add VX, VX, UNIT_X add TMP2, \mem_operand, TMP2, asl #2 vld1.32 {d\()\reg1\()[1]}, [TMP1, :32] vld1.32 {d\()\reg2\()[1]}, [TMP2, :32] .else pixld1_s \elem_size, \reg1, \mem_operand pixld1_s \elem_size, \reg2, \mem_operand .endif .endm .macro pixld0_s elem_size, reg1, idx, mem_operand .if \elem_size == 16 mov TMP1, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b 
add TMP1, \mem_operand, TMP1, asl #1 vld1.16 {d\()\reg1\()[\idx]}, [TMP1, :16] .elseif \elem_size == 32 mov TMP1, VX, asr #16 adds VX, VX, UNIT_X 5: subspl VX, VX, SRC_WIDTH_FIXED bpl 5b add TMP1, \mem_operand, TMP1, asl #2 vld1.32 {d\()\reg1\()[\idx]}, [TMP1, :32] .endif .endm .macro pixld_s_internal numbytes, elem_size, basereg, mem_operand .if \numbytes == 32 pixld2_s \elem_size, %(\basereg+4), %(\basereg+5), \mem_operand pixld2_s \elem_size, %(\basereg+6), %(\basereg+7), \mem_operand pixdeinterleave \elem_size, %(\basereg+4) .elseif \numbytes == 16 pixld2_s \elem_size, %(\basereg+2), %(\basereg+3), \mem_operand .elseif \numbytes == 8 pixld1_s \elem_size, %(\basereg+1), \mem_operand .elseif \numbytes == 4 .if \elem_size == 32 pixld0_s \elem_size, %(\basereg+0), 1, \mem_operand .elseif \elem_size == 16 pixld0_s \elem_size, %(\basereg+0), 2, \mem_operand pixld0_s \elem_size, %(\basereg+0), 3, \mem_operand .else pixld0_s \elem_size, %(\basereg+0), 4, \mem_operand pixld0_s \elem_size, %(\basereg+0), 5, \mem_operand pixld0_s \elem_size, %(\basereg+0), 6, \mem_operand pixld0_s \elem_size, %(\basereg+0), 7, \mem_operand .endif .elseif \numbytes == 2 .if \elem_size == 16 pixld0_s \elem_size, %(\basereg+0), 1, \mem_operand .else pixld0_s \elem_size, %(\basereg+0), 2, \mem_operand pixld0_s \elem_size, %(\basereg+0), 3, \mem_operand .endif .elseif \numbytes == 1 pixld0_s \elem_size, %(\basereg+0), 1, \mem_operand .else .error "unsupported size: \numbytes" .endif .endm .macro pixld_s numpix, bpp, basereg, mem_operand .if \bpp > 0 pixld_s_internal %(\numpix * \bpp / 8), %(\bpp), \basereg, \mem_operand .endif .endm .macro vuzp8 reg1, reg2 vuzp.8 d\()\reg1, d\()\reg2 .endm .macro vzip8 reg1, reg2 vzip.8 d\()\reg1, d\()\reg2 .endm /* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */ .macro pixdeinterleave bpp, basereg .if (\bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0) vuzp8 %(\basereg+0), %(\basereg+1) vuzp8 %(\basereg+2), %(\basereg+3) vuzp8 
%(\basereg+1), %(\basereg+3) vuzp8 %(\basereg+0), %(\basereg+2) .endif .endm /* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */ .macro pixinterleave bpp, basereg .if (\bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0) vzip8 %(\basereg+0), %(\basereg+2) vzip8 %(\basereg+1), %(\basereg+3) vzip8 %(\basereg+2), %(\basereg+3) vzip8 %(\basereg+0), %(\basereg+1) .endif .endm /* * This is a macro for implementing cache preload. The main idea is that * cache preload logic is mostly independent from the rest of pixels * processing code. It starts at the top left pixel and moves forward * across pixels and can jump across scanlines. Prefetch distance is * handled in an 'incremental' way: it starts from 0 and advances to the * optimal distance over time. After reaching optimal prefetch distance, * it is kept constant. There are some checks which prevent prefetching * unneeded pixel lines below the image (but it still can prefetch a bit * more data on the right side of the image - not a big issue and may * be actually helpful when rendering text glyphs). Additional trick is * the use of LDR instruction for prefetch instead of PLD when moving to * the next line, the point is that we have a high chance of getting TLB * miss in this case, and PLD would be useless. * * This sounds like it may introduce a noticeable overhead (when working with * fully cached data). But in reality, due to having a separate pipeline and * instruction queue for NEON unit in ARM Cortex-A8, normal ARM code can * execute simultaneously with NEON and be completely shadowed by it. Thus * we get no performance overhead at all (*). This looks like a very nice * feature of Cortex-A8, if used wisely. We don't have a hardware prefetcher, * but still can implement some rather advanced prefetch logic in software * for almost zero cost! * * (*) The overhead of the prefetcher is visible when running some trivial * pixels processing like simple copy. 
Anyway, having prefetch is a must * when working with the graphics data. */ .macro PF a, x:vararg .if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED) \a \x .endif .endm .macro cache_preload std_increment, boost_increment .if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0) .if regs_shortage PF ldr, ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */ .endif .if \std_increment != 0 PF add, PF_X, PF_X, #\std_increment .endif PF tst, PF_CTL, #0xF PF addne, PF_X, PF_X, #\boost_increment PF subne, PF_CTL, PF_CTL, #1 PF cmp, PF_X, ORIG_W .if src_bpp_shift >= 0 PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] .endif .if dst_r_bpp != 0 PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] .endif .if mask_bpp_shift >= 0 PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift] .endif PF subge, PF_X, PF_X, ORIG_W PF subsge, PF_CTL, PF_CTL, #0x10 .if src_bpp_shift >= 0 PF ldrbge, DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! .endif .if dst_r_bpp != 0 PF ldrbge, DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! .endif .if mask_bpp_shift >= 0 PF ldrbge, DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]! .endif .endif .endm .macro cache_preload_simple .if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE) .if src_bpp > 0 pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)] .endif .if dst_r_bpp > 0 pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)] .endif .if mask_bpp > 0 pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)] .endif .endif .endm .macro fetch_mask_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK .endm /* * Macro which is used to process leading pixels until destination * pointer is properly aligned (at 16 bytes boundary). When destination * buffer uses 16bpp format, this is unnecessary, or even pointless. 
*/ .macro ensure_destination_ptr_alignment process_pixblock_head, \ process_pixblock_tail, \ process_pixblock_tail_head .if dst_w_bpp != 24 tst DST_R, #0xF beq 2f .irp lowbit, 1, 2, 4, 8, 16 .if (dst_w_bpp <= (\lowbit * 8)) && ((\lowbit * 8) < (pixblock_size * dst_w_bpp)) .if \lowbit < 16 /* we don't need more than 16-byte alignment */ tst DST_R, #\lowbit beq 1f .endif pixld_src (\lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC pixld (\lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK .if dst_r_bpp > 0 pixld_a (\lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R .else add DST_R, DST_R, #\lowbit .endif PF add, PF_X, PF_X, #(\lowbit * 8 / dst_w_bpp) sub W, W, #(\lowbit * 8 / dst_w_bpp) 1: .endif .endr pixdeinterleave src_bpp, src_basereg pixdeinterleave mask_bpp, mask_basereg pixdeinterleave dst_r_bpp, dst_r_basereg \process_pixblock_head cache_preload 0, pixblock_size cache_preload_simple \process_pixblock_tail pixinterleave dst_w_bpp, dst_w_basereg .irp lowbit, 1, 2, 4, 8, 16 .if (dst_w_bpp <= (\lowbit * 8)) && ((\lowbit * 8) < (pixblock_size * dst_w_bpp)) .if \lowbit < 16 /* we don't need more than 16-byte alignment */ tst DST_W, #\lowbit beq 1f .endif pixst_a (\lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W 1: .endif .endr .endif 2: .endm /* * Special code for processing up to (pixblock_size - 1) remaining * trailing pixels. As SIMD processing performs operation on * pixblock_size pixels, anything smaller than this has to be loaded * and stored in a special way. Loading and storing of pixel data is * performed in such a way that we fill some 'slots' in the NEON * registers (some slots naturally are unused), then perform compositing * operation as usual. In the end, the data is taken from these 'slots' * and saved to memory. 
* * cache_preload_flag - allows to suppress prefetch if * set to 0 * dst_aligned_flag - selects whether destination buffer * is aligned */ .macro process_trailing_pixels cache_preload_flag, \ dst_aligned_flag, \ process_pixblock_head, \ process_pixblock_tail, \ process_pixblock_tail_head tst W, #(pixblock_size - 1) beq 2f .irp chunk_size, 16, 8, 4, 2, 1 .if pixblock_size > \chunk_size tst W, #\chunk_size beq 1f pixld_src \chunk_size, src_bpp, src_basereg, SRC pixld \chunk_size, mask_bpp, mask_basereg, MASK .if \dst_aligned_flag != 0 pixld_a \chunk_size, dst_r_bpp, dst_r_basereg, DST_R .else pixld \chunk_size, dst_r_bpp, dst_r_basereg, DST_R .endif .if \cache_preload_flag != 0 PF add, PF_X, PF_X, #\chunk_size .endif 1: .endif .endr pixdeinterleave src_bpp, src_basereg pixdeinterleave mask_bpp, mask_basereg pixdeinterleave dst_r_bpp, dst_r_basereg \process_pixblock_head .if \cache_preload_flag != 0 cache_preload 0, pixblock_size cache_preload_simple .endif \process_pixblock_tail pixinterleave dst_w_bpp, dst_w_basereg .irp chunk_size, 16, 8, 4, 2, 1 .if pixblock_size > \chunk_size tst W, #\chunk_size beq 1f .if \dst_aligned_flag != 0 pixst_a \chunk_size, dst_w_bpp, dst_w_basereg, DST_W .else pixst \chunk_size, dst_w_bpp, dst_w_basereg, DST_W .endif 1: .endif .endr 2: .endm /* * Macro, which performs all the needed operations to switch to the next * scanline and start the next loop iteration unless all the scanlines * are already processed. 
*/ .macro advance_to_next_scanline start_of_loop_label .if regs_shortage ldrd W, [sp] /* load W and H (width and height) from stack */ .else mov W, ORIG_W .endif add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift .if src_bpp != 0 add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift .endif .if mask_bpp != 0 add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift .endif .if (dst_w_bpp != 24) sub DST_W, DST_W, W, lsl #dst_bpp_shift .endif .if (src_bpp != 24) && (src_bpp != 0) sub SRC, SRC, W, lsl #src_bpp_shift .endif .if (mask_bpp != 24) && (mask_bpp != 0) sub MASK, MASK, W, lsl #mask_bpp_shift .endif subs H, H, #1 mov DST_R, DST_W .if regs_shortage str H, [sp, #4] /* save updated height to stack */ .endif bge \start_of_loop_label .endm /* * Registers are allocated in the following way by default: * d0, d1, d2, d3 - reserved for loading source pixel data * d4, d5, d6, d7 - reserved for loading destination pixel data * d24, d25, d26, d27 - reserved for loading mask pixel data * d28, d29, d30, d31 - final destination pixel data for writeback to memory */ .macro generate_composite_function fname, \ src_bpp_, \ mask_bpp_, \ dst_w_bpp_, \ flags, \ pixblock_size_, \ prefetch_distance, \ init, \ cleanup, \ process_pixblock_head, \ process_pixblock_tail, \ process_pixblock_tail_head, \ dst_w_basereg_ = 28, \ dst_r_basereg_ = 4, \ src_basereg_ = 0, \ mask_basereg_ = 24 pixman_asm_function \fname push {r4-r12, lr} /* save all registers */ /* * Select prefetch type for this function. If prefetch distance is * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch * has to be used instead of ADVANCED. 
*/ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT .if \prefetch_distance == 0 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE .elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \ ((\src_bpp_ == 24) || (\mask_bpp_ == 24) || (\dst_w_bpp_ == 24)) .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE .endif /* * Make some macro arguments globally visible and accessible * from other macros */ .set src_bpp, \src_bpp_ .set mask_bpp, \mask_bpp_ .set dst_w_bpp, \dst_w_bpp_ .set pixblock_size, \pixblock_size_ .set dst_w_basereg, \dst_w_basereg_ .set dst_r_basereg, \dst_r_basereg_ .set src_basereg, \src_basereg_ .set mask_basereg, \mask_basereg_ .macro pixld_src x:vararg pixld \x .endm .macro fetch_src_pixblock pixld_src pixblock_size, src_bpp, \ (src_basereg - pixblock_size * src_bpp / 64), SRC .endm /* * Assign symbolic names to registers */ W .req r0 /* width (is updated during processing) */ H .req r1 /* height (is updated during processing) */ DST_W .req r2 /* destination buffer pointer for writes */ DST_STRIDE .req r3 /* destination image stride */ SRC .req r4 /* source buffer pointer */ SRC_STRIDE .req r5 /* source image stride */ DST_R .req r6 /* destination buffer pointer for reads */ MASK .req r7 /* mask pointer */ MASK_STRIDE .req r8 /* mask stride */ PF_CTL .req r9 /* combined lines counter and prefetch */ /* distance increment counter */ PF_X .req r10 /* pixel index in a scanline for current */ /* pretetch position */ PF_SRC .req r11 /* pointer to source scanline start */ /* for prefetch purposes */ PF_DST .req r12 /* pointer to destination scanline start */ /* for prefetch purposes */ PF_MASK .req r14 /* pointer to mask scanline start */ /* for prefetch purposes */ /* * Check whether we have enough registers for all the local variables. * If we don't have enough registers, original width and height are * kept on top of stack (and 'regs_shortage' variable is set to indicate * this for the rest of code). 
Even if there are enough registers, the * allocation scheme may be a bit different depending on whether source * or mask is not used. */ .if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED) ORIG_W .req r10 /* saved original width */ DUMMY .req r12 /* temporary register */ .set regs_shortage, 0 .elseif mask_bpp == 0 ORIG_W .req r7 /* saved original width */ DUMMY .req r8 /* temporary register */ .set regs_shortage, 0 .elseif src_bpp == 0 ORIG_W .req r4 /* saved original width */ DUMMY .req r5 /* temporary register */ .set regs_shortage, 0 .else ORIG_W .req r1 /* saved original width */ DUMMY .req r1 /* temporary register */ .set regs_shortage, 1 .endif .set mask_bpp_shift, -1 .if src_bpp == 32 .set src_bpp_shift, 2 .elseif src_bpp == 24 .set src_bpp_shift, 0 .elseif src_bpp == 16 .set src_bpp_shift, 1 .elseif src_bpp == 8 .set src_bpp_shift, 0 .elseif src_bpp == 0 .set src_bpp_shift, -1 .else .error "requested src bpp (src_bpp) is not supported" .endif .if mask_bpp == 32 .set mask_bpp_shift, 2 .elseif mask_bpp == 24 .set mask_bpp_shift, 0 .elseif mask_bpp == 8 .set mask_bpp_shift, 0 .elseif mask_bpp == 0 .set mask_bpp_shift, -1 .else .error "requested mask bpp (mask_bpp) is not supported" .endif .if dst_w_bpp == 32 .set dst_bpp_shift, 2 .elseif dst_w_bpp == 24 .set dst_bpp_shift, 0 .elseif dst_w_bpp == 16 .set dst_bpp_shift, 1 .elseif dst_w_bpp == 8 .set dst_bpp_shift, 0 .else .error "requested dst bpp (dst_w_bpp) is not supported" .endif .if (((\flags) & FLAG_DST_READWRITE) != 0) .set dst_r_bpp, dst_w_bpp .else .set dst_r_bpp, 0 .endif .if (((\flags) & FLAG_DEINTERLEAVE_32BPP) != 0) .set DEINTERLEAVE_32BPP_ENABLED, 1 .else .set DEINTERLEAVE_32BPP_ENABLED, 0 .endif .if \prefetch_distance < 0 || \prefetch_distance > 15 .error "invalid prefetch distance (\prefetch_distance)" .endif .if src_bpp > 0 ldr SRC, [sp, #40] .endif .if mask_bpp > 0 ldr MASK, [sp, #48] .endif PF mov, PF_X, #0 .if src_bpp > 0 ldr SRC_STRIDE, [sp, #44] .endif .if mask_bpp > 0 ldr MASK_STRIDE, 
[sp, #52] .endif mov DST_R, DST_W .if src_bpp == 24 sub SRC_STRIDE, SRC_STRIDE, W sub SRC_STRIDE, SRC_STRIDE, W, lsl #1 .endif .if mask_bpp == 24 sub MASK_STRIDE, MASK_STRIDE, W sub MASK_STRIDE, MASK_STRIDE, W, lsl #1 .endif .if dst_w_bpp == 24 sub DST_STRIDE, DST_STRIDE, W sub DST_STRIDE, DST_STRIDE, W, lsl #1 .endif /* * Setup advanced prefetcher initial state */ PF mov, PF_SRC, SRC PF mov, PF_DST, DST_R PF mov, PF_MASK, MASK /* PF_CTL = prefetch_distance | ((h - 1) << 4) */ PF mov, PF_CTL, H, lsl #4 PF add, PF_CTL, #(\prefetch_distance - 0x10) \init .if regs_shortage push {r0, r1} .endif subs H, H, #1 .if regs_shortage str H, [sp, #4] /* save updated height to stack */ .else mov ORIG_W, W .endif blt 9f cmp W, #(pixblock_size * 2) blt 8f /* * This is the start of the pipelined loop, which if optimized for * long scanlines */ 0: ensure_destination_ptr_alignment \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */ pixld_a pixblock_size, dst_r_bpp, \ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R fetch_src_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK PF add, PF_X, PF_X, #pixblock_size \process_pixblock_head cache_preload 0, pixblock_size cache_preload_simple subs W, W, #(pixblock_size * 2) blt 2f 1: \process_pixblock_tail_head cache_preload_simple subs W, W, #pixblock_size bge 1b 2: \process_pixblock_tail pixst_a pixblock_size, dst_w_bpp, \ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W /* Process the remaining trailing pixels in the scanline */ process_trailing_pixels 1, 1, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head advance_to_next_scanline 0b .if regs_shortage pop {r0, r1} .endif \cleanup pop {r4-r12, pc} /* exit */ /* * This is the start of the loop, designed to process images with small width * (less than pixblock_size * 2 pixels). 
In this case neither pipelining * nor prefetch are used. */ 8: /* Process exactly pixblock_size pixels if needed */ tst W, #pixblock_size beq 1f pixld pixblock_size, dst_r_bpp, \ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R fetch_src_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK \process_pixblock_head \process_pixblock_tail pixst pixblock_size, dst_w_bpp, \ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W 1: /* Process the remaining trailing pixels in the scanline */ process_trailing_pixels 0, 0, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head advance_to_next_scanline 8b 9: .if regs_shortage pop {r0, r1} .endif \cleanup pop {r4-r12, pc} /* exit */ .purgem fetch_src_pixblock .purgem pixld_src .unreq SRC .unreq MASK .unreq DST_R .unreq DST_W .unreq ORIG_W .unreq W .unreq H .unreq SRC_STRIDE .unreq DST_STRIDE .unreq MASK_STRIDE .unreq PF_CTL .unreq PF_X .unreq PF_SRC .unreq PF_DST .unreq PF_MASK .unreq DUMMY pixman_end_asm_function .endm /* * A simplified variant of function generation template for a single * scanline processing (for implementing pixman combine functions) */ .macro generate_composite_function_scanline use_nearest_scaling, \ fname, \ src_bpp_, \ mask_bpp_, \ dst_w_bpp_, \ flags, \ pixblock_size_, \ init, \ cleanup, \ process_pixblock_head, \ process_pixblock_tail, \ process_pixblock_tail_head, \ dst_w_basereg_ = 28, \ dst_r_basereg_ = 4, \ src_basereg_ = 0, \ mask_basereg_ = 24 pixman_asm_function \fname .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE /* * Make some macro arguments globally visible and accessible * from other macros */ .set src_bpp, \src_bpp_ .set mask_bpp, \mask_bpp_ .set dst_w_bpp, \dst_w_bpp_ .set pixblock_size, \pixblock_size_ .set dst_w_basereg, \dst_w_basereg_ .set dst_r_basereg, \dst_r_basereg_ .set src_basereg, \src_basereg_ .set mask_basereg, \mask_basereg_ .if \use_nearest_scaling != 0 /* * Assign symbolic names to registers 
for nearest scaling */ W .req r0 DST_W .req r1 SRC .req r2 VX .req r3 UNIT_X .req ip MASK .req lr TMP1 .req r4 TMP2 .req r5 DST_R .req r6 SRC_WIDTH_FIXED .req r7 .macro pixld_src x:vararg pixld_s \x .endm ldr UNIT_X, [sp] push {r4-r8, lr} ldr SRC_WIDTH_FIXED, [sp, #(24 + 4)] .if mask_bpp != 0 ldr MASK, [sp, #(24 + 8)] .endif .else /* * Assign symbolic names to registers */ W .req r0 /* width (is updated during processing) */ DST_W .req r1 /* destination buffer pointer for writes */ SRC .req r2 /* source buffer pointer */ DST_R .req ip /* destination buffer pointer for reads */ MASK .req r3 /* mask pointer */ .macro pixld_src x:vararg pixld \x .endm .endif .if (((\flags) & FLAG_DST_READWRITE) != 0) .set dst_r_bpp, dst_w_bpp .else .set dst_r_bpp, 0 .endif .if (((\flags) & FLAG_DEINTERLEAVE_32BPP) != 0) .set DEINTERLEAVE_32BPP_ENABLED, 1 .else .set DEINTERLEAVE_32BPP_ENABLED, 0 .endif .macro fetch_src_pixblock pixld_src pixblock_size, src_bpp, \ (src_basereg - pixblock_size * src_bpp / 64), SRC .endm \init mov DST_R, DST_W cmp W, #pixblock_size blt 8f ensure_destination_ptr_alignment \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head subs W, W, #pixblock_size blt 7f /* Implement "head (tail_head) ... 
(tail_head) tail" loop pattern */ pixld_a pixblock_size, dst_r_bpp, \ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R fetch_src_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK \process_pixblock_head subs W, W, #pixblock_size blt 2f 1: \process_pixblock_tail_head subs W, W, #pixblock_size bge 1b 2: \process_pixblock_tail pixst_a pixblock_size, dst_w_bpp, \ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W 7: /* Process the remaining trailing pixels in the scanline (dst aligned) */ process_trailing_pixels 0, 1, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head \cleanup .if \use_nearest_scaling != 0 pop {r4-r8, pc} /* exit */ .else bx lr /* exit */ .endif 8: /* Process the remaining trailing pixels in the scanline (dst unaligned) */ process_trailing_pixels 0, 0, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head \cleanup .if \use_nearest_scaling != 0 pop {r4-r8, pc} /* exit */ .unreq DST_R .unreq SRC .unreq W .unreq VX .unreq UNIT_X .unreq TMP1 .unreq TMP2 .unreq DST_W .unreq MASK .unreq SRC_WIDTH_FIXED .else bx lr /* exit */ .unreq SRC .unreq MASK .unreq DST_R .unreq DST_W .unreq W .endif .purgem fetch_src_pixblock .purgem pixld_src pixman_end_asm_function .endm .macro generate_composite_function_single_scanline x:vararg generate_composite_function_scanline 0, \x .endm .macro generate_composite_function_nearest_scanline x:vararg generate_composite_function_scanline 1, \x .endm /* Default prologue/epilogue, nothing special needs to be done */ .macro default_init .endm .macro default_cleanup .endm /* * Prologue/epilogue variant which additionally saves/restores d8-d15 * registers (they need to be saved/restored by callee according to ABI). * This is required if the code needs to use all the NEON registers. 
*/ .macro default_init_need_all_regs vpush {d8-d15} .endm .macro default_cleanup_need_all_regs vpop {d8-d15} .endm /******************************************************************************/ /* * Conversion of 8 r5g6b6 pixels packed in 128-bit register (in) * into a planar a8r8g8b8 format (with a, r, g, b color components * stored into 64-bit registers out_a, out_r, out_g, out_b respectively). * * Warning: the conversion is destructive and the original * value (in) is lost. */ .macro convert_0565_to_8888 in, out_a, out_r, out_g, out_b vshrn.u16 \out_r, \in, #8 vshrn.u16 \out_g, \in, #3 vsli.u16 \in, \in, #5 vmov.u8 \out_a, #255 vsri.u8 \out_r, \out_r, #5 vsri.u8 \out_g, \out_g, #6 vshrn.u16 \out_b, \in, #2 .endm .macro convert_0565_to_x888 in, out_r, out_g, out_b vshrn.u16 \out_r, \in, #8 vshrn.u16 \out_g, \in, #3 vsli.u16 \in, \in, #5 vsri.u8 \out_r, \out_r, #5 vsri.u8 \out_g, \out_g, #6 vshrn.u16 \out_b, \in, #2 .endm /* * Conversion from planar a8r8g8b8 format (with a, r, g, b color components * in 64-bit registers in_a, in_r, in_g, in_b respectively) into 8 r5g6b6 * pixels packed in 128-bit register (out). Requires two temporary 128-bit * registers (tmp1, tmp2) */ .macro convert_8888_to_0565 in_r, in_g, in_b, out, tmp1, tmp2 vshll.u8 \tmp1, \in_g, #8 vshll.u8 \out, \in_r, #8 vshll.u8 \tmp2, \in_b, #8 vsri.u16 \out, \tmp1, #5 vsri.u16 \out, \tmp2, #11 .endm /* * Conversion of four r5g6b5 pixels (in) to four x8r8g8b8 pixels * returned in (out0, out1) registers pair. Requires one temporary * 64-bit register (tmp). 
'out1' and 'in' may overlap, the original * value from 'in' is lost */ .macro convert_four_0565_to_x888_packed in, out0, out1, tmp vshl.u16 \out0, \in, #5 /* G top 6 bits */ vshl.u16 \tmp, \in, #11 /* B top 5 bits */ vsri.u16 \in, \in, #5 /* R is ready in top bits */ vsri.u16 \out0, \out0, #6 /* G is ready in top bits */ vsri.u16 \tmp, \tmp, #5 /* B is ready in top bits */ vshr.u16 \out1, \in, #8 /* R is in place */ vsri.u16 \out0, \tmp, #8 /* G & B is in place */ vzip.u16 \out0, \out1 /* everything is in place */ .endm ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-neon.c0000664000175000017500000006761114712446423017577 0ustar00mattst88mattst88/* * Copyright Âİ 2009 ARM Ltd, Movial Creative Technologies Oy * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of ARM Ltd not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. ARM Ltd makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
* * Author: Ian Rickards (ian.rickards@arm.com) * Author: Jonathan Morton (jonathan.morton@movial.com) * Author: Markku Vire (markku.vire@movial.com) * */ #ifdef HAVE_CONFIG_H #include #endif #include #include "pixman-private.h" #include "pixman-arm-common.h" PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_x888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0565_0565, uint16_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_0888, uint8_t, 3, uint8_t, 3) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_0565, uint32_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0565_8888, uint16_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_8888_rev, uint8_t, 3, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_0565_rev, uint8_t, 3, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_pixbuf_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_rpixbuf_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, add_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, add_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, over_8888_0565, uint32_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, over_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, out_reverse_8_0565, uint8_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, out_reverse_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_0565, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_8888, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_reverse_n_8888, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_DST (0, neon, in_n_8, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_0565, uint8_t, 1, uint16_t, 1) 
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_8888_ca, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_0565_ca, uint32_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (0, neon, src_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (0, neon, src_n_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_8888_n_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_8888_n_0565, uint32_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_0565_n_0565, uint16_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, add_8888_n_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8_8_8, uint8_t, 1, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_0565_8_0565, uint16_t, 1, uint8_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8888_8_8888, uint32_t, 1, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8888_8888_8888, uint32_t, 1, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8_8888, uint32_t, 1, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8888_8888, uint32_t, 1, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8_0565, uint32_t, 1, uint8_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_0565_8_0565, uint16_t, 1, uint8_t, 1, uint16_t, 1) 
PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_8888, OVER, uint32_t, uint32_t) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_0565, OVER, uint32_t, uint16_t) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_0565, SRC, uint32_t, uint16_t) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 0565_8888, SRC, uint16_t, uint32_t) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_0565, OVER, uint32_t, uint16_t) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, neon, 0565_8_0565, OVER, uint16_t, uint16_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 8888_8888, SRC, uint32_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 8888_0565, SRC, uint32_t, uint16_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 0565_x888, SRC, uint16_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 0565_0565, SRC, uint16_t, uint16_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, neon, 8888_8888, OVER, uint32_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, neon, 8888_8888, ADD, uint32_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_8888, SRC, uint32_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_0565, SRC, uint32_t, uint16_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 0565_8_x888, SRC, uint16_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 0565_8_0565, SRC, uint16_t, uint16_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, OVER, uint32_t, uint32_t) PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, ADD, uint32_t, uint32_t) void pixman_composite_src_n_8_asm_neon (int32_t w, int32_t h, uint8_t *dst, int32_t dst_stride, uint8_t src); void pixman_composite_src_n_0565_asm_neon (int32_t w, int32_t h, uint16_t *dst, int32_t dst_stride, uint16_t src); void pixman_composite_src_n_8888_asm_neon (int32_t w, int32_t h, uint32_t *dst, int32_t dst_stride, uint32_t src); static 
pixman_bool_t arm_neon_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t _xor) { /* stride is always multiple of 32bit units in pixman */ int32_t byte_stride = stride * sizeof(uint32_t); switch (bpp) { case 8: pixman_composite_src_n_8_asm_neon ( width, height, (uint8_t *)(((char *) bits) + y * byte_stride + x), byte_stride, _xor & 0xff); return TRUE; case 16: pixman_composite_src_n_0565_asm_neon ( width, height, (uint16_t *)(((char *) bits) + y * byte_stride + x * 2), byte_stride / 2, _xor & 0xffff); return TRUE; case 32: pixman_composite_src_n_8888_asm_neon ( width, height, (uint32_t *)(((char *) bits) + y * byte_stride + x * 4), byte_stride / 4, _xor); return TRUE; default: return FALSE; } } static pixman_bool_t arm_neon_blt (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height) { if (src_bpp != dst_bpp) return FALSE; switch (src_bpp) { case 16: pixman_composite_src_0565_0565_asm_neon ( width, height, (uint16_t *)(((char *) dst_bits) + dest_y * dst_stride * 4 + dest_x * 2), dst_stride * 2, (uint16_t *)(((char *) src_bits) + src_y * src_stride * 4 + src_x * 2), src_stride * 2); return TRUE; case 32: pixman_composite_src_8888_8888_asm_neon ( width, height, (uint32_t *)(((char *) dst_bits) + dest_y * dst_stride * 4 + dest_x * 4), dst_stride, (uint32_t *)(((char *) src_bits) + src_y * src_stride * 4 + src_x * 4), src_stride); return TRUE; default: return FALSE; } } static const pixman_fast_path_t arm_neon_fast_paths[] = { PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, neon_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, neon_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565), 
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, neon_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, neon_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, neon_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, neon_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, neon_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, neon_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, neon_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, neon_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, neon_composite_src_0888_0888), PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, x8r8g8b8, neon_composite_src_0888_8888_rev), PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, r5g6b5, neon_composite_src_0888_0565_rev), PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8r8g8b8, neon_composite_src_pixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8b8g8r8, neon_composite_src_rpixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8r8g8b8, neon_composite_src_rpixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8b8g8r8, neon_composite_src_pixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, neon_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, neon_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, neon_composite_src_n_8_8888), 
PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, neon_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8, neon_composite_src_n_8_8), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8, neon_composite_over_n_8_8), PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, neon_composite_over_n_8_0565), PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, neon_composite_over_n_8_0565), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, neon_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, neon_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, neon_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, neon_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, neon_composite_over_n_0565), PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, neon_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, neon_composite_over_n_8888), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, neon_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, neon_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, neon_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, neon_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, neon_composite_over_n_8888_0565_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, neon_composite_over_n_8888_0565_ca), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, neon_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, neon_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, r5g6b5, neon_composite_over_8888_n_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, b5g6r5, neon_composite_over_8888_n_0565), PIXMAN_STD_FAST_PATH (OVER, r5g6b5, solid, r5g6b5, neon_composite_over_0565_n_0565), PIXMAN_STD_FAST_PATH (OVER, b5g6r5, solid, b5g6r5, 
neon_composite_over_0565_n_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, neon_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, neon_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, neon_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, neon_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, r5g6b5, neon_composite_over_8888_8_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, b5g6r5, neon_composite_over_8888_8_0565), PIXMAN_STD_FAST_PATH (OVER, r5g6b5, a8, r5g6b5, neon_composite_over_0565_8_0565), PIXMAN_STD_FAST_PATH (OVER, b5g6r5, a8, b5g6r5, neon_composite_over_0565_8_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, x8r8g8b8, neon_composite_over_8888_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_over_8888_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, neon_composite_over_8888_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, neon_composite_over_8888_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, neon_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, neon_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, neon_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, neon_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, a8r8g8b8, neon_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, a8b8g8r8, neon_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, neon_composite_add_n_8_8), PIXMAN_STD_FAST_PATH (ADD, solid, a8, x8r8g8b8, neon_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8r8g8b8, neon_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, x8b8g8r8, neon_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8b8g8r8, neon_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8, a8, 
a8, neon_composite_add_8_8_8), PIXMAN_STD_FAST_PATH (ADD, r5g6b5, a8, r5g6b5, neon_composite_add_0565_8_0565), PIXMAN_STD_FAST_PATH (ADD, b5g6r5, a8, b5g6r5, neon_composite_add_0565_8_0565), PIXMAN_STD_FAST_PATH (ADD, x8r8g8b8, a8, x8r8g8b8, neon_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8, x8r8g8b8, neon_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, x8b8g8r8, a8, x8b8g8r8, neon_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, a8, x8b8g8r8, neon_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8, a8r8g8b8, neon_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, a8, a8b8g8r8, neon_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, x8r8g8b8, a8r8g8b8, x8r8g8b8, neon_composite_add_8888_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, x8r8g8b8, neon_composite_add_8888_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_add_8888_8888_8888), PIXMAN_STD_FAST_PATH (ADD, x8r8g8b8, solid, x8r8g8b8, neon_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, solid, x8r8g8b8, neon_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, x8b8g8r8, solid, x8b8g8r8, neon_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, solid, x8b8g8r8, neon_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, solid, a8r8g8b8, neon_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, solid, a8b8g8r8, neon_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, neon_composite_add_8_8), PIXMAN_STD_FAST_PATH (ADD, x8r8g8b8, null, x8r8g8b8, neon_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, x8r8g8b8, neon_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, x8b8g8r8, null, x8b8g8r8, neon_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, x8b8g8r8, neon_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, neon_composite_add_8888_8888), 
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, neon_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (IN, solid, null, a8, neon_composite_in_n_8), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, neon_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, neon_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, r5g6b5, neon_composite_out_reverse_8_0565), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, b5g6r5, neon_composite_out_reverse_8_0565), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, x8r8g8b8, neon_composite_out_reverse_8_8888), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, a8r8g8b8, neon_composite_out_reverse_8_8888), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, x8b8g8r8, neon_composite_out_reverse_8_8888), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, a8b8g8r8, neon_composite_out_reverse_8_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, neon_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, neon_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, neon_8888_0565), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, b5g6r5, neon_8888_0565), SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_0565), SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_0565), SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, b5g6r5, neon_8888_0565), SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, b5g6r5, neon_8888_0565), SIMPLE_NEAREST_FAST_PATH (SRC, b5g6r5, x8b8g8r8, neon_0565_8888), SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_8888), /* Note: NONE repeat is not supported yet */ SIMPLE_NEAREST_FAST_PATH_COVER (SRC, r5g6b5, a8r8g8b8, neon_0565_8888), SIMPLE_NEAREST_FAST_PATH_COVER (SRC, b5g6r5, a8b8g8r8, neon_0565_8888), SIMPLE_NEAREST_FAST_PATH_PAD (SRC, r5g6b5, a8r8g8b8, neon_0565_8888), SIMPLE_NEAREST_FAST_PATH_PAD (SRC, b5g6r5, 
a8b8g8r8, neon_0565_8888), PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8r8g8b8, r5g6b5, neon_8888_8_0565), PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8b8g8r8, b5g6r5, neon_8888_8_0565), PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, r5g6b5, r5g6b5, neon_0565_8_0565), PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, b5g6r5, b5g6r5, neon_0565_8_0565), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_0565), SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_0565), SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_x888), SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, r5g6b5, neon_0565_0565), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_FAST_PATH (ADD, x8r8g8b8, x8r8g8b8, neon_8888_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, neon_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, neon_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, neon_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_8_0565), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_8_0565), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_8_x888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, r5g6b5, neon_0565_8_0565), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8_8888), 
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, x8r8g8b8, x8r8g8b8, neon_8888_8_8888), { PIXMAN_OP_NONE }, }; #define BIND_COMBINE_U(name) \ void \ pixman_composite_scanline_##name##_mask_asm_neon (int32_t w, \ const uint32_t *dst, \ const uint32_t *src, \ const uint32_t *mask); \ \ void \ pixman_composite_scanline_##name##_asm_neon (int32_t w, \ const uint32_t *dst, \ const uint32_t *src); \ \ static void \ neon_combine_##name##_u (pixman_implementation_t *imp, \ pixman_op_t op, \ uint32_t * dest, \ const uint32_t * src, \ const uint32_t * mask, \ int width) \ { \ if (mask) \ pixman_composite_scanline_##name##_mask_asm_neon (width, dest, \ src, mask); \ else \ pixman_composite_scanline_##name##_asm_neon (width, dest, src); \ } BIND_COMBINE_U (over) BIND_COMBINE_U (add) BIND_COMBINE_U (out_reverse) pixman_implementation_t * _pixman_implementation_create_arm_neon (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, arm_neon_fast_paths); imp->combine_32[PIXMAN_OP_OVER] = neon_combine_over_u; imp->combine_32[PIXMAN_OP_ADD] = neon_combine_add_u; imp->combine_32[PIXMAN_OP_OUT_REVERSE] = neon_combine_out_reverse_u; imp->blt = arm_neon_blt; imp->fill = arm_neon_fill; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-simd-asm-scaled.S0000664000175000017500000001120114712446423021543 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Mozilla Corporation * Copyright Âİ 2010 Nokia Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Mozilla Corporation not be used in * advertising 
or publicity pertaining to distribution of the software without * specific, written prior permission. Mozilla Corporation makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Jeff Muizelaar (jeff@infidigm.net) * */ /* Prevent the stack from becoming executable */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv6 .object_arch armv4 .arm .altmacro .p2align 2 #include "pixman-arm-asm.h" pixman_syntax_unified /* * Note: This code is only using armv5te instructions (not even armv6), * but is scheduled for ARM Cortex-A8 pipeline. So it might need to * be split into a few variants, tuned for each microarchitecture. * * TODO: In order to get good performance on ARM9/ARM11 cores (which don't * have efficient write combining), it needs to be changed to use 16-byte * aligned writes using STM instruction. 
* * Nearest scanline scaler macro template uses the following arguments: * fname - name of the function to generate * bpp_shift - (1 << bpp_shift) is the size of pixel in bytes * t - type suffix for LDR/STR instructions * prefetch_distance - prefetch in the source image by that many * pixels ahead * prefetch_braking_distance - stop prefetching when that many pixels are * remaining before the end of scanline */ .macro generate_nearest_scanline_func fname, bpp_shift, t, \ prefetch_distance, \ prefetch_braking_distance pixman_asm_function \fname W .req r0 DST .req r1 SRC .req r2 VX .req r3 UNIT_X .req ip TMP1 .req r4 TMP2 .req r5 VXMASK .req r6 PF_OFFS .req r7 SRC_WIDTH_FIXED .req r8 ldr UNIT_X, [sp] push {r4, r5, r6, r7, r8, r10} mvn VXMASK, #((1 << \bpp_shift) - 1) ldr SRC_WIDTH_FIXED, [sp, #28] /* define helper macro */ .macro scale_2_pixels ldr\()\t TMP1, [SRC, TMP1] and TMP2, VXMASK, VX, asr #(16 - \bpp_shift) adds VX, VX, UNIT_X str\()\t TMP1, [DST], #(1 << \bpp_shift) 9: subspl VX, VX, SRC_WIDTH_FIXED bpl 9b ldr\()\t TMP2, [SRC, TMP2] and TMP1, VXMASK, VX, asr #(16 - \bpp_shift) adds VX, VX, UNIT_X str\()\t TMP2, [DST], #(1 << \bpp_shift) 9: subspl VX, VX, SRC_WIDTH_FIXED bpl 9b .endm /* now do the scaling */ and TMP1, VXMASK, VX, asr #(16 - \bpp_shift) adds VX, VX, UNIT_X 9: subspl VX, VX, SRC_WIDTH_FIXED bpl 9b subs W, W, #(8 + \prefetch_braking_distance) blt 2f /* calculate prefetch offset */ mov PF_OFFS, #\prefetch_distance mla PF_OFFS, UNIT_X, PF_OFFS, VX 1: /* main loop, process 8 pixels per iteration with prefetch */ pld [SRC, PF_OFFS, asr #(16 - \bpp_shift)] add PF_OFFS, PF_OFFS, UNIT_X, lsl #3 scale_2_pixels scale_2_pixels scale_2_pixels scale_2_pixels subs W, W, #8 bge 1b 2: subs W, W, #(4 - 8 - \prefetch_braking_distance) blt 2f 1: /* process the remaining pixels */ scale_2_pixels scale_2_pixels subs W, W, #4 bge 1b 2: tst W, #2 beq 2f scale_2_pixels 2: tst W, #1 ldr\()\t\()ne TMP1, [SRC, TMP1] str\()\t\()ne TMP1, [DST] /* cleanup helper macro */ 
.purgem scale_2_pixels .unreq DST .unreq SRC .unreq W .unreq VX .unreq UNIT_X .unreq TMP1 .unreq TMP2 .unreq VXMASK .unreq PF_OFFS .unreq SRC_WIDTH_FIXED /* return */ pop {r4, r5, r6, r7, r8, r10} bx lr pixman_end_asm_function .endm generate_nearest_scanline_func \ pixman_scaled_nearest_scanline_0565_0565_SRC_asm_armv6, 1, h, 80, 32 generate_nearest_scanline_func \ pixman_scaled_nearest_scanline_8888_8888_SRC_asm_armv6, 2, , 48, 32 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-simd-asm.S0000664000175000017500000012623714712446423020332 0ustar00mattst88mattst88/* * Copyright Âİ 2012 Raspberry Pi Foundation * Copyright Âİ 2012 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
* * Author: Ben Avison (bavison@riscosopen.org) * */ /* Prevent the stack from becoming executable */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv6 .object_arch armv4 .arm .altmacro .p2align 2 #include "pixman-arm-asm.h" #include "pixman-arm-simd-asm.h" pixman_syntax_unified /* A head macro should do all processing which results in an output of up to * 16 bytes, as far as the final load instruction. The corresponding tail macro * should complete the processing of the up-to-16 bytes. The calling macro will * sometimes choose to insert a preload or a decrement of X between them. * cond ARM condition code for code block * numbytes Number of output bytes that should be generated this time * firstreg First WK register in which to place output * unaligned_src Whether to use non-wordaligned loads of source image * unaligned_mask Whether to use non-wordaligned loads of mask image * preload If outputting 16 bytes causes 64 bytes to be read, whether an extra preload should be output */ .macro blit_init line_saved_regs STRIDE_D, STRIDE_S .endm .macro blit_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld \cond, \numbytes, \firstreg, SRC, \unaligned_src .endm .macro blit_inner_loop process_head, process_tail, unaligned_src, unaligned_mask, dst_alignment WK4 .req STRIDE_D WK5 .req STRIDE_S WK6 .req MASK WK7 .req STRIDE_M 110: pixld , 16, 0, SRC, \unaligned_src pixld , 16, 4, SRC, \unaligned_src pld [SRC, SCRATCH] pixst , 16, 0, DST pixst , 16, 4, DST subs X, X, #32*8/src_bpp bhs 110b .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_src_8888_8888_asm_armv6, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \ 4, /* prefetch distance */ \ blit_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ blit_process_head, \ nop_macro, /* process tail */ \ blit_inner_loop 
generate_composite_function \ pixman_composite_src_0565_0565_asm_armv6, 16, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \ 4, /* prefetch distance */ \ blit_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ blit_process_head, \ nop_macro, /* process tail */ \ blit_inner_loop generate_composite_function \ pixman_composite_src_8_8_asm_armv6, 8, 0, 8, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_SPILL_LINE_VARS_WIDE | FLAG_PROCESS_PRESERVES_SCRATCH, \ 3, /* prefetch distance */ \ blit_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ blit_process_head, \ nop_macro, /* process tail */ \ blit_inner_loop /******************************************************************************/ .macro src_n_8888_init ldr SRC, [sp, #ARGS_STACK_OFFSET] mov STRIDE_S, SRC mov MASK, SRC mov STRIDE_M, SRC .endm .macro src_n_0565_init ldrh SRC, [sp, #ARGS_STACK_OFFSET] orr SRC, SRC, SRC, lsl #16 mov STRIDE_S, SRC mov MASK, SRC mov STRIDE_M, SRC .endm .macro src_n_8_init ldrb SRC, [sp, #ARGS_STACK_OFFSET] orr SRC, SRC, SRC, lsl #8 orr SRC, SRC, SRC, lsl #16 mov STRIDE_S, SRC mov MASK, SRC mov STRIDE_M, SRC .endm .macro fill_process_tail cond, numbytes, firstreg WK4 .req SRC WK5 .req STRIDE_S WK6 .req MASK WK7 .req STRIDE_M pixst \cond, \numbytes, 4, DST .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_src_n_8888_asm_armv6, 0, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \ 0, /* prefetch distance doesn't apply */ \ src_n_8888_init \ nop_macro, /* newline */ \ nop_macro /* cleanup */ \ nop_macro /* process head */ \ fill_process_tail generate_composite_function \ pixman_composite_src_n_0565_asm_armv6, 0, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \ 0, /* prefetch distance doesn't apply 
*/ \ src_n_0565_init \ nop_macro, /* newline */ \ nop_macro /* cleanup */ \ nop_macro /* process head */ \ fill_process_tail generate_composite_function \ pixman_composite_src_n_8_asm_armv6, 0, 0, 8, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH \ 0, /* prefetch distance doesn't apply */ \ src_n_8_init \ nop_macro, /* newline */ \ nop_macro /* cleanup */ \ nop_macro /* process head */ \ fill_process_tail /******************************************************************************/ .macro src_x888_8888_pixel, cond, reg orr\()\cond WK\()\reg, WK\()\reg, #0xFF000000 .endm .macro pixman_composite_src_x888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld \cond, \numbytes, \firstreg, SRC, \unaligned_src .endm .macro pixman_composite_src_x888_8888_process_tail cond, numbytes, firstreg src_x888_8888_pixel \cond, %(\firstreg+0) .if \numbytes >= 8 src_x888_8888_pixel \cond, %(\firstreg+1) .if \numbytes == 16 src_x888_8888_pixel \cond, %(\firstreg+2) src_x888_8888_pixel \cond, %(\firstreg+3) .endif .endif .endm generate_composite_function \ pixman_composite_src_x888_8888_asm_armv6, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_SCRATCH, \ 3, /* prefetch distance */ \ nop_macro, /* init */ \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ pixman_composite_src_x888_8888_process_head, \ pixman_composite_src_x888_8888_process_tail /******************************************************************************/ .macro src_0565_8888_init /* Hold loop invariants in MASK and STRIDE_M */ ldr MASK, =0x07E007E0 mov STRIDE_M, #0xFF000000 /* Set GE[3:0] to 1010 so SEL instructions do what we want */ ldr SCRATCH, =0x80008000 uadd8 SCRATCH, SCRATCH, SCRATCH .endm .macro src_0565_8888_2pixels, reg1, reg2 and SCRATCH, WK\()\reg1, MASK @ 00000GGGGGG0000000000gggggg00000 bic WK\()\reg2, WK\()\reg1, MASK @ RRRRR000000BBBBBrrrrr000000bbbbb orr 
SCRATCH, SCRATCH, SCRATCH, lsr #6 @ 00000GGGGGGGGGGGG0000ggggggggggg mov WK\()\reg1, WK\()\reg2, lsl #16 @ rrrrr000000bbbbb0000000000000000 mov SCRATCH, SCRATCH, ror #19 @ GGGG0000ggggggggggg00000GGGGGGGG bic WK\()\reg2, WK\()\reg2, WK\()\reg1, lsr #16 @ RRRRR000000BBBBB0000000000000000 orr WK\()\reg1, WK\()\reg1, WK\()\reg1, lsr #5 @ rrrrrrrrrr0bbbbbbbbbb00000000000 orr WK\()\reg2, WK\()\reg2, WK\()\reg2, lsr #5 @ RRRRRRRRRR0BBBBBBBBBB00000000000 pkhtb WK\()\reg1, WK\()\reg1, WK\()\reg1, asr #5 @ rrrrrrrr--------bbbbbbbb-------- sel WK\()\reg1, WK\()\reg1, SCRATCH @ rrrrrrrrggggggggbbbbbbbb-------- mov SCRATCH, SCRATCH, ror #16 @ ggg00000GGGGGGGGGGGG0000gggggggg pkhtb WK\()\reg2, WK\()\reg2, WK\()\reg2, asr #5 @ RRRRRRRR--------BBBBBBBB-------- sel WK\()\reg2, WK\()\reg2, SCRATCH @ RRRRRRRRGGGGGGGGBBBBBBBB-------- orr WK\()\reg1, STRIDE_M, WK\()\reg1, lsr #8 @ 11111111rrrrrrrrggggggggbbbbbbbb orr WK\()\reg2, STRIDE_M, WK\()\reg2, lsr #8 @ 11111111RRRRRRRRGGGGGGGGBBBBBBBB .endm /* This version doesn't need STRIDE_M, but is one instruction longer. It would however be preferable for an XRGB target, since we could knock off the last 2 instructions, but is that a common case? 
and SCRATCH, WK\()\reg1, MASK @ 00000GGGGGG0000000000gggggg00000 bic WK\()\reg1, WK\()\reg1, MASK @ RRRRR000000BBBBBrrrrr000000bbbbb orr SCRATCH, SCRATCH, SCRATCH, lsr #6 @ 00000GGGGGGGGGGGG0000ggggggggggg mov WK\()\reg2, WK\()\reg1, lsr #16 @ 0000000000000000RRRRR000000BBBBB mov SCRATCH, SCRATCH, ror #27 @ GGGGGGGGGGGG0000ggggggggggg00000 bic WK\()\reg1, WK\()\reg1, WK\()\reg2, lsl #16 @ 0000000000000000rrrrr000000bbbbb mov WK\()\reg2, WK\()\reg2, lsl #3 @ 0000000000000RRRRR000000BBBBB000 mov WK\()\reg1, WK\()\reg1, lsl #3 @ 0000000000000rrrrr000000bbbbb000 orr WK\()\reg2, WK\()\reg2, WK\()\reg2, lsr #5 @ 0000000000000RRRRRRRRRR0BBBBBBBB orr WK\()\reg1, WK\()\reg1, WK\()\reg1, lsr #5 @ 0000000000000rrrrrrrrrr0bbbbbbbb pkhbt WK\()\reg2, WK\()\reg2, WK\()\reg2, lsl #5 @ --------RRRRRRRR--------BBBBBBBB pkhbt WK\()\reg1, WK\()\reg1, WK\()\reg1, lsl #5 @ --------rrrrrrrr--------bbbbbbbb sel WK\()\reg2, SCRATCH, WK\()\reg2 @ --------RRRRRRRRGGGGGGGGBBBBBBBB sel WK\()\reg1, SCRATCH, WK\()\reg1 @ --------rrrrrrrrggggggggbbbbbbbb orr WK\()\reg2, WK\()\reg2, #0xFF000000 @ 11111111RRRRRRRRGGGGGGGGBBBBBBBB orr WK\()\reg1, WK\()\reg1, #0xFF000000 @ 11111111rrrrrrrrggggggggbbbbbbbb */ .macro src_0565_8888_1pixel, reg bic SCRATCH, WK\()\reg, MASK @ 0000000000000000rrrrr000000bbbbb and WK\()\reg, WK\()\reg, MASK @ 000000000000000000000gggggg00000 mov SCRATCH, SCRATCH, lsl #3 @ 0000000000000rrrrr000000bbbbb000 mov WK\()\reg, WK\()\reg, lsl #5 @ 0000000000000000gggggg0000000000 orr SCRATCH, SCRATCH, SCRATCH, lsr #5 @ 0000000000000rrrrrrrrrr0bbbbbbbb orr WK\()\reg, WK\()\reg, WK\()\reg, lsr #6 @ 000000000000000gggggggggggg00000 pkhbt SCRATCH, SCRATCH, SCRATCH, lsl #5 @ --------rrrrrrrr--------bbbbbbbb sel WK\()\reg, WK\()\reg, SCRATCH @ --------rrrrrrrrggggggggbbbbbbbb orr WK\()\reg, WK\()\reg, #0xFF000000 @ 11111111rrrrrrrrggggggggbbbbbbbb .endm .macro src_0565_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload .if \numbytes == 16 pixldst ld,, 8, 
\firstreg, %(\firstreg+2),,, SRC, \unaligned_src .elseif \numbytes == 8 pixld , 4, \firstreg, SRC, \unaligned_src .elseif \numbytes == 4 pixld , 2, \firstreg, SRC, \unaligned_src .endif .endm .macro src_0565_8888_process_tail cond, numbytes, firstreg .if \numbytes == 16 src_0565_8888_2pixels \firstreg, %(\firstreg+1) src_0565_8888_2pixels %(\firstreg+2), %(\firstreg+3) .elseif \numbytes == 8 src_0565_8888_2pixels \firstreg, %(\firstreg+1) .else src_0565_8888_1pixel \firstreg .endif .endm generate_composite_function \ pixman_composite_src_0565_8888_asm_armv6, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER, \ 3, /* prefetch distance */ \ src_0565_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ src_0565_8888_process_head, \ src_0565_8888_process_tail /******************************************************************************/ .macro src_x888_0565_init /* Hold loop invariant in MASK */ ldr MASK, =0x001F001F line_saved_regs STRIDE_S, ORIG_W .endm .macro src_x888_0565_1pixel s, d and WK\()\d, MASK, WK\()\s, lsr #3 @ 00000000000rrrrr00000000000bbbbb and STRIDE_S, WK\()\s, #0xFC00 @ 0000000000000000gggggg0000000000 orr WK\()\d, WK\()\d, WK\()\d, lsr #5 @ 00000000000-----rrrrr000000bbbbb orr WK\()\d, WK\()\d, STRIDE_S, lsr #5 @ 00000000000-----rrrrrggggggbbbbb /* Top 16 bits are discarded during the following STRH */ .endm .macro src_x888_0565_2pixels slo, shi, d, tmp and SCRATCH, WK\()\shi, #0xFC00 @ 0000000000000000GGGGGG0000000000 and WK\()\tmp, MASK, WK\()\shi, lsr #3 @ 00000000000RRRRR00000000000BBBBB and WK\()\shi, MASK, WK\()\slo, lsr #3 @ 00000000000rrrrr00000000000bbbbb orr WK\()\tmp, WK\()\tmp, WK\()\tmp, lsr #5 @ 00000000000-----RRRRR000000BBBBB orr WK\()\tmp, WK\()\tmp, SCRATCH, lsr #5 @ 00000000000-----RRRRRGGGGGGBBBBB and SCRATCH, WK\()\slo, #0xFC00 @ 0000000000000000gggggg0000000000 orr WK\()\shi, WK\()\shi, WK\()\shi, lsr #5 @ 00000000000-----rrrrr000000bbbbb orr WK\()\shi, WK\()\shi, SCRATCH, lsr #5 @ 
00000000000-----rrrrrggggggbbbbb pkhbt WK\()\d, WK\()\shi, WK\()\tmp, lsl #16 @ RRRRRGGGGGGBBBBBrrrrrggggggbbbbb .endm .macro src_x888_0565_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req STRIDE_S WK5 .req STRIDE_M WK6 .req WK3 WK7 .req ORIG_W .if \numbytes == 16 pixld , 16, 4, SRC, 0 src_x888_0565_2pixels 4, 5, 0, 0 pixld , 8, 4, SRC, 0 src_x888_0565_2pixels 6, 7, 1, 1 pixld , 8, 6, SRC, 0 .else pixld , \numbytes*2, 4, SRC, 0 .endif .endm .macro src_x888_0565_process_tail cond, numbytes, firstreg .if \numbytes == 16 src_x888_0565_2pixels 4, 5, 2, 2 src_x888_0565_2pixels 6, 7, 3, 4 .elseif \numbytes == 8 src_x888_0565_2pixels 4, 5, 1, 1 src_x888_0565_2pixels 6, 7, 2, 2 .elseif \numbytes == 4 src_x888_0565_2pixels 4, 5, 1, 1 .else src_x888_0565_1pixel 4, 1 .endif .if \numbytes == 16 pixst , \numbytes, 0, DST .else pixst , \numbytes, 1, DST .endif .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_src_x888_0565_asm_armv6, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH, \ 3, /* prefetch distance */ \ src_x888_0565_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ src_x888_0565_process_head, \ src_x888_0565_process_tail /******************************************************************************/ .macro add_8_8_8pixels cond, dst1, dst2 uqadd8\()\cond WK\()\dst1, WK\()\dst1, MASK uqadd8\()\cond WK\()\dst2, WK\()\dst2, STRIDE_M .endm .macro add_8_8_4pixels cond, dst uqadd8\()\cond WK\()\dst, WK\()\dst, MASK .endm .macro add_8_8_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req MASK WK5 .req STRIDE_M .if \numbytes == 16 pixld \cond, 8, 4, SRC, \unaligned_src pixld \cond, 16, \firstreg, DST, 0 add_8_8_8pixels \cond, \firstreg, %(\firstreg+1) pixld \cond, 8, 4, SRC, \unaligned_src .else pixld \cond, \numbytes, 4, SRC, \unaligned_src pixld \cond, \numbytes, 
\firstreg, DST, 0 .endif .unreq WK4 .unreq WK5 .endm .macro add_8_8_process_tail cond, numbytes, firstreg .if \numbytes == 16 add_8_8_8pixels \cond, %(\firstreg+2), %(\firstreg+3) .elseif \numbytes == 8 add_8_8_8pixels \cond, \firstreg, %(\firstreg+1) .else add_8_8_4pixels \cond, \firstreg .endif .endm generate_composite_function \ pixman_composite_add_8_8_asm_armv6, 8, 0, 8, \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_PRESERVES_SCRATCH, \ 2, /* prefetch distance */ \ nop_macro, /* init */ \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ add_8_8_process_head, \ add_8_8_process_tail /******************************************************************************/ .macro over_8888_8888_init /* Hold loop invariant in MASK */ ldr MASK, =0x00800080 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK line_saved_regs STRIDE_D, STRIDE_S, ORIG_W .endm .macro over_8888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req STRIDE_D WK5 .req STRIDE_S WK6 .req STRIDE_M WK7 .req ORIG_W pixld , \numbytes, %(4+\firstreg), SRC, \unaligned_src pixld , \numbytes, \firstreg, DST, 0 .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm .macro over_8888_8888_check_transparent numbytes, reg0, reg1, reg2, reg3 /* Since these colours a premultiplied by alpha, only 0 indicates transparent (any other colour with 0 in the alpha byte is luminous) */ teq WK\()\reg0, #0 .if \numbytes > 4 teqeq WK\()\reg1, #0 .if \numbytes > 8 teqeq WK\()\reg2, #0 teqeq WK\()\reg3, #0 .endif .endif .endm .macro over_8888_8888_prepare next mov WK\()\next, WK\()\next, lsr #24 .endm .macro over_8888_8888_1pixel src, dst, offset, next /* src = destination component multiplier */ rsb WK\()\src, WK\()\src, #255 /* Split even/odd bytes of dst into SCRATCH/dst */ uxtb16 SCRATCH, WK\()\dst uxtb16 WK\()\dst, WK\()\dst, ror #8 /* Multiply through, adding 0.5 to the upper byte of result for rounding */ mla SCRATCH, SCRATCH, WK\()\src, MASK 
mla WK\()\dst, WK\()\dst, WK\()\src, MASK /* Where we would have had a stall between the result of the first MLA and the shifter input, * reload the complete source pixel */ ldr WK\()\src, [SRC, #\offset] /* Multiply by 257/256 to approximate 256/255 */ uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 /* In this stall, start processing the next pixel */ .if \offset < -4 mov WK\()\next, WK\()\next, lsr #24 .endif uxtab16 WK\()\dst, WK\()\dst, WK\()\dst, ror #8 /* Recombine even/odd bytes of multiplied destination */ mov SCRATCH, SCRATCH, ror #8 sel WK\()\dst, SCRATCH, WK\()\dst /* Saturated add of source to multiplied destination */ uqadd8 WK\()\dst, WK\()\dst, WK\()\src .endm .macro over_8888_8888_process_tail cond, numbytes, firstreg WK4 .req STRIDE_D WK5 .req STRIDE_S WK6 .req STRIDE_M WK7 .req ORIG_W over_8888_8888_check_transparent \numbytes, %(4+\firstreg), %(5+\firstreg), %(6+\firstreg), %(7+\firstreg) beq 10f over_8888_8888_prepare %(4+\firstreg) .set PROCESS_REG, \firstreg .set PROCESS_OFF, -\numbytes .rept \numbytes / 4 over_8888_8888_1pixel %(4+PROCESS_REG), %(0+PROCESS_REG), PROCESS_OFF, %(5+PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .set PROCESS_OFF, PROCESS_OFF+4 .endr pixst , \numbytes, \firstreg, DST 10: .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_over_8888_8888_asm_armv6, 32, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \ 2, /* prefetch distance */ \ over_8888_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ over_8888_8888_process_head, \ over_8888_8888_process_tail /******************************************************************************/ /* Multiply each byte of a word by a byte. * Useful when there aren't any obvious ways to fill the stalls with other instructions. 
* word Register containing 4 bytes * byte Register containing byte multiplier (bits 8-31 must be 0) * tmp Scratch register * half Register containing the constant 0x00800080 * GE[3:0] bits must contain 0101 */ .macro mul_8888_8 word, byte, tmp, half /* Split even/odd bytes of word apart */ uxtb16 \tmp, \word uxtb16 \word, \word, ror #8 /* Multiply bytes together with rounding, then by 257/256 */ mla \tmp, \tmp, \byte, \half mla \word, \word, \byte, \half /* 1 stall follows */ uxtab16 \tmp, \tmp, \tmp, ror #8 /* 1 stall follows */ uxtab16 \word, \word, \word, ror #8 /* Recombine bytes */ mov \tmp, \tmp, ror #8 sel \word, \tmp, \word .endm /******************************************************************************/ .macro over_8888_n_8888_init /* Mask is constant */ ldr MASK, [sp, #ARGS_STACK_OFFSET+8] /* Hold loop invariant in STRIDE_M */ ldr STRIDE_M, =0x00800080 /* We only want the alpha bits of the constant mask */ mov MASK, MASK, lsr #24 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, STRIDE_M, STRIDE_M line_saved_regs Y, STRIDE_D, STRIDE_S, ORIG_W .endm .macro over_8888_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req Y WK5 .req STRIDE_D WK6 .req STRIDE_S WK7 .req ORIG_W pixld , \numbytes, %(4+(\firstreg%2)), SRC, \unaligned_src pixld , \numbytes, \firstreg, DST, 0 .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm .macro over_8888_n_8888_1pixel src, dst mul_8888_8 WK\()\src, MASK, SCRATCH, STRIDE_M sub WK7, WK6, WK\()\src, lsr #24 mul_8888_8 WK\()\dst, WK7, SCRATCH, STRIDE_M uqadd8 WK\()\dst, WK\()\dst, WK\()\src .endm .macro over_8888_n_8888_process_tail cond, numbytes, firstreg WK4 .req Y WK5 .req STRIDE_D WK6 .req STRIDE_S WK7 .req ORIG_W over_8888_8888_check_transparent \numbytes, %(4+(\firstreg%2)), %(5+(\firstreg%2)), %(6+\firstreg), %(7+\firstreg) beq 10f mov WK6, #255 .set PROCESS_REG, \firstreg .rept \numbytes / 4 .if \numbytes == 16 && PROCESS_REG == 2 /* We're using WK6 and 
WK7 as temporaries, so half way through * 4 pixels, reload the second two source pixels but this time * into WK4 and WK5 */ ldmdb SRC, {WK4, WK5} .endif over_8888_n_8888_1pixel %(4+(PROCESS_REG%2)), %(PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .endr pixst , \numbytes, \firstreg, DST 10: .unreq WK4 .unreq WK5 .unreq WK6 .unreq WK7 .endm generate_composite_function \ pixman_composite_over_8888_n_8888_asm_armv6, 32, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \ 2, /* prefetch distance */ \ over_8888_n_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ over_8888_n_8888_process_head, \ over_8888_n_8888_process_tail /******************************************************************************/ .macro over_n_8_8888_init /* Source is constant, but splitting it into even/odd bytes is a loop invariant */ ldr SRC, [sp, #ARGS_STACK_OFFSET] /* Not enough registers to hold this constant, but we still use it here to set GE[3:0] */ ldr SCRATCH, =0x00800080 uxtb16 STRIDE_S, SRC uxtb16 SRC, SRC, ror #8 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, SCRATCH, SCRATCH line_saved_regs Y, STRIDE_D, STRIDE_M, ORIG_W .endm .macro over_n_8_8888_newline ldr STRIDE_D, =0x00800080 b 1f .ltorg 1: .endm .macro over_n_8_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload WK4 .req STRIDE_M pixld , \numbytes/4, 4, MASK, \unaligned_mask pixld , \numbytes, \firstreg, DST, 0 .unreq WK4 .endm .macro over_n_8_8888_1pixel src, dst uxtb Y, WK4, ror #\src*8 /* Trailing part of multiplication of source */ mla SCRATCH, STRIDE_S, Y, STRIDE_D mla Y, SRC, Y, STRIDE_D mov ORIG_W, #255 uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 uxtab16 Y, Y, Y, ror #8 mov SCRATCH, SCRATCH, ror #8 sub ORIG_W, ORIG_W, Y, lsr #24 sel Y, SCRATCH, Y /* Then multiply the destination */ mul_8888_8 WK\()\dst, ORIG_W, SCRATCH, STRIDE_D uqadd8 WK\()\dst, WK\()\dst, Y .endm .macro 
over_n_8_8888_process_tail cond, numbytes, firstreg WK4 .req STRIDE_M teq WK4, #0 beq 10f .set PROCESS_REG, \firstreg .rept \numbytes / 4 over_n_8_8888_1pixel %(PROCESS_REG-\firstreg), %(PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .endr pixst , \numbytes, \firstreg, DST 10: .unreq WK4 .endm generate_composite_function \ pixman_composite_over_n_8_8888_asm_armv6, 0, 8, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS \ 2, /* prefetch distance */ \ over_n_8_8888_init, \ over_n_8_8888_newline, \ nop_macro, /* cleanup */ \ over_n_8_8888_process_head, \ over_n_8_8888_process_tail /******************************************************************************/ .macro over_reverse_n_8888_init ldr SRC, [sp, #ARGS_STACK_OFFSET] ldr MASK, =0x00800080 /* Split source pixel into RB/AG parts */ uxtb16 STRIDE_S, SRC uxtb16 STRIDE_M, SRC, ror #8 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK line_saved_regs STRIDE_D, ORIG_W .endm .macro over_reverse_n_8888_newline mov STRIDE_D, #0xFF .endm .macro over_reverse_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld , \numbytes, \firstreg, DST, 0 .endm .macro over_reverse_n_8888_1pixel d, is_only teq WK\()\d, #0 beq 8f /* replace with source */ bics ORIG_W, STRIDE_D, WK\()\d, lsr #24 .if \is_only == 1 beq 49f /* skip store */ .else beq 9f /* write same value back */ .endif mla SCRATCH, STRIDE_S, ORIG_W, MASK /* red/blue */ mla ORIG_W, STRIDE_M, ORIG_W, MASK /* alpha/green */ uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 uxtab16 ORIG_W, ORIG_W, ORIG_W, ror #8 mov SCRATCH, SCRATCH, ror #8 sel ORIG_W, SCRATCH, ORIG_W uqadd8 WK\()\d, WK\()\d, ORIG_W b 9f 8: mov WK\()\d, SRC 9: .endm .macro over_reverse_n_8888_tail numbytes, reg1, reg2, reg3, reg4 .if \numbytes == 4 over_reverse_n_8888_1pixel \reg1, 1 .else and SCRATCH, WK\()\reg1, WK\()\reg2 .if \numbytes == 16 and SCRATCH, SCRATCH, WK\()\reg3 
and SCRATCH, SCRATCH, WK\()\reg4 .endif mvns SCRATCH, SCRATCH, asr #24 beq 49f /* skip store if all opaque */ over_reverse_n_8888_1pixel \reg1, 0 over_reverse_n_8888_1pixel \reg2, 0 .if \numbytes == 16 over_reverse_n_8888_1pixel \reg3, 0 over_reverse_n_8888_1pixel \reg4, 0 .endif .endif pixst , \numbytes, \reg1, DST 49: .endm .macro over_reverse_n_8888_process_tail cond, numbytes, firstreg over_reverse_n_8888_tail \numbytes, \firstreg, %(\firstreg+1), %(\firstreg+2), %(\firstreg+3) .endm generate_composite_function \ pixman_composite_over_reverse_n_8888_asm_armv6, 0, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH, \ 3, /* prefetch distance */ \ over_reverse_n_8888_init, \ over_reverse_n_8888_newline, \ nop_macro, /* cleanup */ \ over_reverse_n_8888_process_head, \ over_reverse_n_8888_process_tail /******************************************************************************/ .macro over_white_8888_8888_ca_init HALF .req SRC TMP0 .req STRIDE_D TMP1 .req STRIDE_S TMP2 .req STRIDE_M TMP3 .req ORIG_W WK4 .req SCRATCH line_saved_regs STRIDE_D, STRIDE_M, ORIG_W ldr SCRATCH, =0x800080 mov HALF, #0x80 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, SCRATCH, SCRATCH .set DST_PRELOAD_BIAS, 8 .endm .macro over_white_8888_8888_ca_cleanup .set DST_PRELOAD_BIAS, 0 .unreq HALF .unreq TMP0 .unreq TMP1 .unreq TMP2 .unreq TMP3 .unreq WK4 .endm .macro over_white_8888_8888_ca_combine m, d uxtb16 TMP1, TMP0 /* rb_notmask */ uxtb16 TMP2, \d /* rb_dest; 1 stall follows */ smlatt TMP3, TMP2, TMP1, HALF /* red */ smlabb TMP2, TMP2, TMP1, HALF /* blue */ uxtb16 TMP0, TMP0, ror #8 /* ag_notmask */ uxtb16 TMP1, \d, ror #8 /* ag_dest; 1 stall follows */ smlatt \d, TMP1, TMP0, HALF /* alpha */ smlabb TMP1, TMP1, TMP0, HALF /* green */ pkhbt TMP0, TMP2, TMP3, lsl #16 /* rb; 1 stall follows */ pkhbt TMP1, TMP1, \d, lsl #16 /* ag */ uxtab16 TMP0, TMP0, TMP0, ror 
#8 uxtab16 TMP1, TMP1, TMP1, ror #8 mov TMP0, TMP0, ror #8 sel \d, TMP0, TMP1 uqadd8 \d, \d, \m /* d is a late result */ .endm .macro over_white_8888_8888_ca_1pixel_head pixld , 4, 1, MASK, 0 pixld , 4, 3, DST, 0 .endm .macro over_white_8888_8888_ca_1pixel_tail mvn TMP0, WK1 teq WK1, WK1, asr #32 bne 1f bcc 3f mov WK3, WK1 b 2f 1: over_white_8888_8888_ca_combine WK1, WK3 2: pixst , 4, 3, DST 3: .endm .macro over_white_8888_8888_ca_2pixels_head pixld , 8, 1, MASK, 0 .endm .macro over_white_8888_8888_ca_2pixels_tail pixld , 8, 3, DST mvn TMP0, WK1 teq WK1, WK1, asr #32 bne 1f movcs WK3, WK1 bcs 2f teq WK2, #0 beq 5f b 2f 1: over_white_8888_8888_ca_combine WK1, WK3 2: mvn TMP0, WK2 teq WK2, WK2, asr #32 bne 3f movcs WK4, WK2 b 4f 3: over_white_8888_8888_ca_combine WK2, WK4 4: pixst , 8, 3, DST 5: .endm .macro over_white_8888_8888_ca_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload .if \numbytes == 4 over_white_8888_8888_ca_1pixel_head .else .if \numbytes == 16 over_white_8888_8888_ca_2pixels_head over_white_8888_8888_ca_2pixels_tail .endif over_white_8888_8888_ca_2pixels_head .endif .endm .macro over_white_8888_8888_ca_process_tail cond, numbytes, firstreg .if \numbytes == 4 over_white_8888_8888_ca_1pixel_tail .else over_white_8888_8888_ca_2pixels_tail .endif .endm generate_composite_function \ pixman_composite_over_white_8888_8888_ca_asm_armv6, 0, 32, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH \ 2, /* prefetch distance */ \ over_white_8888_8888_ca_init, \ nop_macro, /* newline */ \ over_white_8888_8888_ca_cleanup, \ over_white_8888_8888_ca_process_head, \ over_white_8888_8888_ca_process_tail .macro over_n_8888_8888_ca_init /* Set up constants. 
RB_SRC and AG_SRC are in registers; * RB_FLDS, A_SRC, and the two HALF values need to go on the * stack (and the ful SRC value is already there) */ ldr SCRATCH, [sp, #ARGS_STACK_OFFSET] mov WK0, #0x00FF0000 orr WK0, WK0, #0xFF /* RB_FLDS (0x00FF00FF) */ mov WK1, #0x80 /* HALF default value */ mov WK2, SCRATCH, lsr #24 /* A_SRC */ orr WK3, WK1, WK1, lsl #16 /* HALF alternate value (0x00800080) */ push {WK0-WK3} .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET+16 uxtb16 SRC, SCRATCH uxtb16 STRIDE_S, SCRATCH, ror #8 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, WK3, WK3 .unreq WK0 .unreq WK1 .unreq WK2 .unreq WK3 WK0 .req Y WK1 .req STRIDE_D RB_SRC .req SRC AG_SRC .req STRIDE_S WK2 .req STRIDE_M RB_FLDS .req r8 /* the reloaded constants have to be at consecutive registers starting at an even one */ A_SRC .req r8 HALF .req r9 WK3 .req r10 WK4 .req r11 WK5 .req SCRATCH WK6 .req ORIG_W line_saved_regs Y, STRIDE_D, STRIDE_M, ORIG_W .endm .macro over_n_8888_8888_ca_cleanup add sp, sp, #16 .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET-16 .unreq WK0 .unreq WK1 .unreq RB_SRC .unreq AG_SRC .unreq WK2 .unreq RB_FLDS .unreq A_SRC .unreq HALF .unreq WK3 .unreq WK4 .unreq WK5 .unreq WK6 WK0 .req r8 WK1 .req r9 WK2 .req r10 WK3 .req r11 .endm .macro over_n_8888_8888_ca_1pixel_head pixld , 4, 6, MASK, 0 pixld , 4, 0, DST, 0 .endm .macro over_n_8888_8888_ca_1pixel_tail ldrd A_SRC, HALF, [sp, #LOCALS_STACK_OFFSET+8] uxtb16 WK1, WK6 /* rb_mask (first step of hard case placed in what would otherwise be a stall) */ teq WK6, WK6, asr #32 /* Zc if transparent, ZC if opaque */ bne 20f bcc 40f /* Mask is fully opaque (all channels) */ ldr WK6, [sp, #ARGS_STACK_OFFSET] /* get SRC back */ eors A_SRC, A_SRC, #0xFF bne 10f /* Source is also opaque - same as src_8888_8888 */ mov WK0, WK6 b 30f 10: /* Same as over_8888_8888 */ mul_8888_8 WK0, A_SRC, WK5, HALF uqadd8 WK0, WK0, WK6 b 30f 20: /* No simplifications possible - do it the hard way */ uxtb16 WK2, WK6, ror #8 /* ag_mask 
*/ mla WK3, WK1, A_SRC, HALF /* rb_mul; 2 cycles */ mla WK4, WK2, A_SRC, HALF /* ag_mul; 2 cycles */ ldrd RB_FLDS, HALF, [sp, #LOCALS_STACK_OFFSET] uxtb16 WK5, WK0 /* rb_dest */ uxtab16 WK3, WK3, WK3, ror #8 uxtb16 WK6, WK0, ror #8 /* ag_dest */ uxtab16 WK4, WK4, WK4, ror #8 smlatt WK0, RB_SRC, WK1, HALF /* red1 */ smlabb WK1, RB_SRC, WK1, HALF /* blue1 */ bic WK3, RB_FLDS, WK3, lsr #8 bic WK4, RB_FLDS, WK4, lsr #8 pkhbt WK1, WK1, WK0, lsl #16 /* rb1 */ smlatt WK0, WK5, WK3, HALF /* red2 */ smlabb WK3, WK5, WK3, HALF /* blue2 */ uxtab16 WK1, WK1, WK1, ror #8 smlatt WK5, AG_SRC, WK2, HALF /* alpha1 */ pkhbt WK3, WK3, WK0, lsl #16 /* rb2 */ smlabb WK0, AG_SRC, WK2, HALF /* green1 */ smlatt WK2, WK6, WK4, HALF /* alpha2 */ smlabb WK4, WK6, WK4, HALF /* green2 */ pkhbt WK0, WK0, WK5, lsl #16 /* ag1 */ uxtab16 WK3, WK3, WK3, ror #8 pkhbt WK4, WK4, WK2, lsl #16 /* ag2 */ uxtab16 WK0, WK0, WK0, ror #8 uxtab16 WK4, WK4, WK4, ror #8 mov WK1, WK1, ror #8 mov WK3, WK3, ror #8 sel WK2, WK1, WK0 /* recombine source*mask */ sel WK1, WK3, WK4 /* recombine dest*(1-source_alpha*mask) */ uqadd8 WK0, WK1, WK2 /* followed by 1 stall */ 30: /* The destination buffer is already in the L1 cache, so * there's little point in amalgamating writes */ pixst , 4, 0, DST 40: .endm .macro over_n_8888_8888_ca_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload .rept (\numbytes / 4) - 1 over_n_8888_8888_ca_1pixel_head over_n_8888_8888_ca_1pixel_tail .endr over_n_8888_8888_ca_1pixel_head .endm .macro over_n_8888_8888_ca_process_tail cond, numbytes, firstreg over_n_8888_8888_ca_1pixel_tail .endm pixman_asm_function pixman_composite_over_n_8888_8888_ca_asm_armv6 ldr ip, [sp] cmp ip, #-1 beq pixman_composite_over_white_8888_8888_ca_asm_armv6 /* else drop through... 
*/ pixman_end_asm_function generate_composite_function \ pixman_composite_over_n_8888_8888_ca_asm_armv6_helper, 0, 32, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH | FLAG_PROCESS_CORRUPTS_WK0 \ 2, /* prefetch distance */ \ over_n_8888_8888_ca_init, \ nop_macro, /* newline */ \ over_n_8888_8888_ca_cleanup, \ over_n_8888_8888_ca_process_head, \ over_n_8888_8888_ca_process_tail /******************************************************************************/ .macro in_reverse_8888_8888_init /* Hold loop invariant in MASK */ ldr MASK, =0x00800080 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK /* Offset the source pointer: we only need the alpha bytes */ add SRC, SRC, #3 line_saved_regs ORIG_W .endm .macro in_reverse_8888_8888_head numbytes, reg1, reg2, reg3 ldrb ORIG_W, [SRC], #4 .if \numbytes >= 8 ldrb WK\()\reg1, [SRC], #4 .if \numbytes == 16 ldrb WK\()\reg2, [SRC], #4 ldrb WK\()\reg3, [SRC], #4 .endif .endif add DST, DST, #\numbytes .endm .macro in_reverse_8888_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload in_reverse_8888_8888_head \numbytes, \firstreg, %(\firstreg+1), %(\firstreg+2) .endm .macro in_reverse_8888_8888_1pixel s, d, offset, is_only .if \is_only != 1 movs \s, ORIG_W .if \offset != 0 ldrb ORIG_W, [SRC, #\offset] .endif beq 1f teq STRIDE_M, #0xFF beq 2f .endif uxtb16 SCRATCH, \d /* rb_dest */ uxtb16 \d, \d, ror #8 /* ag_dest */ mla SCRATCH, SCRATCH, \s, MASK mla \d, \d, \s, MASK uxtab16 SCRATCH, SCRATCH, SCRATCH, ror #8 uxtab16 \d, \d, \d, ror #8 mov SCRATCH, SCRATCH, ror #8 sel \d, SCRATCH, \d b 2f .if \offset == 0 48: /* Last mov d,#0 of the set - used as part of shortcut for * source values all 0 */ .endif 1: mov \d, #0 2: .endm .macro in_reverse_8888_8888_tail numbytes, reg1, reg2, reg3, reg4 .if \numbytes == 4 teq ORIG_W, ORIG_W, asr #32 ldrne WK\()\reg1, [DST, #-4] 
.elseif \numbytes == 8 teq ORIG_W, WK\()\reg1 teqeq ORIG_W, ORIG_W, asr #32 /* all 0 or all -1? */ ldmdbne DST, {WK\()\reg1-WK\()\reg2} .else teq ORIG_W, WK\()\reg1 teqeq ORIG_W, WK\()\reg2 teqeq ORIG_W, WK\()\reg3 teqeq ORIG_W, ORIG_W, asr #32 /* all 0 or all -1? */ ldmdbne DST, {WK\()\reg1-WK\()\reg4} .endif cmnne DST, #0 /* clear C if NE */ bcs 49f /* no writes to dest if source all -1 */ beq 48f /* set dest to all 0 if source all 0 */ .if \numbytes == 4 in_reverse_8888_8888_1pixel ORIG_W, WK\()\reg1, 0, 1 str WK\()\reg1, [DST, #-4] .elseif \numbytes == 8 in_reverse_8888_8888_1pixel STRIDE_M, WK\()\reg1, -4, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK\()\reg2, 0, 0 stmdb DST, {WK\()\reg1-WK\()\reg2} .else in_reverse_8888_8888_1pixel STRIDE_M, WK\()\reg1, -12, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK\()\reg2, -8, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK\()\reg3, -4, 0 in_reverse_8888_8888_1pixel STRIDE_M, WK\()\reg4, 0, 0 stmdb DST, {WK\()\reg1-WK\()\reg4} .endif 49: .endm .macro in_reverse_8888_8888_process_tail cond, numbytes, firstreg in_reverse_8888_8888_tail \numbytes, \firstreg, %(\firstreg+1), %(\firstreg+2), %(\firstreg+3) .endm generate_composite_function \ pixman_composite_in_reverse_8888_8888_asm_armv6, 32, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_SCRATCH | FLAG_NO_PRELOAD_DST \ 2, /* prefetch distance */ \ in_reverse_8888_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ in_reverse_8888_8888_process_head, \ in_reverse_8888_8888_process_tail /******************************************************************************/ .macro over_n_8888_init ldr SRC, [sp, #ARGS_STACK_OFFSET] /* Hold loop invariant in MASK */ ldr MASK, =0x00800080 /* Hold multiplier for destination in STRIDE_M */ mov STRIDE_M, #255 sub STRIDE_M, STRIDE_M, SRC, lsr #24 /* Set GE[3:0] to 0101 so SEL instructions do what we want */ uadd8 SCRATCH, MASK, MASK .endm 
.macro over_n_8888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload pixld , \numbytes, \firstreg, DST, 0 .endm .macro over_n_8888_1pixel dst mul_8888_8 WK\()\dst, STRIDE_M, SCRATCH, MASK uqadd8 WK\()\dst, WK\()\dst, SRC .endm .macro over_n_8888_process_tail cond, numbytes, firstreg .set PROCESS_REG, \firstreg .rept \numbytes / 4 over_n_8888_1pixel %(PROCESS_REG) .set PROCESS_REG, PROCESS_REG+1 .endr pixst , \numbytes, \firstreg, DST .endm generate_composite_function \ pixman_composite_over_n_8888_asm_armv6, 0, 0, 32 \ FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_DOES_STORE \ 2, /* prefetch distance */ \ over_n_8888_init, \ nop_macro, /* newline */ \ nop_macro, /* cleanup */ \ over_n_8888_process_head, \ over_n_8888_process_tail /******************************************************************************/ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-simd-asm.h0000664000175000017500000010363214712446423020351 0ustar00mattst88mattst88/* * Copyright Âİ 2012 Raspberry Pi Foundation * Copyright Âİ 2012 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. 
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Ben Avison (bavison@riscosopen.org) * */ /* * Because the alignment of pixel data to cachelines, and even the number of * cachelines per row can vary from row to row, and because of the need to * preload each scanline once and only once, this prefetch strategy treats * each row of pixels independently. When a pixel row is long enough, there * are three distinct phases of prefetch: * * an inner loop section, where each time a cacheline of data is * processed, another cacheline is preloaded (the exact distance ahead is * determined empirically using profiling results from lowlevel-blt-bench) * * a leading section, where enough cachelines are preloaded to ensure no * cachelines escape being preloaded when the inner loop starts * * a trailing section, where a limited number (0 or more) of cachelines * are preloaded to deal with data (if any) that hangs off the end of the * last iteration of the inner loop, plus any trailing bytes that were not * enough to make up one whole iteration of the inner loop * * There are (in general) three distinct code paths, selected between * depending upon how long the pixel row is. If it is long enough that there * is at least one iteration of the inner loop (as described above) then * this is described as the "wide" case. If it is shorter than that, but * there are still enough bytes output that there is at least one 16-byte- * long, 16-byte-aligned write to the destination (the optimum type of * write), then this is the "medium" case. 
If it is not even this long, then * this is the "narrow" case, and there is no attempt to align writes to * 16-byte boundaries. In the "medium" and "narrow" cases, all the * cachelines containing data from the pixel row are prefetched up-front. */ /* * Determine whether we put the arguments on the stack for debugging. */ #undef DEBUG_PARAMS /* * Bit flags for 'generate_composite_function' macro which are used * to tune generated functions behavior. */ .set FLAG_DST_WRITEONLY, 0 .set FLAG_DST_READWRITE, 1 .set FLAG_COND_EXEC, 0 .set FLAG_BRANCH_OVER, 2 .set FLAG_PROCESS_PRESERVES_PSR, 0 .set FLAG_PROCESS_CORRUPTS_PSR, 4 .set FLAG_PROCESS_DOESNT_STORE, 0 .set FLAG_PROCESS_DOES_STORE, 8 /* usually because it needs to conditionally skip it */ .set FLAG_NO_SPILL_LINE_VARS, 0 .set FLAG_SPILL_LINE_VARS_WIDE, 16 .set FLAG_SPILL_LINE_VARS_NON_WIDE, 32 .set FLAG_SPILL_LINE_VARS, 48 .set FLAG_PROCESS_CORRUPTS_SCRATCH, 0 .set FLAG_PROCESS_PRESERVES_SCRATCH, 64 .set FLAG_PROCESS_PRESERVES_WK0, 0 .set FLAG_PROCESS_CORRUPTS_WK0, 128 /* if possible, use the specified register(s) instead so WK0 can hold number of leading pixels */ .set FLAG_PRELOAD_DST, 0 .set FLAG_NO_PRELOAD_DST, 256 /* * Number of bytes by which to adjust preload offset of destination * buffer (allows preload instruction to be moved before the load(s)) */ .set DST_PRELOAD_BIAS, 0 /* * Offset into stack where mask and source pointer/stride can be accessed. */ #ifdef DEBUG_PARAMS .set ARGS_STACK_OFFSET, (9*4+9*4) #else .set ARGS_STACK_OFFSET, (9*4) #endif /* * Offset into stack where space allocated during init macro can be accessed. */ .set LOCALS_STACK_OFFSET, 0 /* * Constants for selecting preferable prefetch type. */ .set PREFETCH_TYPE_NONE, 0 .set PREFETCH_TYPE_STANDARD, 1 /* * Definitions of macros for load/store of pixel data. 
*/ .macro pixldst op, cond=al, numbytes, reg0, reg1, reg2, reg3, base, unaligned=0 .if \numbytes == 16 .if \unaligned == 1 \op\()r\()\cond WK\()\reg0, [\base], #4 \op\()r\()\cond WK\()\reg1, [\base], #4 \op\()r\()\cond WK\()\reg2, [\base], #4 \op\()r\()\cond WK\()\reg3, [\base], #4 .else \op\()mia\()\cond \base!, {WK\()\reg0,WK\()\reg1,WK\()\reg2,WK\()\reg3} .endif .elseif \numbytes == 8 .if \unaligned == 1 \op\()r\()\cond WK\()\reg0, [\base], #4 \op\()r\()\cond WK\()\reg1, [\base], #4 .else \op\()mia\()\cond \base!, {WK\()\reg0,WK\()\reg1} .endif .elseif \numbytes == 4 \op\()r\()\cond WK\()\reg0, [\base], #4 .elseif \numbytes == 2 \op\()rh\()\cond WK\()\reg0, [\base], #2 .elseif \numbytes == 1 \op\()rb\()\cond WK\()\reg0, [\base], #1 .else .error "unsupported size: \numbytes" .endif .endm .macro pixst_baseupdated cond, numbytes, reg0, reg1, reg2, reg3, base .if \numbytes == 16 stm\()\cond\()db \base, {WK\()\reg0,WK\()\reg1,WK\()\reg2,WK\()\reg3} .elseif \numbytes == 8 stmdb\()\cond \base, {WK\()\reg0,WK\()\reg1} .elseif \numbytes == 4 str\()\cond WK\()\reg0, [\base, #-4] .elseif \numbytes == 2 strh\()\cond WK\()\reg0, [\base, #-2] .elseif \numbytes == 1 strb\()\cond WK\()\reg0, [\base, #-1] .else .error "unsupported size: \numbytes" .endif .endm .macro pixld cond, numbytes, firstreg, base, unaligned pixldst ld, \cond, \numbytes, %(\firstreg+0), %(\firstreg+1), %(\firstreg+2), %(\firstreg+3), \base, \unaligned .endm .macro pixst cond, numbytes, firstreg, base .if (flags) & FLAG_DST_READWRITE pixst_baseupdated \cond, \numbytes, %(\firstreg+0), %(\firstreg+1), %(\firstreg+2), %(\firstreg+3), \base .else pixldst st, \cond, \numbytes, %(\firstreg+0), %(\firstreg+1), %(\firstreg+2), %(\firstreg+3), \base .endif .endm .macro PF a, x:vararg .if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_STANDARD) \a \x .endif .endm .macro preload_leading_step1 bpp, ptr, base /* If the destination is already 16-byte aligned, then we need to preload * between 0 and prefetch_distance 
(inclusive) cache lines ahead so there * are no gaps when the inner loop starts. */ .if \bpp > 0 PF bic, \ptr, \base, #31 .set OFFSET, 0 .rept prefetch_distance+1 PF pld, [\ptr, #OFFSET] .set OFFSET, OFFSET+32 .endr .endif .endm .macro preload_leading_step2 bpp, bpp_shift, ptr, base /* However, if the destination is not 16-byte aligned, we may need to * preload more cache lines than that. The question we need to ask is: * are the bytes corresponding to the leading pixels more than the amount * by which the source pointer will be rounded down for preloading, and if * so, by how many cache lines? Effectively, we want to calculate * leading_bytes = ((-dst)&15)*src_bpp/dst_bpp * inner_loop_offset = (src+leading_bytes)&31 * extra_needed = leading_bytes - inner_loop_offset * and test if extra_needed is <= 0, <= 32, or > 32 (where > 32 is only * possible when there are 4 src bytes for every 1 dst byte). */ .if \bpp > 0 .ifc \base,DST /* The test can be simplified further when preloading the destination */ PF tst, \base, #16 PF beq, 61f .else .if \bpp/dst_w_bpp == 4 PF add, SCRATCH, \base, WK0, lsl #\bpp_shift-dst_bpp_shift PF and, SCRATCH, SCRATCH, #31 PF rsb, SCRATCH, SCRATCH, WK0, lsl #\bpp_shift-dst_bpp_shift PF sub, SCRATCH, SCRATCH, #1 /* so now ranges are -16..-1 / 0..31 / 32..63 */ PF movs, SCRATCH, SCRATCH, lsl #32-6 /* so this sets NC / nc / Nc */ PF bcs, 61f PF bpl, 60f PF pld, [ptr, #32*(prefetch_distance+2)] .else PF mov, SCRATCH, \base, lsl #32-5 PF add, SCRATCH, SCRATCH, WK0, lsl #32-5+\bpp_shift-dst_bpp_shift PF rsbs, SCRATCH, SCRATCH, WK0, lsl #32-5+\bpp_shift-dst_bpp_shift PF bls, 61f .endif .endif 60: PF pld, [\ptr, #32*(prefetch_distance+1)] 61: .endif .endm #define IS_END_OF_GROUP(INDEX,SIZE) ((SIZE) < 2 || ((INDEX) & ~((INDEX)+1)) & ((SIZE)/2)) .macro preload_middle bpp, base, scratch_holds_offset .if \bpp > 0 /* prefetch distance = 256/bpp, stm distance = 128/dst_w_bpp */ .if IS_END_OF_GROUP(SUBBLOCK,256/128*dst_w_bpp/\bpp) .if \scratch_holds_offset 
PF pld, [\base, SCRATCH] .else PF bic, SCRATCH, \base, #31 PF pld, [SCRATCH, #32*prefetch_distance] .endif .endif .endif .endm .macro preload_trailing bpp, bpp_shift, base .if \bpp > 0 .if \bpp*pix_per_block > 256 /* Calculations are more complex if more than one fetch per block */ PF and, WK1, \base, #31 PF add, WK1, WK1, WK0, lsl #\bpp_shift PF add, WK1, WK1, #32*(\bpp*pix_per_block/256-1)*(prefetch_distance+1) PF bic, SCRATCH, \base, #31 80: PF pld, [SCRATCH, #32*(prefetch_distance+1)] PF add, SCRATCH, SCRATCH, #32 PF subs, WK1, WK1, #32 PF bhi, 80b .else /* If exactly one fetch per block, then we need either 0, 1 or 2 extra preloads */ PF mov, SCRATCH, \base, lsl #32-5 PF adds, SCRATCH, SCRATCH, X, lsl #32-5+\bpp_shift PF adcseq, SCRATCH, SCRATCH, #0 /* The instruction above has two effects: ensures Z is only * set if C was clear (so Z indicates that both shifted quantities * were 0), and clears C if Z was set (so C indicates that the sum * of the shifted quantities was greater and not equal to 32) */ PF beq, 82f PF bic, SCRATCH, \base, #31 PF bcc, 81f PF pld, [SCRATCH, #32*(prefetch_distance+2)] 81: PF pld, [SCRATCH, #32*(prefetch_distance+1)] 82: .endif .endif .endm .macro preload_line narrow_case, bpp, bpp_shift, base /* "narrow_case" - just means that the macro was invoked from the "narrow" * code path rather than the "medium" one - because in the narrow case, * the row of pixels is known to output no more than 30 bytes, then * (assuming the source pixels are no wider than the the destination * pixels) they cannot possibly straddle more than 2 32-byte cachelines, * meaning there's no need for a loop. 
* "bpp" - number of bits per pixel in the channel (source, mask or * destination) that's being preloaded, or 0 if this channel is not used * for reading * "bpp_shift" - log2 of ("bpp"/8) (except if "bpp"=0 of course) * "base" - base address register of channel to preload (SRC, MASK or DST) */ .if \bpp > 0 .if \narrow_case && (\bpp <= dst_w_bpp) /* In these cases, each line for each channel is in either 1 or 2 cache lines */ PF bic, WK0, \base, #31 PF pld, [WK0] PF add, WK1, \base, X, LSL #\bpp_shift PF sub, WK1, WK1, #1 PF bic, WK1, WK1, #31 PF cmp, WK1, WK0 PF beq, 90f PF pld, [WK1] 90: .else PF bic, WK0, \base, #31 PF pld, [WK0] PF add, WK1, \base, X, lsl #\bpp_shift PF sub, WK1, WK1, #1 PF bic, WK1, WK1, #31 PF cmp, WK1, WK0 PF beq, 92f 91: PF add, WK0, WK0, #32 PF cmp, WK0, WK1 PF pld, [WK0] PF bne, 91b 92: .endif .endif .endm .macro conditional_process1_helper cond, process_head, process_tail, numbytes, firstreg, unaligned_src, unaligned_mask, decrementx \process_head \cond, \numbytes, \firstreg, \unaligned_src, \unaligned_mask, 0 .if \decrementx sub\()\cond X, X, #8*\numbytes/dst_w_bpp .endif \process_tail \cond, \numbytes, \firstreg .if !((flags) & FLAG_PROCESS_DOES_STORE) pixst \cond, \numbytes, \firstreg, DST .endif .endm .macro conditional_process1 cond, process_head, process_tail, numbytes, firstreg, unaligned_src, unaligned_mask, decrementx .if (flags) & FLAG_BRANCH_OVER .ifc \cond,mi bpl 100f .endif .ifc \cond,cs bcc 100f .endif .ifc \cond,ne beq 100f .endif conditional_process1_helper , \process_head, \process_tail, \numbytes, \firstreg, \unaligned_src, \unaligned_mask, \decrementx 100: .else conditional_process1_helper \cond, \process_head, \process_tail, \numbytes, \firstreg, \unaligned_src, \unaligned_mask, \decrementx .endif .endm .macro conditional_process2 test, cond1, cond2, process_head, process_tail, numbytes1, numbytes2, firstreg1, firstreg2, unaligned_src, unaligned_mask, decrementx .if (flags) & (FLAG_DST_READWRITE | FLAG_BRANCH_OVER | 
FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE) /* Can't interleave reads and writes */ \test conditional_process1 \cond1, \process_head, \process_tail, \numbytes1, \firstreg1, \unaligned_src, \unaligned_mask, \decrementx .if (flags) & FLAG_PROCESS_CORRUPTS_PSR \test .endif conditional_process1 \cond2, \process_head, \process_tail, \numbytes2, \firstreg2, \unaligned_src, \unaligned_mask, \decrementx .else /* Can interleave reads and writes for better scheduling */ \test \process_head \cond1, \numbytes1, \firstreg1, \unaligned_src, \unaligned_mask, 0 \process_head \cond2, \numbytes2, \firstreg2, \unaligned_src, \unaligned_mask, 0 .if \decrementx sub\()\cond1 X, X, #8*\numbytes1/dst_w_bpp sub\()\cond2 X, X, #8*\numbytes2/dst_w_bpp .endif \process_tail \cond1, \numbytes1, \firstreg1 \process_tail \cond2, \numbytes2, \firstreg2 pixst \cond1, \numbytes1, \firstreg1, DST pixst \cond2, \numbytes2, \firstreg2, DST .endif .endm .macro test_bits_1_0_ptr .if (flags) & FLAG_PROCESS_CORRUPTS_WK0 movs SCRATCH, X, lsl #32-1 /* C,N = bits 1,0 of DST */ .else movs SCRATCH, WK0, lsl #32-1 /* C,N = bits 1,0 of DST */ .endif .endm .macro test_bits_3_2_ptr .if (flags) & FLAG_PROCESS_CORRUPTS_WK0 movs SCRATCH, X, lsl #32-3 /* C,N = bits 3, 2 of DST */ .else movs SCRATCH, WK0, lsl #32-3 /* C,N = bits 3, 2 of DST */ .endif .endm .macro leading_15bytes process_head, process_tail /* On entry, WK0 bits 0-3 = number of bytes until destination is 16-byte aligned */ .set DECREMENT_X, 1 .if (flags) & FLAG_PROCESS_CORRUPTS_WK0 .set DECREMENT_X, 0 sub X, X, WK0, lsr #dst_bpp_shift str X, [sp, #LINE_SAVED_REG_COUNT*4] mov X, WK0 .endif /* Use unaligned loads in all cases for simplicity */ .if dst_w_bpp == 8 conditional_process2 test_bits_1_0_ptr, mi, cs, \process_head, \process_tail, 1, 2, 1, 2, 1, 1, DECREMENT_X .elseif dst_w_bpp == 16 test_bits_1_0_ptr conditional_process1 cs, \process_head, \process_tail, 2, 2, 1, 1, DECREMENT_X .endif conditional_process2 test_bits_3_2_ptr, mi, cs, 
\process_head, \process_tail, 4, 8, 1, 2, 1, 1, DECREMENT_X .if (flags) & FLAG_PROCESS_CORRUPTS_WK0 ldr X, [sp, #LINE_SAVED_REG_COUNT*4] .endif .endm .macro test_bits_3_2_pix movs SCRATCH, X, lsl #dst_bpp_shift+32-3 .endm .macro test_bits_1_0_pix .if dst_w_bpp == 8 movs SCRATCH, X, lsl #dst_bpp_shift+32-1 .else movs SCRATCH, X, lsr #1 .endif .endm .macro trailing_15bytes process_head, process_tail, unaligned_src, unaligned_mask conditional_process2 test_bits_3_2_pix, cs, mi, \process_head, \process_tail, 8, 4, 0, 2, \unaligned_src, \unaligned_mask, 0 .if dst_w_bpp == 16 test_bits_1_0_pix conditional_process1 cs, \process_head, \process_tail, 2, 0, \unaligned_src, \unaligned_mask, 0 .elseif dst_w_bpp == 8 conditional_process2 test_bits_1_0_pix, cs, mi, \process_head, \process_tail, 2, 1, 0, 1, \unaligned_src, \unaligned_mask, 0 .endif .endm .macro wide_case_inner_loop process_head, process_tail, unaligned_src, unaligned_mask, dst_alignment 110: .set SUBBLOCK, 0 /* this is a count of STMs; there can be up to 8 STMs per block */ .rept pix_per_block*dst_w_bpp/128 \process_head , 16, 0, \unaligned_src, \unaligned_mask, 1 .if (src_bpp > 0) && (mask_bpp == 0) && ((flags) & FLAG_PROCESS_PRESERVES_SCRATCH) preload_middle src_bpp, SRC, 1 .elseif (src_bpp == 0) && (mask_bpp > 0) && ((flags) & FLAG_PROCESS_PRESERVES_SCRATCH) preload_middle mask_bpp, MASK, 1 .else preload_middle src_bpp, SRC, 0 preload_middle mask_bpp, MASK, 0 .endif .if (dst_r_bpp > 0) && ((SUBBLOCK % 2) == 0) && (((flags) & FLAG_NO_PRELOAD_DST) == 0) /* Because we know that writes are 16-byte aligned, it's relatively easy to ensure that * destination prefetches are 32-byte aligned. It's also the easiest channel to offset * preloads for, to achieve staggered prefetches for multiple channels, because there are * always two STMs per prefetch, so there is always an opposite STM on which to put the * preload. 
Note, no need to BIC the base register here */ PF pld, [DST, #32*prefetch_distance - \dst_alignment] .endif \process_tail , 16, 0 .if !((flags) & FLAG_PROCESS_DOES_STORE) pixst , 16, 0, DST .endif .set SUBBLOCK, SUBBLOCK+1 .endr subs X, X, #pix_per_block bhs 110b .endm .macro wide_case_inner_loop_and_trailing_pixels process_head, process_tail, process_inner_loop, exit_label, unaligned_src, unaligned_mask /* Destination now 16-byte aligned; we have at least one block before we have to stop preloading */ .if dst_r_bpp > 0 tst DST, #16 bne 111f \process_inner_loop \process_head, \process_tail, \unaligned_src, \unaligned_mask, 16 + DST_PRELOAD_BIAS b 112f 111: .endif \process_inner_loop \process_head, \process_tail, \unaligned_src, \unaligned_mask, 0 + DST_PRELOAD_BIAS 112: /* Just before the final (prefetch_distance+1) 32-byte blocks, deal with final preloads */ .if (src_bpp*pix_per_block > 256) || (mask_bpp*pix_per_block > 256) || (dst_r_bpp*pix_per_block > 256) PF and, WK0, X, #pix_per_block-1 .endif preload_trailing src_bpp, src_bpp_shift, SRC preload_trailing mask_bpp, mask_bpp_shift, MASK .if ((flags) & FLAG_NO_PRELOAD_DST) == 0 preload_trailing dst_r_bpp, dst_bpp_shift, DST .endif add X, X, #(prefetch_distance+2)*pix_per_block - 128/dst_w_bpp /* The remainder of the line is handled identically to the medium case */ medium_case_inner_loop_and_trailing_pixels \process_head, \process_tail,, \exit_label, \unaligned_src, \unaligned_mask .endm .macro medium_case_inner_loop_and_trailing_pixels process_head, process_tail, unused, exit_label, unaligned_src, unaligned_mask 120: \process_head , 16, 0, \unaligned_src, \unaligned_mask, 0 \process_tail , 16, 0 .if !((flags) & FLAG_PROCESS_DOES_STORE) pixst , 16, 0, DST .endif subs X, X, #128/dst_w_bpp bhs 120b /* Trailing pixels */ tst X, #128/dst_w_bpp - 1 beq \exit_label trailing_15bytes \process_head, \process_tail, \unaligned_src, \unaligned_mask .endm .macro narrow_case_inner_loop_and_trailing_pixels process_head, 
process_tail, unused, exit_label, unaligned_src, unaligned_mask tst X, #16*8/dst_w_bpp conditional_process1 ne, \process_head, \process_tail, 16, 0, \unaligned_src, \unaligned_mask, 0 /* Trailing pixels */ /* In narrow case, it's relatively unlikely to be aligned, so let's do without a branch here */ trailing_15bytes \process_head, \process_tail, \unaligned_src, \unaligned_mask .endm .macro switch_on_alignment action, process_head, process_tail, process_inner_loop, exit_label /* Note that if we're reading the destination, it's already guaranteed to be aligned at this point */ .if mask_bpp == 8 || mask_bpp == 16 tst MASK, #3 bne 141f .endif .if src_bpp == 8 || src_bpp == 16 tst SRC, #3 bne 140f .endif \action \process_head, \process_tail, \process_inner_loop, \exit_label, 0, 0 .if src_bpp == 8 || src_bpp == 16 b \exit_label 140: \action \process_head, \process_tail, \process_inner_loop, \exit_label, 1, 0 .endif .if mask_bpp == 8 || mask_bpp == 16 b \exit_label 141: .if src_bpp == 8 || src_bpp == 16 tst SRC, #3 bne 142f .endif \action \process_head, \process_tail, \process_inner_loop, \exit_label, 0, 1 .if src_bpp == 8 || src_bpp == 16 b \exit_label 142: \action \process_head, \process_tail, \process_inner_loop, \exit_label, 1, 1 .endif .endif .endm .macro end_of_line restore_x, vars_spilled, loop_label, last_one .if \vars_spilled /* Sadly, GAS doesn't seem have an equivalent of the DCI directive? 
*/ /* This is ldmia sp,{} */ .word 0xE89D0000 | LINE_SAVED_REGS .endif subs Y, Y, #1 .if \vars_spilled .if (LINE_SAVED_REGS) & (1<<1) str Y, [sp] .endif .endif add DST, DST, STRIDE_D .if src_bpp > 0 add SRC, SRC, STRIDE_S .endif .if mask_bpp > 0 add MASK, MASK, STRIDE_M .endif .if \restore_x mov X, ORIG_W .endif bhs \loop_label .ifc "\last_one","" .if \vars_spilled b 197f .else b 198f .endif .else .if (!\vars_spilled) && ((flags) & FLAG_SPILL_LINE_VARS) b 198f .endif .endif .endm .macro generate_composite_function fname, \ src_bpp_, \ mask_bpp_, \ dst_w_bpp_, \ flags_, \ prefetch_distance_, \ init, \ newline, \ cleanup, \ process_head, \ process_tail, \ process_inner_loop pixman_asm_function \fname /* * Make some macro arguments globally visible and accessible * from other macros */ .set src_bpp, \src_bpp_ .set mask_bpp, \mask_bpp_ .set dst_w_bpp, \dst_w_bpp_ .set flags, \flags_ .set prefetch_distance, \prefetch_distance_ /* * Select prefetch type for this function. */ .if prefetch_distance == 0 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE .else .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_STANDARD .endif .if src_bpp == 32 .set src_bpp_shift, 2 .elseif src_bpp == 24 .set src_bpp_shift, 0 .elseif src_bpp == 16 .set src_bpp_shift, 1 .elseif src_bpp == 8 .set src_bpp_shift, 0 .elseif src_bpp == 0 .set src_bpp_shift, -1 .else .error "requested src bpp (src_bpp) is not supported" .endif .if mask_bpp == 32 .set mask_bpp_shift, 2 .elseif mask_bpp == 24 .set mask_bpp_shift, 0 .elseif mask_bpp == 8 .set mask_bpp_shift, 0 .elseif mask_bpp == 0 .set mask_bpp_shift, -1 .else .error "requested mask bpp (mask_bpp) is not supported" .endif .if dst_w_bpp == 32 .set dst_bpp_shift, 2 .elseif dst_w_bpp == 24 .set dst_bpp_shift, 0 .elseif dst_w_bpp == 16 .set dst_bpp_shift, 1 .elseif dst_w_bpp == 8 .set dst_bpp_shift, 0 .else .error "requested dst bpp (dst_w_bpp) is not supported" .endif .if (((flags) & FLAG_DST_READWRITE) != 0) .set dst_r_bpp, dst_w_bpp .else .set dst_r_bpp, 0 .endif 
.set pix_per_block, 16*8/dst_w_bpp .if src_bpp != 0 .if 32*8/src_bpp > pix_per_block .set pix_per_block, 32*8/src_bpp .endif .endif .if mask_bpp != 0 .if 32*8/mask_bpp > pix_per_block .set pix_per_block, 32*8/mask_bpp .endif .endif .if dst_r_bpp != 0 .if 32*8/dst_r_bpp > pix_per_block .set pix_per_block, 32*8/dst_r_bpp .endif .endif /* The standard entry conditions set up by pixman-arm-common.h are: * r0 = width (pixels) * r1 = height (rows) * r2 = pointer to top-left pixel of destination * r3 = destination stride (pixels) * [sp] = source pixel value, or pointer to top-left pixel of source * [sp,#4] = 0 or source stride (pixels) * The following arguments are unused for non-mask operations * [sp,#8] = mask pixel value, or pointer to top-left pixel of mask * [sp,#12] = 0 or mask stride (pixels) */ /* * Assign symbolic names to registers */ X .req r0 /* pixels to go on this line */ Y .req r1 /* lines to go */ DST .req r2 /* destination pixel pointer */ STRIDE_D .req r3 /* destination stride (bytes, minus width) */ SRC .req r4 /* source pixel pointer */ STRIDE_S .req r5 /* source stride (bytes, minus width) */ MASK .req r6 /* mask pixel pointer (if applicable) */ STRIDE_M .req r7 /* mask stride (bytes, minus width) */ WK0 .req r8 /* pixel data registers */ WK1 .req r9 WK2 .req r10 WK3 .req r11 SCRATCH .req r12 ORIG_W .req r14 /* width (pixels) */ push {r4-r11, lr} /* save all registers */ subs Y, Y, #1 blo 199f #ifdef DEBUG_PARAMS sub sp, sp, #9*4 #endif .if src_bpp > 0 ldr SRC, [sp, #ARGS_STACK_OFFSET] ldr STRIDE_S, [sp, #ARGS_STACK_OFFSET+4] .endif .if mask_bpp > 0 ldr MASK, [sp, #ARGS_STACK_OFFSET+8] ldr STRIDE_M, [sp, #ARGS_STACK_OFFSET+12] .endif #ifdef DEBUG_PARAMS add Y, Y, #1 stmia sp, {r0-r7,pc} sub Y, Y, #1 #endif \init .if (flags) & FLAG_PROCESS_CORRUPTS_WK0 /* Reserve a word in which to store X during leading pixels */ sub sp, sp, #4 .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET+4 .set LOCALS_STACK_OFFSET, LOCALS_STACK_OFFSET+4 .endif lsl STRIDE_D, 
#dst_bpp_shift /* stride in bytes */ sub STRIDE_D, STRIDE_D, X, lsl #dst_bpp_shift .if src_bpp > 0 lsl STRIDE_S, #src_bpp_shift sub STRIDE_S, STRIDE_S, X, lsl #src_bpp_shift .endif .if mask_bpp > 0 lsl STRIDE_M, #mask_bpp_shift sub STRIDE_M, STRIDE_M, X, lsl #mask_bpp_shift .endif /* Are we not even wide enough to have one 16-byte aligned 16-byte block write? */ cmp X, #2*16*8/dst_w_bpp - 1 blo 170f .if src_bpp || mask_bpp || dst_r_bpp /* Wide and medium cases are the same for fill */ /* To preload ahead on the current line, we need at least (prefetch_distance+2) 32-byte blocks on all prefetch channels */ cmp X, #(prefetch_distance+3)*pix_per_block - 1 blo 160f /* Wide case */ /* Adjust X so that the decrement instruction can also test for * inner loop termination. We want it to stop when there are * (prefetch_distance+1) complete blocks to go. */ sub X, X, #(prefetch_distance+2)*pix_per_block mov ORIG_W, X .if (flags) & FLAG_SPILL_LINE_VARS_WIDE /* This is stmdb sp!,{} */ .word 0xE92D0000 | LINE_SAVED_REGS .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET + LINE_SAVED_REG_COUNT*4 .set LOCALS_STACK_OFFSET, LOCALS_STACK_OFFSET + LINE_SAVED_REG_COUNT*4 .endif 151: /* New line */ \newline preload_leading_step1 src_bpp, WK1, SRC preload_leading_step1 mask_bpp, WK2, MASK .if ((flags) & FLAG_NO_PRELOAD_DST) == 0 preload_leading_step1 dst_r_bpp, WK3, DST .endif ands WK0, DST, #15 beq 154f rsb WK0, WK0, #16 /* number of leading bytes until destination aligned */ preload_leading_step2 src_bpp, src_bpp_shift, WK1, SRC preload_leading_step2 mask_bpp, mask_bpp_shift, WK2, MASK .if ((flags) & FLAG_NO_PRELOAD_DST) == 0 preload_leading_step2 dst_r_bpp, dst_bpp_shift, WK3, DST .endif leading_15bytes \process_head, \process_tail 154: /* Destination now 16-byte aligned; we have at least one prefetch on each channel as well as at least one 16-byte output block */ .if (src_bpp > 0) && (mask_bpp == 0) && ((flags) & FLAG_PROCESS_PRESERVES_SCRATCH) and SCRATCH, SRC, #31 rsb SCRATCH, SCRATCH, 
#32*prefetch_distance .elseif (src_bpp == 0) && (mask_bpp > 0) && ((flags) & FLAG_PROCESS_PRESERVES_SCRATCH) and SCRATCH, MASK, #31 rsb SCRATCH, SCRATCH, #32*prefetch_distance .endif .ifc "\process_inner_loop","" switch_on_alignment wide_case_inner_loop_and_trailing_pixels, \process_head, \process_tail, wide_case_inner_loop, 157f .else switch_on_alignment wide_case_inner_loop_and_trailing_pixels, \process_head, \process_tail, \process_inner_loop, 157f .endif 157: /* Check for another line */ end_of_line 1, %((flags) & FLAG_SPILL_LINE_VARS_WIDE), 151b .if (flags) & FLAG_SPILL_LINE_VARS_WIDE .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET - LINE_SAVED_REG_COUNT*4 .set LOCALS_STACK_OFFSET, LOCALS_STACK_OFFSET - LINE_SAVED_REG_COUNT*4 .endif .endif .ltorg 160: /* Medium case */ mov ORIG_W, X .if (flags) & FLAG_SPILL_LINE_VARS_NON_WIDE /* This is stmdb sp!,{} */ .word 0xE92D0000 | LINE_SAVED_REGS .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET + LINE_SAVED_REG_COUNT*4 .set LOCALS_STACK_OFFSET, LOCALS_STACK_OFFSET + LINE_SAVED_REG_COUNT*4 .endif 161: /* New line */ \newline preload_line 0, src_bpp, src_bpp_shift, SRC /* in: X, corrupts: WK0-WK1 */ preload_line 0, mask_bpp, mask_bpp_shift, MASK .if ((flags) & FLAG_NO_PRELOAD_DST) == 0 preload_line 0, dst_r_bpp, dst_bpp_shift, DST .endif sub X, X, #128/dst_w_bpp /* simplifies inner loop termination */ ands WK0, DST, #15 beq 164f rsb WK0, WK0, #16 /* number of leading bytes until destination aligned */ leading_15bytes \process_head, \process_tail 164: /* Destination now 16-byte aligned; we have at least one 16-byte output block */ switch_on_alignment medium_case_inner_loop_and_trailing_pixels, \process_head, \process_tail,, 167f 167: /* Check for another line */ end_of_line 1, %((flags) & FLAG_SPILL_LINE_VARS_NON_WIDE), 161b .ltorg 170: /* Narrow case, less than 31 bytes, so no guarantee of at least one 16-byte block */ .if dst_w_bpp < 32 mov ORIG_W, X .endif .if (flags) & FLAG_SPILL_LINE_VARS_NON_WIDE /* This is stmdb sp!,{} */ .word 
0xE92D0000 | LINE_SAVED_REGS .endif 171: /* New line */ \newline preload_line 1, src_bpp, src_bpp_shift, SRC /* in: X, corrupts: WK0-WK1 */ preload_line 1, mask_bpp, mask_bpp_shift, MASK .if ((flags) & FLAG_NO_PRELOAD_DST) == 0 preload_line 1, dst_r_bpp, dst_bpp_shift, DST .endif .if dst_w_bpp == 8 tst DST, #3 beq 174f 172: subs X, X, #1 blo 177f \process_head , 1, 0, 1, 1, 0 \process_tail , 1, 0 .if !((flags) & FLAG_PROCESS_DOES_STORE) pixst , 1, 0, DST .endif tst DST, #3 bne 172b .elseif dst_w_bpp == 16 tst DST, #2 beq 174f subs X, X, #1 blo 177f \process_head , 2, 0, 1, 1, 0 \process_tail , 2, 0 .if !((flags) & FLAG_PROCESS_DOES_STORE) pixst , 2, 0, DST .endif .endif 174: /* Destination now 4-byte aligned; we have 0 or more output bytes to go */ switch_on_alignment narrow_case_inner_loop_and_trailing_pixels, \process_head, \process_tail,, 177f 177: /* Check for another line */ end_of_line %(dst_w_bpp < 32), %((flags) & FLAG_SPILL_LINE_VARS_NON_WIDE), 171b, last_one .if (flags) & FLAG_SPILL_LINE_VARS_NON_WIDE .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET - LINE_SAVED_REG_COUNT*4 .set LOCALS_STACK_OFFSET, LOCALS_STACK_OFFSET - LINE_SAVED_REG_COUNT*4 .endif 197: .if (flags) & FLAG_SPILL_LINE_VARS add sp, sp, #LINE_SAVED_REG_COUNT*4 .endif 198: .if (flags) & FLAG_PROCESS_CORRUPTS_WK0 .set ARGS_STACK_OFFSET, ARGS_STACK_OFFSET-4 .set LOCALS_STACK_OFFSET, LOCALS_STACK_OFFSET-4 add sp, sp, #4 .endif \cleanup #ifdef DEBUG_PARAMS add sp, sp, #9*4 /* junk the debug copy of arguments */ #endif 199: pop {r4-r11, pc} /* exit */ .ltorg .unreq X .unreq Y .unreq DST .unreq STRIDE_D .unreq SRC .unreq STRIDE_S .unreq MASK .unreq STRIDE_M .unreq WK0 .unreq WK1 .unreq WK2 .unreq WK3 .unreq SCRATCH .unreq ORIG_W pixman_end_asm_function .endm .macro line_saved_regs x:vararg .set LINE_SAVED_REGS, 0 .set LINE_SAVED_REG_COUNT, 0 .irp SAVED_REG,\x .ifc "SAVED_REG","Y" .set LINE_SAVED_REGS, LINE_SAVED_REGS | (1<<1) .set LINE_SAVED_REG_COUNT, LINE_SAVED_REG_COUNT + 1 .endif .ifc 
"SAVED_REG","STRIDE_D" .set LINE_SAVED_REGS, LINE_SAVED_REGS | (1<<3) .set LINE_SAVED_REG_COUNT, LINE_SAVED_REG_COUNT + 1 .endif .ifc "SAVED_REG","STRIDE_S" .set LINE_SAVED_REGS, LINE_SAVED_REGS | (1<<5) .set LINE_SAVED_REG_COUNT, LINE_SAVED_REG_COUNT + 1 .endif .ifc "SAVED_REG","STRIDE_M" .set LINE_SAVED_REGS, LINE_SAVED_REGS | (1<<7) .set LINE_SAVED_REG_COUNT, LINE_SAVED_REG_COUNT + 1 .endif .ifc "SAVED_REG","ORIG_W" .set LINE_SAVED_REGS, LINE_SAVED_REGS | (1<<14) .set LINE_SAVED_REG_COUNT, LINE_SAVED_REG_COUNT + 1 .endif .endr .endm .macro nop_macro x:vararg .endm ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm-simd.c0000664000175000017500000003305714712446423017571 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Mozilla Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Mozilla Corporation not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Mozilla Corporation makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
* * Author: Jeff Muizelaar (jeff@infidigm.net) * */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #include "pixman-arm-common.h" #include "pixman-inlines.h" PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, src_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, src_x888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, src_0565_0565, uint16_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, src_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, src_0565_8888, uint16_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, src_x888_0565, uint32_t, 1, uint16_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, add_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, over_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (armv6, in_reverse_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, armv6, over_n_8888, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_DST (0, armv6, over_reverse_n_8888, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, armv6, over_8888_n_8888, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, armv6, over_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, armv6, over_n_8888_8888_ca, uint32_t, 1, uint32_t, 1) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (armv6, 0565_0565, SRC, uint16_t, uint16_t) PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (armv6, 8888_8888, SRC, uint32_t, uint32_t) void pixman_composite_src_n_8888_asm_armv6 (int32_t w, int32_t h, uint32_t *dst, int32_t dst_stride, uint32_t src); void pixman_composite_src_n_0565_asm_armv6 (int32_t w, int32_t h, uint16_t *dst, int32_t dst_stride, uint16_t src); void pixman_composite_src_n_8_asm_armv6 (int32_t w, int32_t h, uint8_t *dst, int32_t dst_stride, uint8_t src); static pixman_bool_t arm_simd_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, /* in 
32-bit words */ int bpp, int x, int y, int width, int height, uint32_t _xor) { /* stride is always multiple of 32bit units in pixman */ uint32_t byte_stride = stride * sizeof(uint32_t); switch (bpp) { case 8: pixman_composite_src_n_8_asm_armv6 ( width, height, (uint8_t *)(((char *) bits) + y * byte_stride + x), byte_stride, _xor & 0xff); return TRUE; case 16: pixman_composite_src_n_0565_asm_armv6 ( width, height, (uint16_t *)(((char *) bits) + y * byte_stride + x * 2), byte_stride / 2, _xor & 0xffff); return TRUE; case 32: pixman_composite_src_n_8888_asm_armv6 ( width, height, (uint32_t *)(((char *) bits) + y * byte_stride + x * 4), byte_stride / 4, _xor); return TRUE; default: return FALSE; } } static pixman_bool_t arm_simd_blt (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, /* in 32-bit words */ int dst_stride, /* in 32-bit words */ int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height) { if (src_bpp != dst_bpp) return FALSE; switch (src_bpp) { case 8: pixman_composite_src_8_8_asm_armv6 ( width, height, (uint8_t *)(((char *) dst_bits) + dest_y * dst_stride * 4 + dest_x * 1), dst_stride * 4, (uint8_t *)(((char *) src_bits) + src_y * src_stride * 4 + src_x * 1), src_stride * 4); return TRUE; case 16: pixman_composite_src_0565_0565_asm_armv6 ( width, height, (uint16_t *)(((char *) dst_bits) + dest_y * dst_stride * 4 + dest_x * 2), dst_stride * 2, (uint16_t *)(((char *) src_bits) + src_y * src_stride * 4 + src_x * 2), src_stride * 2); return TRUE; case 32: pixman_composite_src_8888_8888_asm_armv6 ( width, height, (uint32_t *)(((char *) dst_bits) + dest_y * dst_stride * 4 + dest_x * 4), dst_stride, (uint32_t *)(((char *) src_bits) + src_y * src_stride * 4 + src_x * 4), src_stride); return TRUE; default: return FALSE; } } static const pixman_fast_path_t arm_simd_fast_paths[] = { PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, armv6_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, 
a8b8g8r8, null, a8b8g8r8, armv6_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, armv6_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, armv6_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, armv6_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, armv6_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, armv6_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, armv6_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, a1r5g5b5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a1b5g5r5, null, a1b5g5r5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a1b5g5r5, null, x1b5g5r5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, x1b5g5r5, null, x1b5g5r5, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a4r4g4b4, null, a4r4g4b4, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a4b4g4r4, null, a4b4g4r4, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a4r4g4b4, null, x4r4g4b4, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a4b4g4r4, null, x4b4g4r4, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, x4r4g4b4, null, x4r4g4b4, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, x4b4g4r4, null, x4b4g4r4, armv6_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, r3g3b2, null, r3g3b2, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, b2g3r3, null, b2g3r3, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, 
a2r2g2b2, null, a2r2g2b2, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, a2b2g2r2, null, a2b2g2r2, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, c8, null, c8, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, g8, null, g8, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, x4a4, null, x4a4, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, x4c4, null, x4c4, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, x4g4, null, x4g4, armv6_composite_src_8_8), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, armv6_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, armv6_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, armv6_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, armv6_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, armv6_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, armv6_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, armv6_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, armv6_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, armv6_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, armv6_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, armv6_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, armv6_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, armv6_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, armv6_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, armv6_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, armv6_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, armv6_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, 
armv6_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, a8b8g8r8, armv6_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, x8b8g8r8, armv6_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, armv6_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, armv6_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, armv6_composite_add_8_8), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, armv6_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, armv6_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, armv6_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, armv6_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (IN_REVERSE, a8r8g8b8, null, a8r8g8b8, armv6_composite_in_reverse_8888_8888), PIXMAN_STD_FAST_PATH (IN_REVERSE, a8r8g8b8, null, x8r8g8b8, armv6_composite_in_reverse_8888_8888), PIXMAN_STD_FAST_PATH (IN_REVERSE, a8b8g8r8, null, a8b8g8r8, armv6_composite_in_reverse_8888_8888), PIXMAN_STD_FAST_PATH (IN_REVERSE, a8b8g8r8, null, x8b8g8r8, armv6_composite_in_reverse_8888_8888), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, armv6_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, armv6_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, armv6_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, armv6_composite_over_n_8888_8888_ca), SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, armv6_0565_0565), SIMPLE_NEAREST_FAST_PATH (SRC, b5g6r5, b5g6r5, armv6_0565_0565), SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, armv6_8888_8888), SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, armv6_8888_8888), SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, armv6_8888_8888), SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, armv6_8888_8888), SIMPLE_NEAREST_FAST_PATH (SRC, 
a8b8g8r8, x8b8g8r8, armv6_8888_8888), SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, armv6_8888_8888), { PIXMAN_OP_NONE }, }; pixman_implementation_t * _pixman_implementation_create_arm_simd (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, arm_simd_fast_paths); imp->blt = arm_simd_blt; imp->fill = arm_simd_fill; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arm.c0000664000175000017500000001340614712446423016633 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" typedef enum { ARM_V7 = (1 << 0), ARM_V6 = (1 << 1), ARM_VFP = (1 << 2), ARM_NEON = (1 << 3) } arm_cpu_features_t; #if defined(USE_ARM_SIMD) || defined(USE_ARM_NEON) #if defined(_MSC_VER) /* Needed for EXCEPTION_ILLEGAL_INSTRUCTION */ #include extern int pixman_msvc_try_arm_neon_op (); extern int pixman_msvc_try_arm_simd_op (); static arm_cpu_features_t detect_cpu_features (void) { arm_cpu_features_t features = 0; __try { pixman_msvc_try_arm_simd_op (); features |= ARM_V6; } __except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION) { } __try { pixman_msvc_try_arm_neon_op (); features |= ARM_NEON; } __except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION) { } return features; } #elif defined(__APPLE__) && defined(TARGET_OS_IPHONE) /* iOS */ #include "TargetConditionals.h" static arm_cpu_features_t detect_cpu_features (void) { arm_cpu_features_t features = 0; features |= ARM_V6; /* Detection of ARM NEON on iOS is fairly simple because iOS binaries * contain separate executable images for each processor architecture. * So all we have to do is detect the armv7 architecture build. The * operating system automatically runs the armv7 binary for armv7 devices * and the armv6 binary for armv6 devices. 
*/ #if defined(__ARM_NEON__) features |= ARM_NEON; #endif return features; } #elif defined(__ANDROID__) || defined(ANDROID) /* Android */ #include static arm_cpu_features_t detect_cpu_features (void) { arm_cpu_features_t features = 0; AndroidCpuFamily cpu_family; uint64_t cpu_features; cpu_family = android_getCpuFamily(); cpu_features = android_getCpuFeatures(); if (cpu_family == ANDROID_CPU_FAMILY_ARM) { if (cpu_features & ANDROID_CPU_ARM_FEATURE_ARMv7) features |= ARM_V7; if (cpu_features & ANDROID_CPU_ARM_FEATURE_VFPv3) features |= ARM_VFP; if (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON) features |= ARM_NEON; } return features; } #elif defined (__linux__) /* linux ELF */ #include #include #include #include #include #include #include static arm_cpu_features_t detect_cpu_features (void) { arm_cpu_features_t features = 0; Elf32_auxv_t aux; int fd; fd = open ("/proc/self/auxv", O_RDONLY); if (fd >= 0) { while (read (fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t)) { if (aux.a_type == AT_HWCAP) { uint32_t hwcap = aux.a_un.a_val; /* hardcode these values to avoid depending on specific * versions of the hwcap header, e.g. 
HWCAP_NEON */ if ((hwcap & 64) != 0) features |= ARM_VFP; /* this flag is only present on kernel 2.6.29 */ if ((hwcap & 4096) != 0) features |= ARM_NEON; } else if (aux.a_type == AT_PLATFORM) { const char *plat = (const char*) aux.a_un.a_val; if (strncmp (plat, "v7l", 3) == 0) features |= (ARM_V7 | ARM_V6); else if (strncmp (plat, "v6l", 3) == 0) features |= ARM_V6; } } close (fd); } return features; } #elif defined (_3DS) /* 3DS homebrew (devkitARM) */ static arm_cpu_features_t detect_cpu_features (void) { arm_cpu_features_t features = 0; features |= ARM_V6; return features; } #elif defined (PSP2) || defined (__SWITCH__) /* Vita (VitaSDK) or Switch (devkitA64) homebrew */ static arm_cpu_features_t detect_cpu_features (void) { arm_cpu_features_t features = 0; features |= ARM_NEON; return features; } #else /* Unknown */ static arm_cpu_features_t detect_cpu_features (void) { return 0; } #endif /* Linux elf */ static pixman_bool_t have_feature (arm_cpu_features_t feature) { static pixman_bool_t initialized; static arm_cpu_features_t features; if (!initialized) { features = detect_cpu_features(); initialized = TRUE; } return (features & feature) == feature; } #endif /* USE_ARM_SIMD || USE_ARM_NEON */ pixman_implementation_t * _pixman_arm_get_implementations (pixman_implementation_t *imp) { #ifdef USE_ARM_SIMD if (!_pixman_disabled ("arm-simd") && have_feature (ARM_V6)) imp = _pixman_implementation_create_arm_simd (imp); #endif #ifdef USE_ARM_NEON if (!_pixman_disabled ("arm-neon") && have_feature (ARM_NEON)) imp = _pixman_implementation_create_arm_neon (imp); #endif #ifdef USE_ARM_A64_NEON /* neon is a part of aarch64 */ if (!_pixman_disabled ("arm-neon")) imp = _pixman_implementation_create_arm_neon (imp); #endif return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arma64-neon-asm-bilinear.S0000664000175000017500000012730514712446423022430 0ustar00mattst88mattst88/* * Copyright Âİ 2011 
SCore Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) * Author: Taekyun Kim (tkq.kim@samsung.com) */ /* * This file contains scaled bilinear scanline functions implemented * using older siarhei's bilinear macro template. * * << General scanline function procedures >> * 1. bilinear interpolate source pixels * 2. load mask pixels * 3. load destination pixels * 4. duplicate mask to fill whole register * 5. interleave source & destination pixels * 6. apply mask to source pixels * 7. combine source & destination pixels * 8, Deinterleave final result * 9. store destination pixels * * All registers with single number (i.e. src0, tmp0) are 64-bits registers. * Registers with double numbers(src01, dst01) are 128-bits registers. * All temp registers can be used freely outside the code block. 
* Assume that symbol(register .req) OUT and MASK are defined at caller of these macro blocks. * * Remarks * There can be lots of pipeline stalls inside code block and between code blocks. * Further optimizations will be done by new macro templates using head/tail_head/tail scheme. */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined (__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv8-a .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arma64-neon-asm.h" /* * Bilinear macros from pixman-arm-neon-asm.S */ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear scanline functions. */ .macro bilinear_load_8888 reg1, reg2, tmp asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 ld1 {\()\reg1\().2s}, [TMP1], STRIDE ld1 {\()\reg2\().2s}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 ld1 {\()\reg2\().s}[0], [TMP1], STRIDE ld1 {\()\reg2\().s}[1], [TMP1] convert_four_0565_to_x888_packed \reg2, \reg1, \reg2, \tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 \reg1, \reg2, \tmp1 umull \()\acc1\().8h, \()\reg1\().8b, v28.8b umlal \()\acc1\().8h, \()\reg2\().8b, v29.8b bilinear_load_8888 \reg3, \reg4, \tmp2 umull \()\acc2\().8h, \()\reg3\().8b, v28.8b umlal \()\acc2\().8h, \()\reg4\().8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi, \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \xacc1, \xacc2, \xreg1, \xreg2, \xreg3, \xreg4, \xacc2lo, xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \yacc1, \yacc2, 
\yreg1, \yreg2, \yreg3, \yreg4, \yacc2lo, \yacc2hi .endm .macro vzip reg1, reg2 zip1 v24.8b, \reg1, \reg2 zip2 \reg2, \reg1, \reg2 mov \reg1, v24.8b .endm .macro vuzp reg1, reg2 uzp1 v24.8b, \reg1, \reg2 uzp2 \reg2, \reg1, \reg2 mov \reg1, v24.8b .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {\()\acc2\().s}[0], [TMP1], STRIDE ld1 {\()\acc2\().s}[2], [TMP2], STRIDE ld1 {\()\acc2\().s}[1], [TMP1] ld1 {\()\acc2\().s}[3], [TMP2] convert_0565_to_x888 \acc2, \reg3, \reg2, \reg1 vzip \()\reg1\().8b, \()\reg3\().8b vzip \()\reg2\().8b, \()\reg4\().8b vzip \()\reg3\().8b, \()\reg4\().8b vzip \()\reg1\().8b, \()\reg2\().8b umull \()\acc1\().8h, \()\reg1\().8b, v28.8b umlal \()\acc1\().8h, \()\reg2\().8b, v29.8b umull \()\acc2\().8h, \()\reg3\().8b, v28.8b umlal \()\acc2\().8h, \()\reg4\().8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi, \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {\()\xacc2\().s}[0], [TMP1], STRIDE ld1 {\()\xacc2\().s}[2], [TMP2], STRIDE ld1 {\()\xacc2\().s}[1], [TMP1] ld1 {\()\xacc2\().s}[3], [TMP2] convert_0565_to_x888 \xacc2, \xreg3, \xreg2, \xreg1 asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {\()\yacc2\().s}[0], [TMP1], STRIDE vzip \()\xreg1\().8b, \()\xreg3\().8b ld1 {\()\yacc2\().s}[2], [TMP2], STRIDE vzip \()\xreg2\().8b, \()\xreg4\().8b ld1 {\()\yacc2\().s}[1], [TMP1] vzip \()\xreg3\().8b, \()\xreg4\().8b ld1 {\()\yacc2\().s}[3], [TMP2] vzip \()\xreg1\().8b, \()\xreg2\().8b convert_0565_to_x888 \yacc2, \yreg3, \yreg2, \yreg1 umull \()\xacc1\().8h, \()\xreg1\().8b, 
v28.8b vzip \()\yreg1\().8b, \()\yreg3\().8b umlal \()\xacc1\().8h, \()\xreg2\().8b, v29.8b vzip \()\yreg2\().8b, \()\yreg4\().8b umull \()\xacc2\().8h, \()\xreg3\().8b, v28.8b vzip \()\yreg3\().8b, \()\yreg4\().8b umlal \()\xacc2\().8h, \()\xreg4\().8b, v29.8b vzip \()\yreg1\().8b, \()\yreg2\().8b umull \()\yacc1\().8h, \()\yreg1\().8b, v28.8b umlal \()\yacc1\().8h, \()\yreg2\().8b, v29.8b umull \()\yacc2\().8h, \()\yreg3\().8b, v28.8b umlal \()\yacc2\().8h, \()\yreg4\().8b, v29.8b .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if \numpix == 4 st1 {v0.2s, v1.2s}, [OUT], #16 .elseif \numpix == 2 st1 {v0.2s}, [OUT], #8 .elseif \numpix == 1 st1 {v0.s}[0], [OUT], #4 .else .error bilinear_store_8888 \numpix is unsupported .endif .endm .macro bilinear_store_0565 numpix, tmp1, tmp2 vuzp v0.8b, v1.8b vuzp v2.8b, v3.8b vuzp v1.8b, v3.8b vuzp v0.8b, v2.8b convert_8888_to_0565 v2, v1, v0, v1, \tmp1, \tmp2 .if \numpix == 4 st1 {v1.4h}, [OUT], #8 .elseif \numpix == 2 st1 {v1.s}[0], [OUT], #4 .elseif \numpix == 1 st1 {v1.h}[0], [OUT], #2 .else .error bilinear_store_0565 \numpix is unsupported .endif .endm /* * Macros for loading mask pixels into register 'mask'. * dup must be done in somewhere else. */ .macro bilinear_load_mask_x numpix, mask .endm .macro bilinear_load_mask_8 numpix, mask .if \numpix == 4 ld1 {\()\mask\().s}[0], [MASK], #4 .elseif \numpix == 2 ld1 {\()\mask\().h}[0], [MASK], #2 .elseif \numpix == 1 ld1 {\()\mask\().b}[0], [MASK], #1 .else .error bilinear_load_mask_8 \numpix is unsupported .endif prfum PREFETCH_MODE, [MASK, #(prefetch_offset)] .endm .macro bilinear_load_mask mask_fmt, numpix, mask bilinear_load_mask_\mask_fmt \numpix, \mask .endm /* * Macros for loading destination pixels into register 'dst0' and 'dst1'. * Interleave should be done somewhere else. 
*/ .macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01 .endm .macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01 .if \numpix == 4 ld1 {\()\dst0\().2s, \()\dst1\().2s}, [OUT] .elseif \numpix == 2 ld1 {\()\dst0\().2s}, [OUT] .elseif \numpix == 1 ld1 {\()\dst0\().s}[0], [OUT] .else .error bilinear_load_dst_8888 \numpix is unsupported .endif mov \()\dst01\().d[0], \()\dst0\().d[0] mov \()\dst01\().d[1], \()\dst1\().d[0] prfm PREFETCH_MODE, [OUT, #(prefetch_offset * 4)] .endm .macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01 bilinear_load_dst_8888 \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01 bilinear_load_dst_8888 \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01 bilinear_load_dst_\()\dst_fmt\()_\()\op \numpix, \dst0, \dst1, \dst01 .endm /* * Macros for duplicating partially loaded mask to fill entire register. * We will apply mask to interleaved source pixels, that is * (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3) * (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3) * So, we need to duplicate loaded mask into whole register. * * For two pixel case * (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1) * We can do some optimizations for this including last pixel cases. 
*/ .macro bilinear_duplicate_mask_x numpix, mask .endm .macro bilinear_duplicate_mask_8 numpix, mask .if \numpix == 4 dup \()\mask\().2s, \()\mask\().s[0] .elseif \numpix == 2 dup \()\mask\().4h, \()\mask\().h[0] .elseif \numpix == 1 dup \()\mask\().8b, \()\mask\().b[0] .else .error bilinear_duplicate_\mask_8 is unsupported .endif .endm .macro bilinear_duplicate_mask mask_fmt, numpix, mask bilinear_duplicate_mask_\()\mask_fmt \numpix, \mask .endm /* * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form. * Interleave should be done when maks is enabled or operator is 'over'. */ .macro bilinear_interleave src0, src1, src01, dst0, dst1, dst01 vuzp \()\src0\().8b, \()\src1\().8b vuzp \()\dst0\().8b, \()\dst1\().8b vuzp \()\src0\().8b, \()\src1\().8b vuzp \()\dst0\().8b, \()\dst1\().8b mov \()\src01\().d[1], \()\src1\().d[0] mov \()\src01\().d[0], \()\src0\().d[0] mov \()\dst01\().d[1], \()\dst1\().d[0] mov \()\dst01\().d[0], \()\dst0\().d[0] .endm .macro bilinear_interleave_src_dst_x_src \ numpix, src0, src1, src01, dst0, dst1, dst01 .endm .macro bilinear_interleave_src_dst_x_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm .macro bilinear_interleave_src_dst_x_add \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm .macro bilinear_interleave_src_dst_8_src \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm .macro bilinear_interleave_src_dst_8_over \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm .macro bilinear_interleave_src_dst_8_add \ numpix, src0, src1, src01, dst0, dst1, dst01 bilinear_interleave \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm .macro bilinear_interleave_src_dst \ mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01 
bilinear_interleave_src_dst_\()\mask_fmt\()_\()\op \ \numpix, \src0, \src1, \src01, \dst0, \dst1, \dst01 .endm /* * Macros for applying masks to src pixels. (see combine_mask_u() function) * src, dst should be in interleaved form. * mask register should be in form (m0, m1, m2, m3). */ .macro bilinear_apply_mask_to_src_x \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 .endm .macro bilinear_apply_mask_to_src_8 \ numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 umull \()\tmp01\().8h, \()\src0\().8b, \()\mask\().8b umull \()\tmp23\().8h, \()\src1\().8b, \()\mask\().8b /* bubbles */ urshr \()\tmp45\().8h, \()\tmp01\().8h, #8 urshr \()\tmp67\().8h, \()\tmp23\().8h, #8 /* bubbles */ raddhn \()\src0\().8b, \()\tmp45\().8h, \()\tmp01\().8h raddhn \()\src1\().8b, \()\tmp67\().8h, \()\tmp23\().8h mov \()\src01\().d[0], \()\src0\().d[0] mov \()\src01\().d[1], \()\src1\().d[0] .endm .macro bilinear_apply_mask_to_src \ mask_fmt, numpix, src0, src1, src01, mask, \ tmp01, tmp23, tmp45, tmp67 bilinear_apply_mask_to_src_\()\mask_fmt \ \numpix, \src0, \src1, \src01, \mask, \ \tmp01, \tmp23, \tmp45, \tmp67 .endm /* * Macros for combining src and destination pixels. * Interleave or not is depending on operator 'op'. 
*/ .macro bilinear_combine_src \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 .endm .macro bilinear_combine_over \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 dup \()\tmp8\().2s, \()\src1\().s[1] /* bubbles */ mvn \()\tmp8\().8b, \()\tmp8\().8b /* bubbles */ umull \()\tmp01\().8h, \()\dst0\().8b, \()\tmp8\().8b /* bubbles */ umull \()\tmp23\().8h, \()\dst1\().8b, \()\tmp8\().8b /* bubbles */ urshr \()\tmp45\().8h, \()\tmp01\().8h, #8 urshr \()\tmp67\().8h, \()\tmp23\().8h, #8 /* bubbles */ raddhn \()\dst0\().8b, \()\tmp45\().8h, \()\tmp01\().8h raddhn \()\dst1\().8b, \()\tmp67\().8h, \()\tmp23\().8h mov \()\dst01\().d[0], \()\dst0\().d[0] mov \()\dst01\().d[1], \()\dst1\().d[0] /* bubbles */ uqadd \()\src0\().8b, \()\dst0\().8b, \()\src0\().8b uqadd \()\src1\().8b, \()\dst1\().8b, \()\src1\().8b mov \()\src01\().d[0], \()\src0\().d[0] mov \()\src01\().d[1], \()\src1\().d[0] .endm .macro bilinear_combine_add \ numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 uqadd \()\src0\().8b, \()\dst0\().8b, \()\src0\().8b uqadd \()\src1\().8b, \()\dst1\().8b, \()\src1\().8b mov \()\src01\().d[0], \()\src0\().d[0] mov \()\src01\().d[1], \()\src1\().d[0] .endm .macro bilinear_combine \ op, numpix, src0, src1, src01, dst0, dst1, dst01, \ tmp01, tmp23, tmp45, tmp67, tmp8 bilinear_combine_\()\op \ \numpix, \src0, \src1, \src01, \dst0, \dst1, \dst01, \ \tmp01, \tmp23, \tmp45, \tmp67, \tmp8 .endm /* * Macros for final deinterleaving of destination pixels if needed. 
*/ .macro bilinear_deinterleave numpix, dst0, dst1, dst01 vuzp \()\dst0\().8b, \()\dst1\().8b /* bubbles */ vuzp \()\dst0\().8b, \()\dst1\().8b mov \()\dst01\().d[0], \()\dst0\().d[0] mov \()\dst01\().d[1], \()\dst1\().d[0] .endm .macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01 .endm .macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01 bilinear_deinterleave \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01 bilinear_deinterleave_dst_\()\mask_fmt\()_\()\op \numpix, \dst0, \dst1, \dst01 .endm .macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op bilinear_load_\()\src_fmt v0, v1, v2 bilinear_load_mask \mask_fmt, 1, v4 bilinear_load_dst \dst_fmt, \op, 1, v18, v19, v9 umull v2.8h, v0.8b, v28.8b umlal v2.8h, v1.8b, v29.8b /* 5 cycles bubble */ ushll v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v2.4h, v15.h[0] umlal2 v0.4s, v2.8h, v15.h[0] /* 5 cycles bubble */ bilinear_duplicate_mask \mask_fmt, 1, v4 shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ xtn v0.8b, v0.8h /* 1 cycle bubble */ bilinear_interleave_src_dst \ \mask_fmt, \op, 1, v0, v1, v0, v18, v19, v9 bilinear_apply_mask_to_src \ \mask_fmt, 1, v0, v1, v0, v4, \ v3, v8, v10, v11 bilinear_combine \ \op, 1, v0, v1, v0, v18, v19, v9, \ v3, v8, v10, v11, v5 bilinear_deinterleave_dst \mask_fmt, \op, 1, v0, v1, v0 bilinear_store_\()\dst_fmt 1, v17, v18 .endm .macro bilinear_interpolate_two_pixels 
src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_two_\()\src_fmt \ v1, v11, v18, v19, v20, v21, v22, v23 bilinear_load_mask \mask_fmt, 2, v4 bilinear_load_dst \dst_fmt, \op, 2, v18, v19, v9 ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_duplicate_mask \mask_fmt, 2, v4 ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h xtn v0.8b, v0.8h bilinear_interleave_src_dst \ \mask_fmt, \op, 2, v0, v1, v0, v18, v19, v9 bilinear_apply_mask_to_src \ \mask_fmt, 2, v0, v1, v0, v4, \ v3, v8, v10, v11 bilinear_combine \ \op, 2, v0, v1, v0, v18, v19, v9, \ v3, v8, v10, v11, v5 bilinear_deinterleave_dst \mask_fmt, \op, 2, v0, v1, v0 bilinear_store_\()\dst_fmt 2, v16, v17 .endm .macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op bilinear_load_and_vertical_interpolate_four_\()\src_fmt \ v1, v11, v4, v5, v6, v7, v22, v23, \ v3, v9, v16, v17, v20, v21, v18, v19 prfm PREFETCH_MODE, [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE prfm PREFETCH_MODE, [TMP1, PF_OFFS] ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v3.4h, v15.h[0] umlal2 v2.4s, v3.8h, v15.h[0] ushll v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v8.4s, v9.4h, v15.h[4] umlal2 v8.4s, v9.8h, v15.h[4] add v12.8h, v12.8h, v13.8h shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) 
shrn2 v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS) bilinear_load_mask \mask_fmt, 4, v4 bilinear_duplicate_mask \mask_fmt, 4, v4 ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) xtn v0.8b, v0.8h xtn v1.8b, v2.8h add v12.8h, v12.8h, v13.8h bilinear_load_dst \dst_fmt, \op, 4, v2, v3, v21 bilinear_interleave_src_dst \ \mask_fmt, \op, 4, v0, v1, v0, v2, v3, v11 bilinear_apply_mask_to_src \ \mask_fmt, 4, v0, v1, v0, v4, \ v6, v8, v9, v10 bilinear_combine \ \op, 4, v0, v1, v0, v2, v3, v1, \ v6, v8, v9, v10, v23 bilinear_deinterleave_dst \mask_fmt, \op, 4, v0, v1, v0 bilinear_store_\()\dst_fmt 4, v6, v7 .endm .set BILINEAR_FLAG_USE_MASK, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline functions. * * Bilinear scanline generator macro take folling arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * src/dst_bpp_shift - (1 << bpp_shift) is the size of src/dst pixel in bytes * process_last_pixel - code block that interpolate one pixel and does not * update horizontal weight * process_two_pixels - code block that interpolate two pixels and update * horizontal weight * process_four_pixels - code block that interpolate four pixels and update * horizontal weight * process_pixblock_head - head part of middle loop * process_pixblock_tail - tail part of middle loop * process_pixblock_tail_head - tail_head of middle loop * pixblock_size - number of pixels processed in a single middle loop * prefetch_distance - prefetch in the source image by that many pixels ahead */ .macro generate_bilinear_scanline_func \ fname, \ src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \ bilinear_process_last_pixel, \ bilinear_process_two_pixels, \ bilinear_process_four_pixels, \ bilinear_process_pixblock_head, \ bilinear_process_pixblock_tail, \ bilinear_process_pixblock_tail_head, \ pixblock_size, \ prefetch_distance, \ flags 
pixman_asm_function \fname .if \pixblock_size == 8 .elseif \pixblock_size == 4 .else .error unsupported pixblock size .endif .if ((\flags) & BILINEAR_FLAG_USE_MASK) == 0 OUT .req x0 TOP .req x1 BOTTOM .req x2 WT .req x3 WWT .req w3 WB .req x4 WWB .req w4 X .req w5 UX .req w6 WIDTH .req x7 TMP1 .req x10 WTMP1 .req w10 TMP2 .req x11 WTMP2 .req w11 PF_OFFS .req x12 TMP3 .req x13 WTMP3 .req w13 TMP4 .req x14 WTMP4 .req w14 STRIDE .req x15 DUMMY .req x30 stp x29, x30, [sp, -16]! mov x29, sp sub sp, sp, 112 sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 stp x10, x11, [x29, -80] stp x12, x13, [x29, -96] stp x14, x15, [x29, -112] .else OUT .req x0 MASK .req x1 TOP .req x2 BOTTOM .req x3 WT .req x4 WWT .req w4 WB .req x5 WWB .req w5 X .req w6 UX .req w7 WIDTH .req x8 TMP1 .req x10 WTMP1 .req w10 TMP2 .req x11 WTMP2 .req w11 PF_OFFS .req x12 TMP3 .req x13 WTMP3 .req w13 TMP4 .req x14 WTMP4 .req w14 STRIDE .req x15 DUMMY .req x30 .set prefetch_offset, \prefetch_distance stp x29, x30, [sp, -16]! 
mov x29, sp sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 stp x10, x11, [x29, -80] stp x12, x13, [x29, -96] stp x14, x15, [x29, -112] str x8, [x29, -120] ldr w8, [x29, 16] sub sp, sp, 120 .endif mov WTMP1, #\prefetch_distance umull PF_OFFS, WTMP1, UX sub STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 300f dup v12.8h, X dup v13.8h, UX dup v28.8b, WWT dup v29.8b, WWB mov v25.d[0], v12.d[1] mov v26.d[0], v13.d[0] add v25.4h, v25.4h, v26.4h mov v12.d[1], v25.d[0] /* ensure good destination alignment */ cmp WIDTH, #1 blt 100f tst OUT, #(1 << \dst_bpp_shift) beq 100f ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h \bilinear_process_last_pixel sub WIDTH, WIDTH, #1 100: add v13.8h, v13.8h, v13.8h ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h cmp WIDTH, #2 blt 100f tst OUT, #(1 << (\dst_bpp_shift + 1)) beq 100f \bilinear_process_two_pixels sub WIDTH, WIDTH, #2 100: .if \pixblock_size == 8 cmp WIDTH, #4 blt 100f tst OUT, #(1 << (\dst_bpp_shift + 2)) beq 100f \bilinear_process_four_pixels sub WIDTH, WIDTH, #4 100: .endif subs WIDTH, WIDTH, #\pixblock_size blt 100f asr PF_OFFS, PF_OFFS, #(16 - \src_bpp_shift) \bilinear_process_pixblock_head subs WIDTH, WIDTH, #\pixblock_size blt 500f 0: \bilinear_process_pixblock_tail_head subs WIDTH, WIDTH, #\pixblock_size bge 0b 500: \bilinear_process_pixblock_tail 100: .if \pixblock_size == 8 tst WIDTH, #4 beq 200f \bilinear_process_four_pixels 200: .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 200f \bilinear_process_two_pixels 200: tst WIDTH, #1 beq 300f \bilinear_process_last_pixel 300: .if ((\flags) & BILINEAR_FLAG_USE_MASK) == 0 sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x10, x11, [x29, -80] ldp x12, x13, [x29, -96] ldp x14, x15, [x29, -112] mov sp, x29 ldp x29, x30, [sp], 16 .else sub x29, x29, 64 ld1 
{v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x10, x11, [x29, -80] ldp x12, x13, [x29, -96] ldp x14, x15, [x29, -112] ldr x8, [x29, -120] mov sp, x29 ldp x29, x30, [sp], 16 .endif VERIFY_LR ret .unreq OUT .unreq TOP .unreq WT .unreq WWT .unreq WB .unreq WWB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq WTMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE .if ((\flags) & BILINEAR_FLAG_USE_MASK) != 0 .unreq MASK .endif pixman_end_asm_function .endm /* src_8888_8_8888 */ .macro bilinear_src_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, src .endm .macro bilinear_src_8888_8_8888_process_pixblock_head bilinear_src_8888_8_8888_process_four_pixels .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail .endm .macro bilinear_src_8888_8_8888_process_pixblock_tail_head bilinear_src_8888_8_8888_process_pixblock_tail bilinear_src_8888_8_8888_process_pixblock_head .endm /* src_8888_8_0565 */ .macro bilinear_src_8888_8_0565_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 0565, src .endm .macro bilinear_src_8888_8_0565_process_pixblock_head bilinear_src_8888_8_0565_process_four_pixels .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail .endm .macro bilinear_src_8888_8_0565_process_pixblock_tail_head bilinear_src_8888_8_0565_process_pixblock_tail bilinear_src_8888_8_0565_process_pixblock_head .endm /* src_0565_8_x888 */ .macro bilinear_src_0565_8_x888_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 8888, src .endm 
.macro bilinear_src_0565_8_x888_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 8888, src .endm .macro bilinear_src_0565_8_x888_process_pixblock_head bilinear_src_0565_8_x888_process_four_pixels .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail .endm .macro bilinear_src_0565_8_x888_process_pixblock_tail_head bilinear_src_0565_8_x888_process_pixblock_tail bilinear_src_0565_8_x888_process_pixblock_head .endm /* src_0565_8_0565 */ .macro bilinear_src_0565_8_0565_process_last_pixel bilinear_interpolate_last_pixel 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_two_pixels bilinear_interpolate_two_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_four_pixels bilinear_interpolate_four_pixels 0565, 8, 0565, src .endm .macro bilinear_src_0565_8_0565_process_pixblock_head bilinear_src_0565_8_0565_process_four_pixels .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail .endm .macro bilinear_src_0565_8_0565_process_pixblock_tail_head bilinear_src_0565_8_0565_process_pixblock_tail bilinear_src_0565_8_0565_process_pixblock_head .endm /* over_8888_8888 */ .macro bilinear_over_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, x, 8888, over .endm .macro bilinear_over_8888_8888_process_pixblock_head asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 ld1 {v22.2s}, [TMP1], STRIDE ld1 {v23.2s}, [TMP1] asr WTMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 umull v8.8h, v22.8b, v28.8b umlal v8.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP2], STRIDE ld1 {v23.2s}, [TMP2] asr WTMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, 
lsl #2 umull v9.8h, v22.8b, v28.8b umlal v9.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP3], STRIDE ld1 {v23.2s}, [TMP3] umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v8.4h, v15.h[0] umlal2 v0.4s, v8.8h, v15.h[0] prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v1.4s, v9.4h, v15.h[4] umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h .endm .macro bilinear_over_8888_8888_process_pixblock_tail ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v10.4h, v15.h[0] umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) xtn v6.8b, v0.8h xtn v7.8b, v2.8h ld1 {v2.2s, v3.2s}, [OUT] prfm PREFETCH_MODE, [OUT, #(prefetch_offset * 4)] vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b dup v4.2s, v7.s[1] mvn v4.8b, v4.8b umull v11.8h, v2.8b, v4.8b umull v2.8h, v3.8b, v4.8b urshr v1.8h, v11.8h, #8 urshr v10.8h, v2.8h, #8 raddhn v3.8b, v10.8h, v2.8h raddhn v2.8b, v1.8h, v11.8h uqadd v6.8b, v2.8b, v6.8b uqadd v7.8b, v3.8b, v7.8b vuzp v6.8b, v7.8b vuzp v6.8b, v7.8b add v12.8h, v12.8h, v13.8h st1 {v6.2s, v7.2s}, [OUT], #16 .endm .macro bilinear_over_8888_8888_process_pixblock_tail_head ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS asr WTMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 umlsl v2.4s, v10.4h, v15.h[0] asr WTMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 umlal2 
v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS ld1 {v20.2s}, [TMP1], STRIDE umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] ld1 {v21.2s}, [TMP1] umull v8.8h, v20.8b, v28.8b umlal v8.8h, v21.8b, v29.8b shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ld1 {v22.2s}, [TMP2], STRIDE shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) xtn v6.8b, v0.8h ld1 {v23.2s}, [TMP2] umull v9.8h, v22.8b, v28.8b asr WTMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 asr WTMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umlal v9.8h, v23.8b, v29.8b xtn v7.8b, v2.8h ld1 {v2.2s, v3.2s}, [OUT] prfm PREFETCH_MODE, [OUT, PF_OFFS] ld1 {v22.2s}, [TMP3], STRIDE vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b vuzp v6.8b, v7.8b vuzp v2.8b, v3.8b dup v4.2s, v7.s[1] ld1 {v23.2s}, [TMP3] mvn v4.8b, v4.8b umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b umull v11.8h, v2.8b, v4.8b umull v2.8h, v3.8b, v4.8b ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v8.4h, v15.h[0] urshr v1.8h, v11.8h, #8 umlal2 v0.4s, v8.8h, v15.h[0] urshr v8.8h, v2.8h, #8 raddhn v3.8b, v8.8h, v2.8h raddhn v2.8b, v1.8h, v11.8h prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE uqadd v6.8b, v2.8b, v6.8b uqadd v7.8b, v3.8b, v7.8b ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b vuzp v6.8b, v7.8b ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS vuzp v6.8b, v7.8b umlsl v1.4s, v9.4h, v15.h[4] add v12.8h, v12.8h, v13.8h umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h st1 {v6.2s, v7.2s}, [OUT], #16 .endm /* over_8888_8_8888 */ .macro bilinear_over_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, over .endm .macro 
bilinear_over_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_four_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, over bilinear_interpolate_two_pixels 8888, 8, 8888, over .endm .macro bilinear_over_8888_8_8888_process_pixblock_head bilinear_over_8888_8_8888_process_four_pixels .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail .endm .macro bilinear_over_8888_8_8888_process_pixblock_tail_head bilinear_over_8888_8_8888_process_pixblock_tail bilinear_over_8888_8_8888_process_pixblock_head .endm /* add_8888_8888 */ .macro bilinear_add_8888_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_four_pixels bilinear_interpolate_two_pixels 8888, x, 8888, add bilinear_interpolate_two_pixels 8888, x, 8888, add .endm .macro bilinear_add_8888_8888_process_pixblock_head bilinear_add_8888_8888_process_four_pixels .endm .macro bilinear_add_8888_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8888_process_pixblock_tail_head bilinear_add_8888_8888_process_pixblock_tail bilinear_add_8888_8888_process_pixblock_head .endm /* add_8888_8_8888 */ .macro bilinear_add_8888_8_8888_process_last_pixel bilinear_interpolate_last_pixel 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_two_pixels bilinear_interpolate_two_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_four_pixels bilinear_interpolate_four_pixels 8888, 8, 8888, add .endm .macro bilinear_add_8888_8_8888_process_pixblock_head bilinear_add_8888_8_8888_process_four_pixels .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail .endm .macro bilinear_add_8888_8_8888_process_pixblock_tail_head bilinear_add_8888_8_8888_process_pixblock_tail bilinear_add_8888_8_8888_process_pixblock_head .endm /* Bilinear scanline 
functions */

/*
 * Instantiate the NEON bilinear scanline functions.  Each
 * 'generate_bilinear_scanline_func' invocation below expands the template
 * macro defined earlier in this file.  Arguments, in order: function name,
 * source format, destination format, src/dst bpp shifts ((1 << shift) bytes
 * per pixel), the five per-operation pixel-processing macros, then the
 * pixblock size, the prefetch distance (in pixels) and the flag word.
 */

/* SRC operator: a8r8g8b8 source, a8 mask, a8r8g8b8 destination */
generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \
    8888, 8888, 2, 2, \
    bilinear_src_8888_8_8888_process_last_pixel, \
    bilinear_src_8888_8_8888_process_two_pixels, \
    bilinear_src_8888_8_8888_process_four_pixels, \
    bilinear_src_8888_8_8888_process_pixblock_head, \
    bilinear_src_8888_8_8888_process_pixblock_tail, \
    bilinear_src_8888_8_8888_process_pixblock_tail_head, \
    4, 28, BILINEAR_FLAG_USE_MASK

/* SRC operator: a8r8g8b8 source, a8 mask, r5g6b5 destination */
generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \
    8888, 0565, 2, 1, \
    bilinear_src_8888_8_0565_process_last_pixel, \
    bilinear_src_8888_8_0565_process_two_pixels, \
    bilinear_src_8888_8_0565_process_four_pixels, \
    bilinear_src_8888_8_0565_process_pixblock_head, \
    bilinear_src_8888_8_0565_process_pixblock_tail, \
    bilinear_src_8888_8_0565_process_pixblock_tail_head, \
    4, 28, BILINEAR_FLAG_USE_MASK

/* SRC operator: r5g6b5 source, a8 mask, x8r8g8b8 destination */
generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \
    0565, 8888, 1, 2, \
    bilinear_src_0565_8_x888_process_last_pixel, \
    bilinear_src_0565_8_x888_process_two_pixels, \
    bilinear_src_0565_8_x888_process_four_pixels, \
    bilinear_src_0565_8_x888_process_pixblock_head, \
    bilinear_src_0565_8_x888_process_pixblock_tail, \
    bilinear_src_0565_8_x888_process_pixblock_tail_head, \
    4, 28, BILINEAR_FLAG_USE_MASK

/* SRC operator: r5g6b5 source, a8 mask, r5g6b5 destination */
generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \
    0565, 0565, 1, 1, \
    bilinear_src_0565_8_0565_process_last_pixel, \
    bilinear_src_0565_8_0565_process_two_pixels, \
    bilinear_src_0565_8_0565_process_four_pixels, \
    bilinear_src_0565_8_0565_process_pixblock_head, \
    bilinear_src_0565_8_0565_process_pixblock_tail, \
    bilinear_src_0565_8_0565_process_pixblock_tail_head, \
    4, 28, BILINEAR_FLAG_USE_MASK

/* OVER operator: a8r8g8b8 source, no mask, a8r8g8b8 destination */
generate_bilinear_scanline_func \
    pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \
    8888, 8888, 2, 2, \
    bilinear_over_8888_8888_process_last_pixel, \
    bilinear_over_8888_8888_process_two_pixels, \
bilinear_over_8888_8888_process_four_pixels, \ bilinear_over_8888_8888_process_pixblock_head, \ bilinear_over_8888_8888_process_pixblock_tail, \ bilinear_over_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_over_8888_8_8888_process_last_pixel, \ bilinear_over_8888_8_8888_process_two_pixels, \ bilinear_over_8888_8_8888_process_four_pixels, \ bilinear_over_8888_8_8888_process_pixblock_head, \ bilinear_over_8888_8_8888_process_pixblock_tail, \ bilinear_over_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8888_process_last_pixel, \ bilinear_add_8888_8888_process_two_pixels, \ bilinear_add_8888_8888_process_four_pixels, \ bilinear_add_8888_8888_process_pixblock_head, \ bilinear_add_8888_8888_process_pixblock_tail, \ bilinear_add_8888_8888_process_pixblock_tail_head, \ 4, 28, 0 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \ 8888, 8888, 2, 2, \ bilinear_add_8888_8_8888_process_last_pixel, \ bilinear_add_8888_8_8888_process_two_pixels, \ bilinear_add_8888_8_8888_process_four_pixels, \ bilinear_add_8888_8_8888_process_pixblock_head, \ bilinear_add_8888_8_8888_process_pixblock_tail, \ bilinear_add_8888_8_8888_process_pixblock_tail_head, \ 4, 28, BILINEAR_FLAG_USE_MASK ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arma64-neon-asm.S0000664000175000017500000042241114712446423020641 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights 
to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ /* * This file contains implementations of NEON optimized pixel processing * functions. There is no full and detailed tutorial, but some functions * (those which are exposing some new or interesting features) are * extensively commented and can be used as examples. * * You may want to have a look at the comments for following functions: * - pixman_composite_over_8888_0565_asm_neon * - pixman_composite_over_n_8_0565_asm_neon */ /* Prevent the stack from becoming executable for no reason... */ #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif .text .arch armv8-a .altmacro .p2align 2 #include "pixman-private.h" #include "pixman-arm-asm.h" #include "pixman-arma64-neon-asm.h" /* Global configuration options and preferences */ /* * The code can optionally make use of unaligned memory accesses to improve * performance of handling leading/trailing pixels for each scanline. * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for * example in linux if unaligned memory accesses are not configured to * generate.exceptions. 
*/ .set RESPECT_STRICT_ALIGNMENT, 1 /* * Set default prefetch type. There is a choice between the following options: * * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work * as NOP to workaround some HW bugs or for whatever other reason) * * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where * advanced prefetch intruduces heavy overhead) * * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8 * which can run ARM and NEON instructions simultaneously so that extra ARM * instructions do not add (many) extra cycles, but improve prefetch efficiency) * * Note: some types of function can't support advanced prefetch and fallback * to simple one (those which handle 24bpp pixels) */ .set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED /* Prefetch distance in pixels for simple prefetch */ .set PREFETCH_DISTANCE_SIMPLE, 64 /* * Implementation of pixman_composite_over_8888_0565_asm_neon * * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and * performs OVER compositing operation. Function fast_composite_over_8888_0565 * from pixman-fast-path.c does the same in C and can be used as a reference. * * First we need to have some NEON assembly code which can do the actual * operation on the pixels and provide it to the template macro. * * Template macro quite conveniently takes care of emitting all the necessary * code for memory reading and writing (including quite tricky cases of * handling unaligned leading/trailing pixels), so we only need to deal with * the data in NEON registers. 
* * NEON registers allocation in general is recommented to be the following: * v0, v1, v2, v3 - contain loaded source pixel data * v4, v5, v6, v7 - contain loaded destination pixels (if they are needed) * v24, v25, v26, v27 - contain loading mask pixel data (if mask is used) * v28, v29, v30, v31 - place for storing the result (destination pixels) * * As can be seen above, four 64-bit NEON registers are used for keeping * intermediate pixel data and up to 8 pixels can be processed in one step * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp). * * This particular function uses the following registers allocation: * v0, v1, v2, v3 - contain loaded source pixel data * v4, v5 - contain loaded destination pixels (they are needed) * v28, v29 - place for storing the result (destination pixels) */ /* * Step one. We need to have some code to do some arithmetics on pixel data. * This is implemented as a pair of macros: '*_head' and '*_tail'. When used * back-to-back, they take pixel data from {v0, v1, v2, v3} and {v4, v5}, * perform all the needed calculations and write the result to {v28, v29}. * The rationale for having two macros and not just one will be explained * later. In practice, any single monolitic function which does the work can * be split into two parts in any arbitrary way without affecting correctness. * * There is one special trick here too. Common template macro can optionally * make our life a bit easier by doing R, G, B, A color components * deinterleaving for 32bpp pixel formats (and this feature is used in * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that * instead of having 8 packed pixels in {v0, v1, v2, v3} registers, we * actually use v0 register for blue channel (a vector of eight 8-bit * values), v1 register for green, v2 for red and v3 for alpha. 
This * simple conversion can be also done with a few NEON instructions: * * Packed to planar conversion: // vuzp8 is a wrapper macro * vuzp8 v0, v1 * vuzp8 v2, v3 * vuzp8 v1, v3 * vuzp8 v0, v2 * * Planar to packed conversion: // vzip8 is a wrapper macro * vzip8 v0, v2 * vzip8 v1, v3 * vzip8 v2, v3 * vzip8 v0, v1 * * But pixel can be loaded directly in planar format using LD4 / b NEON * instruction. It is 1 cycle slower than LD1 / s, so this is not always * desirable, that's why deinterleaving is optional. * * But anyway, here is the code: */ .macro pixman_composite_over_8888_0565_process_pixblock_head /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format and put data into v6 - red, v7 - green, v30 - blue */ mov v4.d[1], v5.d[0] shrn v6.8b, v4.8h, #8 shrn v7.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 sri v6.8b, v6.8b, #5 mvn v3.8b, v3.8b /* invert source alpha */ sri v7.8b, v7.8b, #6 shrn v30.8b, v4.8h, #2 /* now do alpha blending, storing results in 8-bit planar format into v20 - red, v23 - green, v22 - blue */ umull v10.8h, v3.8b, v6.8b umull v11.8h, v3.8b, v7.8b umull v12.8h, v3.8b, v30.8b urshr v17.8h, v10.8h, #8 urshr v18.8h, v11.8h, #8 urshr v19.8h, v12.8h, #8 raddhn v20.8b, v10.8h, v17.8h raddhn v23.8b, v11.8h, v18.8h raddhn v22.8b, v12.8h, v19.8h .endm .macro pixman_composite_over_8888_0565_process_pixblock_tail /* ... continue alpha blending */ uqadd v17.8b, v2.8b, v20.8b uqadd v18.8b, v0.8b, v22.8b uqadd v19.8b, v1.8b, v23.8b /* convert the result to r5g6b5 and store it into {v14} */ ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 ushll v8.8h, v19.8b, #7 sli v8.8h, v8.8h, #1 ushll v9.8h, v18.8b, #7 sli v9.8h, v9.8h, #1 sri v14.8h, v8.8h, #5 sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* * OK, now we got almost everything that we need. Using the above two * macros, the work can be done right. But now we want to optimize * it a bit. 
ARM Cortex-A8 is an in-order core, and benefits really * a lot from good code scheduling and software pipelining. * * Let's construct some code, which will run in the core main loop. * Some pseudo-code of the main loop will look like this: * head * while (...) { * tail * head * } * tail * * It may look a bit weird, but this setup allows to hide instruction * latencies better and also utilize dual-issue capability more * efficiently (make pairs of load-store and ALU instructions). * * So what we need now is a '*_tail_head' macro, which will be used * in the core main loop. A trivial straightforward implementation * of this macro would look like this: * * pixman_composite_over_8888_0565_process_pixblock_tail * st1 {v28.4h, v29.4h}, [DST_W], #32 * ld1 {v4.4h, v5.4h}, [DST_R], #16 * ld4 {v0.2s, v1.2s, v2.2s, v3.2s}, [SRC], #32 * pixman_composite_over_8888_0565_process_pixblock_head * cache_preload 8, 8 * * Now it also got some VLD/VST instructions. We simply can't move from * processing one block of pixels to the other one with just arithmetics. * The previously processed data needs to be written to memory and new * data needs to be fetched. Fortunately, this main loop does not deal * with partial leading/trailing pixels and can load/store a full block * of pixels in a bulk. Additionally, destination buffer is already * 16 bytes aligned here (which is good for performance). * * New things here are DST_R, DST_W, SRC and MASK identifiers. These * are the aliases for ARM registers which are used as pointers for * accessing data. We maintain separate pointers for reading and writing * destination buffer (DST_R and DST_W). * * Another new thing is 'cache_preload' macro. It is used for prefetching * data into CPU L2 cache and improve performance when dealing with large * images which are far larger than cache size. It uses one argument * (actually two, but they need to be the same here) - number of pixels * in a block. 
Looking into 'pixman-arm-neon-asm.h' can provide some * details about this macro. Moreover, if good performance is needed * the code from this macro needs to be copied into '*_tail_head' macro * and mixed with the rest of code for optimal instructions scheduling. * We are actually doing it below. * * Now after all the explanations, here is the optimized code. * Different instruction streams (originaling from '*_head', '*_tail' * and 'cache_preload' macro) use different indentation levels for * better readability. Actually taking the code from one of these * indentation levels and ignoring a few LD/ST instructions would * result in exactly the code from '*_head', '*_tail' or 'cache_preload' * macro! */ #if 1 .macro pixman_composite_over_8888_0565_process_pixblock_tail_head uqadd v17.8b, v2.8b, v20.8b ld1 {v4.4h, v5.4h}, [DST_R], #16 mov v4.d[1], v5.d[0] uqadd v18.8b, v0.8b, v22.8b uqadd v19.8b, v1.8b, v23.8b shrn v6.8b, v4.8h, #8 fetch_src_pixblock shrn v7.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 PF add, PF_X, PF_X, #8 ushll v8.8h, v19.8b, #7 sli v8.8h, v8.8h, #1 PF tst, PF_CTL, #0xF sri v6.8b, v6.8b, #5 PF beq, 10f PF add, PF_X, PF_X, #8 10: mvn v3.8b, v3.8b PF beq, 10f PF sub, PF_CTL, PF_CTL, #1 10: sri v7.8b, v7.8b, #6 shrn v30.8b, v4.8h, #2 umull v10.8h, v3.8b, v6.8b PF lsl, DUMMY, PF_X, #src_bpp_shift PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY] umull v11.8h, v3.8b, v7.8b umull v12.8h, v3.8b, v30.8b PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] sri v14.8h, v8.8h, #5 PF cmp, PF_X, ORIG_W ushll v9.8h, v18.8b, #7 sli v9.8h, v9.8h, #1 urshr v17.8h, v10.8h, #8 PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: urshr v19.8h, v11.8h, #8 urshr v18.8h, v12.8h, #8 PF ble, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] PF ble, 10f PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb, DUMMY, [PF_SRC, DUMMY] PF add, PF_SRC, PF_SRC, #1 10: raddhn v20.8b, 
v10.8h, v17.8h raddhn v23.8b, v11.8h, v19.8h PF ble, 10f PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_SRC, #1 10: raddhn v22.8b, v12.8h, v18.8h st1 {v14.8h}, [DST_W], #16 .endm #else /* If we did not care much about the performance, we would just use this... */ .macro pixman_composite_over_8888_0565_process_pixblock_tail_head pixman_composite_over_8888_0565_process_pixblock_tail st1 {v14.8h}, [DST_W], #16 ld1 {v4.4h, v4.5h}, [DST_R], #16 fetch_src_pixblock pixman_composite_over_8888_0565_process_pixblock_head cache_preload 8, 8 .endm #endif /* * And now the final part. We are using 'generate_composite_function' macro * to put all the stuff together. We are specifying the name of the function * which we want to get, number of bits per pixel for the source, mask and * destination (0 if unused, like mask in this case). Next come some bit * flags: * FLAG_DST_READWRITE - tells that the destination buffer is both read * and written, for write-only buffer we would use * FLAG_DST_WRITEONLY flag instead * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data * and separate color channels for 32bpp format. * The next things are: * - the number of pixels processed per iteration (8 in this case, because * that's the maximum what can fit into four 64-bit NEON registers). * - prefetch distance, measured in pixel blocks. In this case it is 5 times * by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal * prefetch distance can be selected by running some benchmarks. 
 *
 * After that we specify some macros, these are 'default_init',
 * 'default_cleanup' here which are empty (but it is possible to have custom
 * init/cleanup macros to be able to save/restore some extra NEON registers
 * like d8-d15 or do anything else) followed by
 * 'pixman_composite_over_8888_0565_process_pixblock_head',
 * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
 * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
 * which we got implemented above.
 *
 * The last part is the NEON registers allocation scheme.
 */
generate_composite_function \
    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_over_8888_0565_process_pixblock_head, \
    pixman_composite_over_8888_0565_process_pixblock_tail, \
    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    0,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/*
 * OVER n -> 0565: composite a solid (pre-inverted-alpha) source, prepared by
 * the init macro below into v0-v3, over an r5g6b5 destination block.
 */
.macro pixman_composite_over_n_0565_process_pixblock_head
    /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
       and put data into v6 - red, v7 - green, v30 - blue */
    mov         v4.d[1], v5.d[0]
    shrn        v6.8b, v4.8h, #8
    shrn        v7.8b, v4.8h, #3
    sli         v4.8h, v4.8h, #5
    sri         v6.8b, v6.8b, #5
    sri         v7.8b, v7.8b, #6
    shrn        v30.8b, v4.8h, #2
    /* now do alpha blending, storing results in 8-bit planar format
       into v20 - red, v23 - green, v22 - blue */
    umull       v10.8h, v3.8b, v6.8b
    umull       v11.8h, v3.8b, v7.8b
    umull       v12.8h, v3.8b, v30.8b
    urshr       v13.8h, v10.8h, #8
    urshr       v14.8h, v11.8h, #8
    urshr       v15.8h, v12.8h, #8
    raddhn      v20.8b, v10.8h, v13.8h
    raddhn      v23.8b, v11.8h, v14.8h
    raddhn      v22.8b, v12.8h, v15.8h
.endm

.macro pixman_composite_over_n_0565_process_pixblock_tail
    /* ... continue alpha blending */
    uqadd       v17.8b, v2.8b, v20.8b
    uqadd       v18.8b, v0.8b, v22.8b
    uqadd       v19.8b, v1.8b, v23.8b
    /* convert the result to r5g6b5 and store it into {v14} */
    ushll       v14.8h, v17.8b, #7
    sli         v14.8h, v14.8h, #1
    ushll       v8.8h, v19.8b, #7
    sli         v8.8h, v8.8h, #1
    ushll       v9.8h, v18.8b, #7
    sli         v9.8h, v9.8h, #1
    sri         v14.8h, v8.8h, #5
    sri         v14.8h, v9.8h, #11
    mov         v28.d[0], v14.d[0]
    mov         v29.d[0], v14.d[1]
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_0565_process_pixblock_tail_head
    pixman_composite_over_n_0565_process_pixblock_tail
    ld1         {v4.4h, v5.4h}, [DST_R], #16
    st1         {v14.8h}, [DST_W], #16
    pixman_composite_over_n_0565_process_pixblock_head
    cache_preload 8, 8
.endm

/* Broadcast the solid 32-bit source color (in w4) into planar v0-v3. */
.macro pixman_composite_over_n_0565_init
    mov         v3.s[0], w4
    dup         v0.8b, v3.b[0]
    dup         v1.8b, v3.b[1]
    dup         v2.8b, v3.b[2]
    dup         v3.8b, v3.b[3]
    mvn         v3.8b, v3.8b      /* invert source alpha */
.endm

generate_composite_function \
    pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
    FLAG_DST_READWRITE, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_0565_init, \
    default_cleanup, \
    pixman_composite_over_n_0565_process_pixblock_head, \
    pixman_composite_over_n_0565_process_pixblock_tail, \
    pixman_composite_over_n_0565_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4,  /* dst_r_basereg */ \
    0,  /* src_basereg   */ \
    24  /* mask_basereg  */

/******************************************************************************/

/*
 * SRC 8888 -> 0565: pack planar a8r8g8b8 source (v0=B, v1=G, v2=R) into
 * r5g6b5; no destination read is needed.
 */
.macro pixman_composite_src_8888_0565_process_pixblock_head
    ushll       v8.8h, v1.8b, #7
    sli         v8.8h, v8.8h, #1
    ushll       v14.8h, v2.8b, #7
    sli         v14.8h, v14.8h, #1
    ushll       v9.8h, v0.8b, #7
    sli         v9.8h, v9.8h, #1
.endm

.macro pixman_composite_src_8888_0565_process_pixblock_tail
    sri         v14.8h, v8.8h, #5
    sri         v14.8h, v9.8h, #11
    mov         v28.d[0], v14.d[0]
    mov         v29.d[0], v14.d[1]
.endm

/* Pipelined tail+head (with advanced-prefetch 'PF' stream interleaved). */
.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
        sri         v14.8h, v8.8h, #5
                                    PF add, PF_X, PF_X, #8
                                    PF tst, PF_CTL, #0xF
    fetch_src_pixblock
PF beq, 10f PF add, PF_X, PF_X, #8 PF sub, PF_CTL, PF_CTL, #1 10: sri v14.8h, v9.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] PF cmp, PF_X, ORIG_W PF lsl, DUMMY, PF_X, #src_bpp_shift PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY] ushll v8.8h, v1.8b, #7 sli v8.8h, v8.8h, #1 st1 {v14.8h}, [DST_W], #16 PF ble, 10f PF sub, PF_X, PF_X, ORIG_W PF subs, PF_CTL, PF_CTL, #0x10 10: ushll v14.8h, v2.8b, #7 sli v14.8h, v14.8h, #1 PF ble, 10f PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb, DUMMY, [PF_SRC, DUMMY] PF add, PF_SRC, PF_SRC, #1 10: ushll v9.8h, v0.8b, #7 sli v9.8h, v9.8h, #1 .endm generate_composite_function \ pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ pixman_composite_src_8888_0565_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_src_0565_8888_process_pixblock_head mov v0.d[1], v1.d[0] shrn v30.8b, v0.8h, #8 shrn v29.8b, v0.8h, #3 sli v0.8h, v0.8h, #5 movi v31.8b, #255 sri v30.8b, v30.8b, #5 sri v29.8b, v29.8b, #6 shrn v28.8b, v0.8h, #2 .endm .macro pixman_composite_src_0565_8888_process_pixblock_tail .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_src_0565_8888_process_pixblock_tail_head pixman_composite_src_0565_8888_process_pixblock_tail st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 fetch_src_pixblock pixman_composite_src_0565_8888_process_pixblock_head cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ 
pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8_8_process_pixblock_head uqadd v28.8b, v0.8b, v4.8b uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm .macro pixman_composite_add_8_8_process_pixblock_tail .endm .macro pixman_composite_add_8_8_process_pixblock_tail_head fetch_src_pixblock PF add, PF_X, PF_X, #32 PF tst, PF_CTL, #0xF ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 PF beq, 10f PF add, PF_X, PF_X, #32 PF sub, PF_CTL, PF_CTL, #1 10: st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF cmp, PF_X, ORIG_W PF lsl, DUMMY, PF_X, #src_bpp_shift PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY] PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] PF ble, 10f PF sub, PF_X, PF_X, ORIG_W PF subs, PF_CTL, PF_CTL, #0x10 10: uqadd v28.8b, v0.8b, v4.8b PF ble, 10f PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb, DUMMY, [PF_SRC, DUMMY] PF add, PF_SRC, PF_SRC, #1 PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm generate_composite_function \ pixman_composite_add_8_8_asm_neon, 8, 0, 8, \ FLAG_DST_READWRITE, \ 32, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_add_8888_8888_process_pixblock_tail_head fetch_src_pixblock PF add, PF_X, PF_X, #8 PF tst, PF_CTL, #0xF ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 PF beq, 10f PF add, 
PF_X, PF_X, #8 PF sub, PF_CTL, PF_CTL, #1 10: st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF cmp, PF_X, ORIG_W PF lsl, DUMMY, PF_X, #src_bpp_shift PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY] PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] PF ble, 10f PF sub, PF_X, PF_X, ORIG_W PF subs, PF_CTL, PF_CTL, #0x10 10: uqadd v28.8b, v0.8b, v4.8b PF ble, 10f PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb, DUMMY, [PF_SRC, DUMMY] PF add, PF_SRC, PF_SRC, #1 PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: uqadd v29.8b, v1.8b, v5.8b uqadd v30.8b, v2.8b, v6.8b uqadd v31.8b, v3.8b, v7.8b .endm generate_composite_function \ pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ pixman_composite_scanline_add_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_add_8_8_process_pixblock_head, \ pixman_composite_add_8_8_process_pixblock_tail, \ pixman_composite_add_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_out_reverse_8888_8888_process_pixblock_head mvn v24.8b, v3.8b /* get inverted alpha */ /* do alpha blending */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b umull v11.8h, v24.8b, v7.8b .endm .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn 
v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h .endm .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v14.8h, v8.8h, #8 PF add, PF_X, PF_X, #8 PF tst, PF_CTL, #0xF urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 PF beq, 10f PF add, PF_X, PF_X, #8 PF sub, PF_CTL, PF_CTL, #1 10: raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h PF cmp, PF_X, ORIG_W raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h fetch_src_pixblock PF lsl, DUMMY, PF_X, #src_bpp_shift PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY] mvn v22.8b, v3.8b PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v8.8h, v22.8b, v4.8b PF ble, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: umull v9.8h, v22.8b, v5.8b PF ble, 10f PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb, DUMMY, [PF_SRC, DUMMY] PF add, PF_SRC, PF_SRC, #1 10: umull v10.8h, v22.8b, v6.8b PF ble, 10f PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: umull v11.8h, v22.8b, v7.8b .endm generate_composite_function_single_scanline \ pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8888_8888_process_pixblock_head, \ pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_8888_8888_process_pixblock_head pixman_composite_out_reverse_8888_8888_process_pixblock_head .endm .macro pixman_composite_over_8888_8888_process_pixblock_tail pixman_composite_out_reverse_8888_8888_process_pixblock_tail uqadd v28.8b, 
v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_8888_8888_process_pixblock_tail_head ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v14.8h, v8.8h, #8 PF add, PF_X, PF_X, #8 PF tst, PF_CTL, #0xF urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 PF beq, 10f PF add, PF_X, PF_X, #8 PF sub, PF_CTL, PF_CTL, #1 10: raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h PF cmp, PF_X, ORIG_W raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b fetch_src_pixblock PF lsl, DUMMY, PF_X, #src_bpp_shift PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY] mvn v22.8b, v3.8b PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v8.8h, v22.8b, v4.8b PF ble, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: umull v9.8h, v22.8b, v5.8b PF ble, 10f PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF ldrsb, DUMMY, [PF_SRC, DUMMY] PF add, PF_SRC, PF_SRC, #1 10: umull v10.8h, v22.8b, v6.8b PF ble, 10f PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: umull v11.8h, v22.8b, v7.8b .endm generate_composite_function \ pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_single_scanline \ pixman_composite_scanline_over_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 
default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8888_process_pixblock_head /* deinterleaved source pixels in {v0, v1, v2, v3} */ /* inverted alpha in {v24} */ /* destination pixels in {v4, v5, v6, v7} */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b umull v11.8h, v24.8b, v7.8b .endm .macro pixman_composite_over_n_8888_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_n_8888_process_pixblock_tail_head urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v16.8h, v10.8h, #8 urshr v17.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v16.8h, v10.8h raddhn v31.8b, v17.8h, v11.8h ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 uqadd v28.8b, v0.8b, v28.8b PF add, PF_X, PF_X, #8 PF tst, PF_CTL, #0x0F PF beq, 10f PF add, PF_X, PF_X, #8 PF sub, PF_CTL, PF_CTL, #1 10: uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b PF cmp, PF_X, ORIG_W umull v8.8h, v24.8b, v4.8b PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] umull v9.8h, v24.8b, v5.8b PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v10.8h, v24.8b, v6.8b PF subs, PF_CTL, PF_CTL, #0x10 umull v11.8h, v24.8b, v7.8b PF ble, 10f PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm .macro 
pixman_composite_over_n_8888_init mov v3.s[0], w4 dup v0.8b, v3.b[0] dup v1.8b, v3.b[1] dup v2.8b, v3.b[2] dup v3.8b, v3.b[3] mvn v24.8b, v3.8b /* get inverted alpha */ .endm generate_composite_function \ pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8888_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_n_8888_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head urshr v14.8h, v8.8h, #8 PF add, PF_X, PF_X, #8 PF tst, PF_CTL, #0xF urshr v15.8h, v9.8h, #8 urshr v12.8h, v10.8h, #8 urshr v13.8h, v11.8h, #8 PF beq, 10f PF add, PF_X, PF_X, #8 PF sub, PF_CTL, PF_CTL, #1 10: raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h PF cmp, PF_X, ORIG_W raddhn v30.8b, v12.8h, v10.8h raddhn v31.8b, v13.8h, v11.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_R], #32 mvn v22.8b, v3.8b PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 PF blt, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v8.8h, v22.8b, v4.8b PF blt, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: umull v9.8h, v22.8b, v5.8b umull v10.8h, v22.8b, v6.8b PF blt, 10f PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: umull v11.8h, v22.8b, v7.8b .endm .macro pixman_composite_over_reverse_n_8888_init mov v7.s[0], w4 dup v4.8b, v7.b[0] dup v5.8b, v7.b[1] dup v6.8b, v7.b[2] dup v7.8b, v7.b[3] .endm generate_composite_function \ pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_READWRITE | 
FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_reverse_n_8888_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 4, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_8_0565_process_pixblock_head umull v0.8h, v24.8b, v8.8b /* IN for SRC pixels (part1) */ umull v1.8h, v24.8b, v9.8b umull v2.8h, v24.8b, v10.8b umull v3.8h, v24.8b, v11.8b mov v4.d[1], v5.d[0] shrn v25.8b, v4.8h, #8 /* convert DST_R data to 32-bpp (part1) */ shrn v26.8b, v4.8h, #3 sli v4.8h, v4.8h, #5 urshr v17.8h, v0.8h, #8 /* IN for SRC pixels (part2) */ urshr v18.8h, v1.8h, #8 urshr v19.8h, v2.8h, #8 urshr v20.8h, v3.8h, #8 raddhn v0.8b, v0.8h, v17.8h raddhn v1.8b, v1.8h, v18.8h raddhn v2.8b, v2.8h, v19.8h raddhn v3.8b, v3.8h, v20.8h sri v25.8b, v25.8b, #5 /* convert DST_R data to 32-bpp (part2) */ sri v26.8b, v26.8b, #6 mvn v3.8b, v3.8b shrn v30.8b, v4.8h, #2 umull v18.8h, v3.8b, v25.8b /* now do alpha blending */ umull v19.8h, v3.8b, v26.8b umull v20.8h, v3.8b, v30.8b .endm .macro pixman_composite_over_8888_8_0565_process_pixblock_tail /* 3 cycle bubble (after vmull.u8) */ urshr v5.8h, v18.8h, #8 urshr v6.8h, v19.8h, #8 urshr v7.8h, v20.8h, #8 raddhn v17.8b, v18.8h, v5.8h raddhn v19.8b, v19.8h, v6.8h raddhn v18.8b, v20.8h, v7.8h uqadd v5.8b, v2.8b, v17.8b /* 1 cycle bubble */ uqadd v6.8b, v0.8b, v18.8b uqadd v7.8b, v1.8b, v19.8b ushll v14.8h, v5.8b, #7 /* convert to 16bpp */ sli v14.8h, v14.8h, #1 ushll v18.8h, v7.8b, #7 sli v18.8h, v18.8h, #1 ushll v19.8h, v6.8b, #7 sli v19.8h, v19.8h, #1 sri v14.8h, v18.8h, #5 /* 1 cycle bubble */ sri v14.8h, v19.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] 
.endm .macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head #if 0 ld1 {v4.8h}, [DST_R], #16 shrn v25.8b, v4.8h, #8 fetch_mask_pixblock shrn v26.8b, v4.8h, #3 fetch_src_pixblock umull v22.8h, v24.8b, v10.8b urshr v13.8h, v18.8h, #8 urshr v11.8h, v19.8h, #8 urshr v15.8h, v20.8h, #8 raddhn v17.8b, v18.8h, v13.8h raddhn v19.8b, v19.8h, v11.8h raddhn v18.8b, v20.8h, v15.8h uqadd v17.8b, v2.8b, v17.8b umull v21.8h, v24.8b, v9.8b uqadd v18.8b, v0.8b, v18.8b uqadd v19.8b, v1.8b, v19.8b ushll v14.8h, v17.8b, #7 sli v14.8h, v14.8h, #1 umull v20.8h, v24.8b, v8.8b ushll v18.8h, v18.8b, #7 sli v18.8h, v18.8h, #1 ushll v19.8h, v19.8b, #7 sli v19.8h, v19.8h, #1 sri v14.8h, v18.8h, #5 umull v23.8h, v24.8b, v11.8b sri v14.8h, v19.8h, #11 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] cache_preload 8, 8 sli v4.8h, v4.8h, #5 urshr v16.8h, v20.8h, #8 urshr v17.8h, v21.8h, #8 urshr v18.8h, v22.8h, #8 urshr v19.8h, v23.8h, #8 raddhn v0.8b, v20.8h, v16.8h raddhn v1.8b, v21.8h, v17.8h raddhn v2.8b, v22.8h, v18.8h raddhn v3.8b, v23.8h, v19.8h sri v25.8b, v25.8b, #5 sri v26.8b, v26.8b, #6 mvn v3.8b, v3.8b shrn v30.8b, v4.8h, #2 st1 {v14.8h}, [DST_W], #16 umull v18.8h, v3.8b, v25.8b umull v19.8h, v3.8b, v26.8b umull v20.8h, v3.8b, v30.8b #else pixman_composite_over_8888_8_0565_process_pixblock_tail st1 {v28.4h, v29.4h}, [DST_W], #16 ld1 {v4.4h, v5.4h}, [DST_R], #16 fetch_mask_pixblock fetch_src_pixblock pixman_composite_over_8888_8_0565_process_pixblock_head #endif .endm generate_composite_function \ pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 
8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ /* * This function needs a special initialization of solid mask. * Solid source pixel data is fetched from stack at ARGS_STACK_OFFSET * offset, split into color components and replicated in d8-d11 * registers. Additionally, this function needs all the NEON registers, * so it has to save d8-d15 registers which are callee saved according * to ABI. These registers are restored from 'cleanup' macro. All the * other NEON registers are caller saved, so can be clobbered freely * without introducing any problems. */ .macro pixman_composite_over_n_8_0565_init mov v11.s[0], w4 dup v8.8b, v11.b[0] dup v9.8b, v11.b[1] dup v10.8b, v11.b[2] dup v11.8b, v11.b[3] .endm .macro pixman_composite_over_n_8_0565_cleanup .endm generate_composite_function \ pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_0565_init, \ pixman_composite_over_n_8_0565_cleanup, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_8888_n_0565_init mov v24.s[0], w6 dup v24.8b, v24.b[3] .endm .macro pixman_composite_over_8888_n_0565_cleanup .endm generate_composite_function \ pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_8888_n_0565_init, \ pixman_composite_over_8888_n_0565_cleanup, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ 
pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_0565_0565_process_pixblock_head .endm .macro pixman_composite_src_0565_0565_process_pixblock_tail .endm .macro pixman_composite_src_0565_0565_process_pixblock_tail_head st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32 fetch_src_pixblock cache_preload 16, 16 .endm generate_composite_function \ pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \ FLAG_DST_WRITEONLY, \ 16, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_0565_process_pixblock_head, \ pixman_composite_src_0565_0565_process_pixblock_tail, \ pixman_composite_src_0565_0565_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8_process_pixblock_head .endm .macro pixman_composite_src_n_8_process_pixblock_tail .endm .macro pixman_composite_src_n_8_process_pixblock_tail_head st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], 32 .endm .macro pixman_composite_src_n_8_init mov v0.s[0], w4 dup v3.8b, v0.b[0] dup v2.8b, v0.b[0] dup v1.8b, v0.b[0] dup v0.8b, v0.b[0] .endm .macro pixman_composite_src_n_8_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_asm_neon, 0, 0, 8, \ FLAG_DST_WRITEONLY, \ 32, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_8_init, \ pixman_composite_src_n_8_cleanup, \ pixman_composite_src_n_8_process_pixblock_head, \ pixman_composite_src_n_8_process_pixblock_tail, \ pixman_composite_src_n_8_process_pixblock_tail_head, \ 0, /* 
dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_0565_process_pixblock_head .endm .macro pixman_composite_src_n_0565_process_pixblock_tail .endm .macro pixman_composite_src_n_0565_process_pixblock_tail_head st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32 .endm .macro pixman_composite_src_n_0565_init mov v0.s[0], w4 dup v3.4h, v0.h[0] dup v2.4h, v0.h[0] dup v1.4h, v0.h[0] dup v0.4h, v0.h[0] .endm .macro pixman_composite_src_n_0565_cleanup .endm generate_composite_function \ pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \ FLAG_DST_WRITEONLY, \ 16, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_0565_init, \ pixman_composite_src_n_0565_cleanup, \ pixman_composite_src_n_0565_process_pixblock_head, \ pixman_composite_src_n_0565_process_pixblock_tail, \ pixman_composite_src_n_0565_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8888_process_pixblock_head .endm .macro pixman_composite_src_n_8888_process_pixblock_tail .endm .macro pixman_composite_src_n_8888_process_pixblock_tail_head st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32 .endm .macro pixman_composite_src_n_8888_init mov v0.s[0], w4 dup v3.2s, v0.s[0] dup v2.2s, v0.s[0] dup v1.2s, v0.s[0] dup v0.2s, v0.s[0] .endm .macro pixman_composite_src_n_8888_cleanup .endm generate_composite_function \ pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 0, /* prefetch distance */ \ pixman_composite_src_n_8888_init, \ pixman_composite_src_n_8888_cleanup, \ pixman_composite_src_n_8888_process_pixblock_head, \ pixman_composite_src_n_8888_process_pixblock_tail, \ 
pixman_composite_src_n_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_8888_8888_process_pixblock_head .endm .macro pixman_composite_src_8888_8888_process_pixblock_tail .endm .macro pixman_composite_src_8888_8888_process_pixblock_tail_head st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32 fetch_src_pixblock cache_preload 8, 8 .endm generate_composite_function \ pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_8888_8888_process_pixblock_head, \ pixman_composite_src_8888_8888_process_pixblock_tail, \ pixman_composite_src_8888_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_x888_8888_process_pixblock_head orr v0.8b, v0.8b, v4.8b orr v1.8b, v1.8b, v4.8b orr v2.8b, v2.8b, v4.8b orr v3.8b, v3.8b, v4.8b .endm .macro pixman_composite_src_x888_8888_process_pixblock_tail .endm .macro pixman_composite_src_x888_8888_process_pixblock_tail_head st1 {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32 fetch_src_pixblock orr v0.8b, v0.8b, v4.8b orr v1.8b, v1.8b, v4.8b orr v2.8b, v2.8b, v4.8b orr v3.8b, v3.8b, v4.8b cache_preload 8, 8 .endm .macro pixman_composite_src_x888_8888_init movi v4.2s, #0xff, lsl 24 .endm generate_composite_function \ pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ pixman_composite_src_x888_8888_init, \ default_cleanup, \ pixman_composite_src_x888_8888_process_pixblock_head, \ 
pixman_composite_src_x888_8888_process_pixblock_tail, \ pixman_composite_src_x888_8888_process_pixblock_tail_head, \ 0, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_src_n_8_8888_process_pixblock_head /* expecting solid source in {v0, v1, v2, v3} */ /* mask is in v24 (v25, v26, v27 are unused) */ /* in */ umull v8.8h, v24.8b, v0.8b umull v9.8h, v24.8b, v1.8b umull v10.8h, v24.8b, v2.8b umull v11.8h, v24.8b, v3.8b ursra v8.8h, v8.8h, #8 ursra v9.8h, v9.8h, #8 ursra v10.8h, v10.8h, #8 ursra v11.8h, v11.8h, #8 .endm .macro pixman_composite_src_n_8_8888_process_pixblock_tail rshrn v28.8b, v8.8h, #8 rshrn v29.8b, v9.8h, #8 rshrn v30.8b, v10.8h, #8 rshrn v31.8b, v11.8h, #8 .endm .macro pixman_composite_src_n_8_8888_process_pixblock_tail_head fetch_mask_pixblock PF add, PF_X, PF_X, #8 rshrn v28.8b, v8.8h, #8 PF tst, PF_CTL, #0x0F rshrn v29.8b, v9.8h, #8 PF beq, 10f PF add, PF_X, PF_X, #8 10: rshrn v30.8b, v10.8h, #8 PF beq, 10f PF sub, PF_CTL, PF_CTL, #1 10: rshrn v31.8b, v11.8h, #8 PF cmp, PF_X, ORIG_W umull v8.8h, v24.8b, v0.8b PF lsl, DUMMY, PF_X, #mask_bpp_shift PF prfm, PREFETCH_MODE, [PF_MASK, DUMMY] umull v9.8h, v24.8b, v1.8b PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v10.8h, v24.8b, v2.8b PF ble, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: umull v11.8h, v24.8b, v3.8b PF ble, 10f PF lsl, DUMMY, MASK_STRIDE, #mask_bpp_shift PF ldrsb, DUMMY, [PF_MASK, DUMMY] PF add, PF_MASK, PF_MASK, #1 10: st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ursra v8.8h, v8.8h, #8 ursra v9.8h, v9.8h, #8 ursra v10.8h, v10.8h, #8 ursra v11.8h, v11.8h, #8 .endm .macro pixman_composite_src_n_8_8888_init mov v3.s[0], w4 dup v0.8b, v3.b[0] dup v1.8b, v3.b[1] dup v2.8b, v3.b[2] dup v3.8b, v3.b[3] .endm .macro pixman_composite_src_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \ 
FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_src_n_8_8888_init, \ pixman_composite_src_n_8_8888_cleanup, \ pixman_composite_src_n_8_8888_process_pixblock_head, \ pixman_composite_src_n_8_8888_process_pixblock_tail, \ pixman_composite_src_n_8_8888_process_pixblock_tail_head, \ /******************************************************************************/ .macro pixman_composite_src_n_8_8_process_pixblock_head umull v0.8h, v24.8b, v16.8b umull v1.8h, v25.8b, v16.8b umull v2.8h, v26.8b, v16.8b umull v3.8h, v27.8b, v16.8b ursra v0.8h, v0.8h, #8 ursra v1.8h, v1.8h, #8 ursra v2.8h, v2.8h, #8 ursra v3.8h, v3.8h, #8 .endm .macro pixman_composite_src_n_8_8_process_pixblock_tail rshrn v28.8b, v0.8h, #8 rshrn v29.8b, v1.8h, #8 rshrn v30.8b, v2.8h, #8 rshrn v31.8b, v3.8h, #8 .endm .macro pixman_composite_src_n_8_8_process_pixblock_tail_head fetch_mask_pixblock PF add, PF_X, PF_X, #8 rshrn v28.8b, v0.8h, #8 PF tst, PF_CTL, #0x0F rshrn v29.8b, v1.8h, #8 PF beq, 10f PF add, PF_X, PF_X, #8 10: rshrn v30.8b, v2.8h, #8 PF beq, 10f PF sub, PF_CTL, PF_CTL, #1 10: rshrn v31.8b, v3.8h, #8 PF cmp, PF_X, ORIG_W umull v0.8h, v24.8b, v16.8b PF lsl, DUMMY, PF_X, mask_bpp_shift PF prfm, PREFETCH_MODE, [PF_MASK, DUMMY] umull v1.8h, v25.8b, v16.8b PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v2.8h, v26.8b, v16.8b PF ble, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: umull v3.8h, v27.8b, v16.8b PF ble, 10f PF lsl, DUMMY, MASK_STRIDE, #mask_bpp_shift PF ldrsb, DUMMY, [PF_MASK, DUMMY] PF add, PF_MASK, PF_MASK, #1 10: st1 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 ursra v0.8h, v0.8h, #8 ursra v1.8h, v1.8h, #8 ursra v2.8h, v2.8h, #8 ursra v3.8h, v3.8h, #8 .endm .macro pixman_composite_src_n_8_8_init mov v16.s[0], w4 dup v16.8b, v16.b[3] .endm .macro pixman_composite_src_n_8_8_cleanup .endm generate_composite_function \ pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \ FLAG_DST_WRITEONLY, \ 32, 
/* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_src_n_8_8_init, \ pixman_composite_src_n_8_8_cleanup, \ pixman_composite_src_n_8_8_process_pixblock_head, \ pixman_composite_src_n_8_8_process_pixblock_tail, \ pixman_composite_src_n_8_8_process_pixblock_tail_head /******************************************************************************/ .macro pixman_composite_over_n_8_8888_process_pixblock_head /* expecting deinterleaved source data in {v8, v9, v10, v11} */ /* v8 - blue, v9 - green, v10 - red, v11 - alpha */ /* and destination data in {v4, v5, v6, v7} */ /* mask is in v24 (v25, v26, v27 are unused) */ /* in */ umull v12.8h, v24.8b, v8.8b umull v13.8h, v24.8b, v9.8b umull v14.8h, v24.8b, v10.8b umull v15.8h, v24.8b, v11.8b urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 urshr v18.8h, v14.8h, #8 urshr v19.8h, v15.8h, #8 raddhn v0.8b, v12.8h, v16.8h raddhn v1.8b, v13.8h, v17.8h raddhn v2.8b, v14.8h, v18.8h raddhn v3.8b, v15.8h, v19.8h mvn v25.8b, v3.8b /* get inverted alpha */ /* source: v0 - blue, v1 - green, v2 - red, v3 - alpha */ /* destination: v4 - blue, v5 - green, v6 - red, v7 - alpha */ /* now do alpha blending */ umull v12.8h, v25.8b, v4.8b umull v13.8h, v25.8b, v5.8b umull v14.8h, v25.8b, v6.8b umull v15.8h, v25.8b, v7.8b .endm .macro pixman_composite_over_n_8_8888_process_pixblock_tail urshr v16.8h, v12.8h, #8 urshr v17.8h, v13.8h, #8 urshr v18.8h, v14.8h, #8 urshr v19.8h, v15.8h, #8 raddhn v28.8b, v16.8h, v12.8h raddhn v29.8b, v17.8h, v13.8h raddhn v30.8b, v18.8h, v14.8h raddhn v31.8b, v19.8h, v15.8h uqadd v28.8b, v0.8b, v28.8b uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b .endm .macro pixman_composite_over_n_8_8888_process_pixblock_tail_head urshr v16.8h, v12.8h, #8 ld4 {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32 urshr v17.8h, v13.8h, #8 fetch_mask_pixblock urshr v18.8h, v14.8h, #8 PF add, PF_X, PF_X, #8 urshr v19.8h, v15.8h, #8 PF tst, PF_CTL, #0x0F raddhn 
v28.8b, v16.8h, v12.8h PF beq, 10f PF add, PF_X, PF_X, #8 10: raddhn v29.8b, v17.8h, v13.8h PF beq, 10f PF sub, PF_CTL, PF_CTL, #1 10: raddhn v30.8b, v18.8h, v14.8h PF cmp, PF_X, ORIG_W raddhn v31.8b, v19.8h, v15.8h PF lsl, DUMMY, PF_X, #dst_bpp_shift PF prfm, PREFETCH_MODE, [PF_DST, DUMMY] umull v16.8h, v24.8b, v8.8b PF lsl, DUMMY, PF_X, #mask_bpp_shift PF prfm, PREFETCH_MODE, [PF_MASK, DUMMY] umull v17.8h, v24.8b, v9.8b PF ble, 10f PF sub, PF_X, PF_X, ORIG_W 10: umull v18.8h, v24.8b, v10.8b PF ble, 10f PF subs, PF_CTL, PF_CTL, #0x10 10: umull v19.8h, v24.8b, v11.8b PF ble, 10f PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF ldrsb, DUMMY, [PF_DST, DUMMY] PF add, PF_DST, PF_DST, #1 10: uqadd v28.8b, v0.8b, v28.8b PF ble, 10f PF lsl, DUMMY, MASK_STRIDE, #mask_bpp_shift PF ldrsb, DUMMY, [PF_MASK, DUMMY] PF add, PF_MASK, PF_MASK, #1 10: uqadd v29.8b, v1.8b, v29.8b uqadd v30.8b, v2.8b, v30.8b uqadd v31.8b, v3.8b, v31.8b urshr v12.8h, v16.8h, #8 urshr v13.8h, v17.8h, #8 urshr v14.8h, v18.8h, #8 urshr v15.8h, v19.8h, #8 raddhn v0.8b, v16.8h, v12.8h raddhn v1.8b, v17.8h, v13.8h raddhn v2.8b, v18.8h, v14.8h raddhn v3.8b, v19.8h, v15.8h st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 mvn v25.8b, v3.8b umull v12.8h, v25.8b, v4.8b umull v13.8h, v25.8b, v5.8b umull v14.8h, v25.8b, v6.8b umull v15.8h, v25.8b, v7.8b .endm .macro pixman_composite_over_n_8_8888_init mov v11.s[0], w4 dup v8.8b, v11.b[0] dup v9.8b, v11.b[1] dup v10.8b, v11.b[2] dup v11.8b, v11.b[3] .endm .macro pixman_composite_over_n_8_8888_cleanup .endm generate_composite_function \ pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_n_8_8888_init, \ pixman_composite_over_n_8_8888_cleanup, \ pixman_composite_over_n_8_8888_process_pixblock_head, \ pixman_composite_over_n_8_8888_process_pixblock_tail, \ pixman_composite_over_n_8_8888_process_pixblock_tail_head 
/******************************************************************************/

/*
 * over_n_8_8: solid source OVER an a8 destination through an a8 mask,
 * 32 pixels per block.  The init macro broadcasts the solid colour's
 * alpha byte (byte 3 of w4) into v8; each block then computes
 *     dest = (mask * src_a) + dest * ~(mask * src_a)
 * using pixman's usual urshr/raddhn rounding pair to approximate
 * division by 255.
 */

.macro pixman_composite_over_n_8_8_process_pixblock_head
    /* in: mask bytes in v24-v27, dest bytes in v4-v7, src alpha in v8 */
    /* mask * src_a, widened to 16 bit */
    umull       v0.8h,  v24.8b, v8.8b
    umull       v1.8h,  v25.8b, v8.8b
    umull       v2.8h,  v26.8b, v8.8b
    umull       v3.8h,  v27.8b, v8.8b
    /* narrow back with rounding: v0-v3 = (mask * src_a) / 255 */
    urshr       v10.8h, v0.8h,  #8
    urshr       v11.8h, v1.8h,  #8
    urshr       v12.8h, v2.8h,  #8
    urshr       v13.8h, v3.8h,  #8
    raddhn      v0.8b,  v0.8h,  v10.8h
    raddhn      v1.8b,  v1.8h,  v11.8h
    raddhn      v2.8b,  v2.8h,  v12.8h
    raddhn      v3.8b,  v3.8h,  v13.8h
    /* invert to get (255 - mask * src_a) */
    mvn         v24.8b, v0.8b
    mvn         v25.8b, v1.8b
    mvn         v26.8b, v2.8b
    mvn         v27.8b, v3.8b
    /* dest * inverted value, left widened in v10-v13 for the tail */
    umull       v10.8h, v24.8b, v4.8b
    umull       v11.8h, v25.8b, v5.8b
    umull       v12.8h, v26.8b, v6.8b
    umull       v13.8h, v27.8b, v7.8b
.endm

.macro pixman_composite_over_n_8_8_process_pixblock_tail
    /* finish the /255 of dest * ~(mask * src_a) ... */
    urshr       v14.8h, v10.8h, #8
    urshr       v15.8h, v11.8h, #8
    urshr       v16.8h, v12.8h, #8
    urshr       v17.8h, v13.8h, #8
    raddhn      v28.8b, v14.8h, v10.8h
    raddhn      v29.8b, v15.8h, v11.8h
    raddhn      v30.8b, v16.8h, v12.8h
    raddhn      v31.8b, v17.8h, v13.8h
    /* ... and add the source term with saturation; result in v28-v31 */
    uqadd       v28.8b, v0.8b,  v28.8b
    uqadd       v29.8b, v1.8b,  v29.8b
    uqadd       v30.8b, v2.8b,  v30.8b
    uqadd       v31.8b, v3.8b,  v31.8b
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_n_8_8_process_pixblock_tail_head
    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    pixman_composite_over_n_8_8_process_pixblock_tail
    fetch_mask_pixblock
    cache_preload 32, 32
    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    pixman_composite_over_n_8_8_process_pixblock_head
.endm

.macro pixman_composite_over_n_8_8_init
    /* broadcast alpha byte of the solid colour (w4) into v8 */
    mov         v8.s[0], w4
    dup         v8.8b, v8.b[3]
.endm

.macro pixman_composite_over_n_8_8_cleanup
.endm

generate_composite_function \
    pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8_8_init, \
    pixman_composite_over_n_8_8_cleanup, \
    pixman_composite_over_n_8_8_process_pixblock_head, \
    pixman_composite_over_n_8_8_process_pixblock_tail, \
    pixman_composite_over_n_8_8_process_pixblock_tail_head
/******************************************************************************/

/*
 * over_n_8888_8888_ca: component-alpha OVER of a solid colour onto an
 * 8888 destination through an 8888 per-channel mask, 8 pixels per block.
 * Instruction order in the tail_head variant interleaves loads/stores and
 * prefetch with the arithmetic and must not be reordered casually.
 */

.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head
    /*
     * 'combine_mask_ca' replacement
     *
     * input:  solid src (n) in {v8,  v9,  v10, v11}
     *         dest in          {v4,  v5,  v6,  v7 }
     *         mask in          {v24, v25, v26, v27}
     * output: updated src in   {v0,  v1,  v2,  v3 }
     *         updated mask in  {v24, v25, v26, v3 }
     */
    /* src * mask per channel */
    umull       v0.8h,  v24.8b, v8.8b
    umull       v1.8h,  v25.8b, v9.8b
    umull       v2.8h,  v26.8b, v10.8b
    umull       v3.8h,  v27.8b, v11.8b
    /* mask * src alpha (v11) for B/G/R channels */
    umull       v12.8h, v11.8b, v25.8b
    umull       v13.8h, v11.8b, v24.8b
    umull       v14.8h, v11.8b, v26.8b
    /* rounding /255 narrowing for the src products */
    urshr       v15.8h, v0.8h,  #8
    urshr       v16.8h, v1.8h,  #8
    urshr       v17.8h, v2.8h,  #8
    raddhn      v0.8b,  v0.8h,  v15.8h
    raddhn      v1.8b,  v1.8h,  v16.8h
    raddhn      v2.8b,  v2.8h,  v17.8h
    /* rounding /255 narrowing for the mask products */
    urshr       v15.8h, v13.8h, #8
    urshr       v16.8h, v12.8h, #8
    urshr       v17.8h, v14.8h, #8
    urshr       v18.8h, v3.8h,  #8
    raddhn      v24.8b, v13.8h, v15.8h
    raddhn      v25.8b, v12.8h, v16.8h
    raddhn      v26.8b, v14.8h, v17.8h
    raddhn      v3.8b,  v3.8h,  v18.8h
    /*
     * 'combine_over_ca' replacement
     *
     * output: updated dest in {v28, v29, v30, v31}
     */
    /* invert updated mask and multiply into dest; widened products
       are left in v12-v15 for the tail */
    mvn         v24.8b, v24.8b
    mvn         v25.8b, v25.8b
    mvn         v26.8b, v26.8b
    mvn         v27.8b, v3.8b
    umull       v12.8h, v24.8b, v4.8b
    umull       v13.8h, v25.8b, v5.8b
    umull       v14.8h, v26.8b, v6.8b
    umull       v15.8h, v27.8b, v7.8b
.endm

.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail
    /* ... continue 'combine_over_ca' replacement */
    urshr       v16.8h, v12.8h, #8
    urshr       v17.8h, v13.8h, #8
    urshr       v18.8h, v14.8h, #8
    urshr       v19.8h, v15.8h, #8
    raddhn      v28.8b, v16.8h, v12.8h
    raddhn      v29.8b, v17.8h, v13.8h
    raddhn      v30.8b, v18.8h, v14.8h
    raddhn      v31.8b, v19.8h, v15.8h
    /* add the masked source with saturation; result in v28-v31 */
    uqadd       v28.8b, v0.8b,  v28.8b
    uqadd       v29.8b, v1.8b,  v29.8b
    uqadd       v30.8b, v2.8b,  v30.8b
    uqadd       v31.8b, v3.8b,  v31.8b
.endm

/* tail of previous block interleaved with head of the next one */
.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
    urshr       v16.8h, v12.8h, #8
    urshr       v17.8h, v13.8h, #8
    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    urshr       v18.8h, v14.8h, #8
    urshr       v19.8h, v15.8h, #8
    raddhn      v28.8b, v16.8h, v12.8h
    raddhn      v29.8b, v17.8h, v13.8h
    raddhn      v30.8b, v18.8h, v14.8h
    raddhn      v31.8b, v19.8h, v15.8h
    fetch_mask_pixblock
    uqadd       v28.8b, v0.8b,  v28.8b
    uqadd       v29.8b, v1.8b,  v29.8b
    uqadd       v30.8b, v2.8b,  v30.8b
    uqadd       v31.8b, v3.8b,  v31.8b
    cache_preload 8, 8
    pixman_composite_over_n_8888_8888_ca_process_pixblock_head
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm

.macro pixman_composite_over_n_8888_8888_ca_init
    /* broadcast the four channels of the solid colour (w4) into v8-v11 */
    mov         v13.s[0], w4
    dup         v8.8b, v13.b[0]
    dup         v9.8b, v13.b[1]
    dup         v10.8b, v13.b[2]
    dup         v11.8b, v13.b[3]
.endm

.macro pixman_composite_over_n_8888_8888_ca_cleanup
.endm

generate_composite_function \
    pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_8888_ca_init, \
    pixman_composite_over_n_8888_8888_ca_cleanup, \
    pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \
    pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \
    pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head

/******************************************************************************/

/*
 * over_n_8888_0565_ca: component-alpha OVER of a solid colour onto an
 * r5g6b5 destination through an 8888 per-channel mask, 8 pixels per
 * block.  Includes inline 0565 <-> planar 888 conversions.
 */

.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head
    /*
     * 'combine_mask_ca' replacement
     *
     * input:  solid src (n) in {v8, v9, v10, v11}  [B, G, R, A]
     *         mask in          {v24, v25, v26}     [B, G, R]
     * output: updated src in   {v0, v1, v2 }       [B, G, R]
     *         updated mask in  {v24, v25, v26}     [B, G, R]
     */
    umull       v0.8h,  v24.8b, v8.8b
    umull       v1.8h,  v25.8b, v9.8b
    umull       v2.8h,  v26.8b, v10.8b
    umull       v12.8h, v11.8b, v24.8b
    umull       v13.8h, v11.8b, v25.8b
    umull       v14.8h, v11.8b, v26.8b
    urshr       v15.8h, v0.8h,  #8
    urshr       v16.8h, v1.8h,  #8
    urshr       v17.8h, v2.8h,  #8
    raddhn      v0.8b,  v0.8h,  v15.8h
    raddhn      v1.8b,  v1.8h,  v16.8h
    raddhn      v2.8b,  v2.8h,  v17.8h
    urshr       v19.8h, v12.8h, #8
    urshr       v20.8h, v13.8h, #8
    urshr       v21.8h, v14.8h, #8
    raddhn      v24.8b, v12.8h, v19.8h
    raddhn      v25.8b, v13.8h, v20.8h
    /*
     * convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
     * and put data into v16 - blue, v17 - green, v18 - red
     */
    mov         v4.d[1], v5.d[0]
    shrn        v17.8b, v4.8h, #3
    shrn        v18.8b, v4.8h, #8
    raddhn      v26.8b, v14.8h, v21.8h
    sli         v4.8h, v4.8h, #5
    sri         v18.8b, v18.8b, #5
    sri         v17.8b, v17.8b, #6
    /*
     * 'combine_over_ca' replacement
     *
     * output: updated dest in v16 - blue, v17 - green, v18 - red
     */
    mvn         v24.8b, v24.8b
    mvn         v25.8b, v25.8b
    shrn        v16.8b, v4.8h, #2
    mvn         v26.8b, v26.8b
    umull       v5.8h, v16.8b, v24.8b
    umull       v6.8h, v17.8b, v25.8b
    umull       v7.8h, v18.8b, v26.8b
.endm

.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail
    /* ... continue 'combine_over_ca' replacement */
    urshr       v13.8h, v5.8h, #8
    urshr       v14.8h, v6.8h, #8
    urshr       v15.8h, v7.8h, #8
    raddhn      v16.8b, v13.8h, v5.8h
    raddhn      v17.8b, v14.8h, v6.8h
    raddhn      v18.8b, v15.8h, v7.8h
    uqadd       v16.8b, v0.8b, v16.8b
    uqadd       v17.8b, v1.8b, v17.8b
    uqadd       v18.8b, v2.8b, v18.8b
    /*
     * convert the results in v16, v17, v18 to r5g6b5 and store
     * them into {v14}
     */
    ushll       v14.8h, v18.8b, #7
    sli         v14.8h, v14.8h, #1
    ushll       v12.8h, v17.8b, #7
    sli         v12.8h, v12.8h, #1
    ushll       v13.8h, v16.8b, #7
    sli         v13.8h, v13.8h, #1
    sri         v14.8h, v12.8h, #5
    sri         v14.8h, v13.8h, #11
    mov         v28.d[0], v14.d[0]
    mov         v29.d[0], v14.d[1]
.endm

/* tail of previous block interleaved with head of the next one */
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
    fetch_mask_pixblock
    urshr       v13.8h, v5.8h, #8
    urshr       v14.8h, v6.8h, #8
    ld1         {v4.8h}, [DST_R], #16
    urshr       v15.8h, v7.8h, #8
    raddhn      v16.8b, v13.8h, v5.8h
    raddhn      v17.8b, v14.8h, v6.8h
    raddhn      v18.8b, v15.8h, v7.8h
    mov         v5.d[0], v4.d[1]
    /* process_pixblock_head */
    /*
     * 'combine_mask_ca' replacement
     *
     * input:  solid src (n) in {v8, v9, v10, v11}  [B, G, R, A]
     *         mask in          {v24, v25, v26}     [B, G, R]
     * output: updated src in   {v0, v1, v2 }       [B, G, R]
     *         updated mask in  {v24, v25, v26}     [B, G, R]
     */
    uqadd       v16.8b, v0.8b, v16.8b
    uqadd       v17.8b, v1.8b, v17.8b
    uqadd       v18.8b, v2.8b, v18.8b
    umull       v0.8h, v24.8b, v8.8b
    umull       v1.8h, v25.8b, v9.8b
    umull       v2.8h, v26.8b, v10.8b
    /*
     * convert the result in v16, v17, v18 to r5g6b5 and store
     * it into {v14}
     */
    ushll       v14.8h, v18.8b, #7
    sli         v14.8h, v14.8h, #1
    ushll       v18.8h, v16.8b, #7
    sli         v18.8h, v18.8h, #1
    ushll       v19.8h, v17.8b, #7
    sli         v19.8h, v19.8h, #1
    umull       v12.8h, v11.8b, v24.8b
    sri         v14.8h, v19.8h, #5
    umull       v13.8h, v11.8b, v25.8b
    umull       v15.8h, v11.8b, v26.8b
    sri         v14.8h, v18.8h, #11
    mov         v28.d[0], v14.d[0]
    mov         v29.d[0], v14.d[1]
    cache_preload 8, 8
    urshr       v16.8h, v0.8h, #8
    urshr       v17.8h, v1.8h, #8
    urshr       v18.8h, v2.8h, #8
    raddhn      v0.8b, v0.8h, v16.8h
    raddhn      v1.8b, v1.8h, v17.8h
    raddhn      v2.8b, v2.8h, v18.8h
    urshr       v19.8h, v12.8h, #8
    urshr       v20.8h, v13.8h, #8
    urshr       v21.8h, v15.8h, #8
    raddhn      v24.8b, v12.8h, v19.8h
    raddhn      v25.8b, v13.8h, v20.8h
    /*
     * convert 8 r5g6b5 pixel data from {v4, v5} to planar
     * 8-bit format and put data into v16 - blue, v17 - green,
     * v18 - red
     */
    mov         v4.d[1], v5.d[0]
    shrn        v17.8b, v4.8h, #3
    shrn        v18.8b, v4.8h, #8
    raddhn      v26.8b, v15.8h, v21.8h
    sli         v4.8h, v4.8h, #5
    sri         v17.8b, v17.8b, #6
    sri         v18.8b, v18.8b, #5
    /*
     * 'combine_over_ca' replacement
     *
     * output: updated dest in v16 - blue, v17 - green, v18 - red
     */
    mvn         v24.8b, v24.8b
    mvn         v25.8b, v25.8b
    shrn        v16.8b, v4.8h, #2
    mvn         v26.8b, v26.8b
    umull       v5.8h, v16.8b, v24.8b
    umull       v6.8h, v17.8b, v25.8b
    umull       v7.8h, v18.8b, v26.8b
    st1         {v14.8h}, [DST_W], #16
.endm

.macro pixman_composite_over_n_8888_0565_ca_init
    /* broadcast the four channels of the solid colour (w4) into v8-v11 */
    mov         v13.s[0], w4
    dup         v8.8b, v13.b[0]
    dup         v9.8b, v13.b[1]
    dup         v10.8b, v13.b[2]
    dup         v11.8b, v13.b[3]
.endm

.macro pixman_composite_over_n_8888_0565_ca_cleanup
.endm

generate_composite_function \
    pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_n_8888_0565_ca_init, \
    pixman_composite_over_n_8888_0565_ca_cleanup, \
    pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \
    pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \
    pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head

/******************************************************************************/

/*
 * in_n_8: IN operator with a solid source on an a8 destination,
 * 32 pixels per block:  dest = dest * src_alpha / 255
 * (src alpha is broadcast into v3 by the init macro).
 */

.macro pixman_composite_in_n_8_process_pixblock_head
    /* expecting source data in {v0, v1, v2, v3} */
    /* and destination data in {v4, v5, v6, v7} */
    umull       v8.8h, v4.8b, v3.8b
    umull       v9.8h, v5.8b, v3.8b
    umull       v10.8h, v6.8b, v3.8b
    umull       v11.8h, v7.8b, v3.8b
.endm

.macro pixman_composite_in_n_8_process_pixblock_tail
    /* rounding /255 narrowing of the widened products; result in v28-v31 */
    urshr       v14.8h, v8.8h, #8
    urshr       v15.8h, v9.8h, #8
    urshr       v12.8h, v10.8h, #8
    urshr       v13.8h, v11.8h, #8
    raddhn      v28.8b, v8.8h, v14.8h
    raddhn      v29.8b, v9.8h, v15.8h
    raddhn      v30.8b, v10.8h, v12.8h
    raddhn      v31.8b, v11.8h, v13.8h
.endm

.macro pixman_composite_in_n_8_process_pixblock_tail_head
    pixman_composite_in_n_8_process_pixblock_tail
    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    cache_preload 32, 32
    pixman_composite_in_n_8_process_pixblock_head
    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm

.macro pixman_composite_in_n_8_init
    /* broadcast alpha byte of the solid colour (w4) into v3 */
    mov         v3.s[0], w4
    dup         v3.8b, v3.b[3]
.endm

.macro pixman_composite_in_n_8_cleanup
.endm

generate_composite_function \
    pixman_composite_in_n_8_asm_neon, 0, 0, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_in_n_8_init, \
    pixman_composite_in_n_8_cleanup, \
    pixman_composite_in_n_8_process_pixblock_head, \
    pixman_composite_in_n_8_process_pixblock_tail, \
    pixman_composite_in_n_8_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    24 /* mask_basereg */

/*
 * add_n_8_8: saturated ADD of (mask * src_alpha) onto an a8 destination,
 * 32 pixels per block.
 */

.macro pixman_composite_add_n_8_8_process_pixblock_head
    /* expecting source data in {v8, v9, v10, v11} */
    /* v8 - blue, v9 - green, v10 - red, v11 - alpha */
    /* and destination data in {v4, v5, v6, v7} */
    /* mask is in v24, v25, v26, v27 */
    umull       v0.8h, v24.8b, v11.8b
    umull       v1.8h, v25.8b, v11.8b
    umull       v2.8h, v26.8b, v11.8b
    umull       v3.8h, v27.8b, v11.8b
    urshr       v12.8h, v0.8h, #8
    urshr       v13.8h, v1.8h, #8
    urshr       v14.8h, v2.8h, #8
    urshr       v15.8h, v3.8h, #8
    raddhn      v0.8b, v0.8h, v12.8h
    raddhn      v1.8b, v1.8h, v13.8h
    raddhn      v2.8b, v2.8h, v14.8h
    raddhn      v3.8b, v3.8h, v15.8h
    /* dest += mask * src_a, with saturation; result in v28-v31 */
    uqadd       v28.8b, v0.8b, v4.8b
    uqadd       v29.8b, v1.8b, v5.8b
    uqadd       v30.8b, v2.8b, v6.8b
    uqadd       v31.8b, v3.8b, v7.8b
.endm

.macro pixman_composite_add_n_8_8_process_pixblock_tail
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
    pixman_composite_add_n_8_8_process_pixblock_tail
    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    fetch_mask_pixblock
    cache_preload 32, 32
    pixman_composite_add_n_8_8_process_pixblock_head
.endm
.macro pixman_composite_add_n_8_8_init
    /* broadcast alpha byte of the solid colour (w4) into v11 */
    mov         v11.s[0], w4
    dup         v11.8b, v11.b[3]
.endm

.macro pixman_composite_add_n_8_8_cleanup
.endm

generate_composite_function \
    pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_n_8_8_init, \
    pixman_composite_add_n_8_8_cleanup, \
    pixman_composite_add_n_8_8_process_pixblock_head, \
    pixman_composite_add_n_8_8_process_pixblock_tail, \
    pixman_composite_add_n_8_8_process_pixblock_tail_head

/******************************************************************************/

/*
 * add_8_8_8: saturated ADD of (src * mask / 255) onto an a8 destination,
 * 32 pixels per block.
 */

.macro pixman_composite_add_8_8_8_process_pixblock_head
    /* expecting source data in {v0, v1, v2, v3} */
    /* destination data in {v4, v5, v6, v7} */
    /* mask in {v24, v25, v26, v27} */
    umull       v8.8h, v24.8b, v0.8b
    umull       v9.8h, v25.8b, v1.8b
    umull       v10.8h, v26.8b, v2.8b
    umull       v11.8h, v27.8b, v3.8b
    urshr       v0.8h, v8.8h, #8
    urshr       v1.8h, v9.8h, #8
    urshr       v12.8h, v10.8h, #8
    urshr       v13.8h, v11.8h, #8
    raddhn      v0.8b, v0.8h, v8.8h
    raddhn      v1.8b, v1.8h, v9.8h
    raddhn      v2.8b, v12.8h, v10.8h
    raddhn      v3.8b, v13.8h, v11.8h
    uqadd       v28.8b, v0.8b, v4.8b
    uqadd       v29.8b, v1.8b, v5.8b
    uqadd       v30.8b, v2.8b, v6.8b
    uqadd       v31.8b, v3.8b, v7.8b
.endm

.macro pixman_composite_add_8_8_8_process_pixblock_tail
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
    pixman_composite_add_8_8_8_process_pixblock_tail
    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    fetch_mask_pixblock
    fetch_src_pixblock
    cache_preload 32, 32
    pixman_composite_add_8_8_8_process_pixblock_head
.endm

.macro pixman_composite_add_8_8_8_init
.endm

.macro pixman_composite_add_8_8_8_cleanup
.endm

generate_composite_function \
    pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
    FLAG_DST_READWRITE, \
    32, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_8_8_8_init, \
    pixman_composite_add_8_8_8_cleanup, \
    pixman_composite_add_8_8_8_process_pixblock_head, \
    pixman_composite_add_8_8_8_process_pixblock_tail, \
    pixman_composite_add_8_8_8_process_pixblock_tail_head

/******************************************************************************/

/*
 * add_8888_8888_8888: saturated ADD of (src * mask_alpha / 255) onto an
 * 8888 destination, 8 pixels per block.  The mask alpha channel is v27;
 * the ursra/rshrn split implements the rounding /255 across head/tail.
 */

.macro pixman_composite_add_8888_8888_8888_process_pixblock_head
    /* expecting source data in {v0, v1, v2, v3} */
    /* destination data in {v4, v5, v6, v7} */
    /* mask in {v24, v25, v26, v27} */
    umull       v8.8h, v27.8b, v0.8b
    umull       v9.8h, v27.8b, v1.8b
    umull       v10.8h, v27.8b, v2.8b
    umull       v11.8h, v27.8b, v3.8b
    /* 1 cycle bubble */
    ursra       v8.8h, v8.8h, #8
    ursra       v9.8h, v9.8h, #8
    ursra       v10.8h, v10.8h, #8
    ursra       v11.8h, v11.8h, #8
.endm

.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail
    /* 2 cycle bubble */
    rshrn       v28.8b, v8.8h, #8
    rshrn       v29.8b, v9.8h, #8
    rshrn       v30.8b, v10.8h, #8
    rshrn       v31.8b, v11.8h, #8
    uqadd       v28.8b, v4.8b, v28.8b
    uqadd       v29.8b, v5.8b, v29.8b
    uqadd       v30.8b, v6.8b, v30.8b
    uqadd       v31.8b, v7.8b, v31.8b
.endm

/* tail of previous block interleaved with head of the next one */
.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
    fetch_src_pixblock
    rshrn       v28.8b, v8.8h, #8
    fetch_mask_pixblock
    rshrn       v29.8b, v9.8h, #8
    umull       v8.8h, v27.8b, v0.8b
    rshrn       v30.8b, v10.8h, #8
    umull       v9.8h, v27.8b, v1.8b
    rshrn       v31.8b, v11.8h, #8
    umull       v10.8h, v27.8b, v2.8b
    umull       v11.8h, v27.8b, v3.8b
    uqadd       v28.8b, v4.8b, v28.8b
    uqadd       v29.8b, v5.8b, v29.8b
    uqadd       v30.8b, v6.8b, v30.8b
    uqadd       v31.8b, v7.8b, v31.8b
    ursra       v8.8h, v8.8h, #8
    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    ursra       v9.8h, v9.8h, #8
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    ursra       v10.8h, v10.8h, #8
    cache_preload 8, 8
    ursra       v11.8h, v11.8h, #8
.endm

generate_composite_function \
    pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    24 /* mask_basereg */

generate_composite_function_single_scanline \
    pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    24 /* mask_basereg */

/******************************************************************************/

/* add_8888_8_8888 reuses the 8888_8888_8888 pixblock macros; the a8 mask
   is fetched straight into v27 (mask_basereg) where those macros expect
   the mask alpha. */
generate_composite_function \
    pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    27 /* mask_basereg */

/******************************************************************************/

.macro pixman_composite_add_n_8_8888_init
    /* broadcast the four channels of the solid colour (w4) into v0-v3 */
    mov         v3.s[0], w4
    dup         v0.8b, v3.b[0]
    dup         v1.8b, v3.b[1]
    dup         v2.8b, v3.b[2]
    dup         v3.8b, v3.b[3]
.endm

.macro pixman_composite_add_n_8_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_n_8_8888_init, \
    pixman_composite_add_n_8_8888_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    27 /* mask_basereg */

/******************************************************************************/

.macro pixman_composite_add_8888_n_8888_init
    /* broadcast alpha byte of the solid mask (w6) into v27 */
    mov         v27.s[0], w6
    dup         v27.8b, v27.b[3]
.endm

.macro pixman_composite_add_8888_n_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_add_8888_n_8888_init, \
    pixman_composite_add_8888_n_8888_cleanup, \
    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    27 /* mask_basereg */

/******************************************************************************/

.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
    /* expecting source data in {v0, v1, v2, v3} */
    /* destination data in {v4, v5, v6, v7} */
    /* solid mask is in v15 */

    /* 'in' */
    umull       v11.8h, v15.8b, v3.8b
    umull       v10.8h, v15.8b, v2.8b
    umull       v9.8h, v15.8b, v1.8b
    umull       v8.8h, v15.8b, v0.8b
    urshr       v16.8h, v11.8h, #8
    urshr       v14.8h, v10.8h, #8
    urshr       v13.8h, v9.8h, #8
    urshr       v12.8h, v8.8h, #8
    raddhn      v3.8b, v11.8h, v16.8h
    raddhn      v2.8b, v10.8h, v14.8h
    raddhn      v1.8b, v9.8h, v13.8h
    raddhn      v0.8b, v8.8h, v12.8h
    mvn         v24.8b, v3.8b /* get inverted alpha */
    /* now do alpha blending */
    umull       v8.8h, v24.8b, v4.8b
    umull       v9.8h, v24.8b, v5.8b
    umull       v10.8h, v24.8b, v6.8b
    umull       v11.8h, v24.8b, v7.8b
.endm

.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
    /* finish /255 of dest * (255 - src_a); result in v28-v31 */
    urshr       v16.8h, v8.8h, #8
    urshr       v17.8h, v9.8h, #8
    urshr       v18.8h, v10.8h, #8
    urshr       v19.8h, v11.8h, #8
    raddhn      v28.8b, v16.8h, v8.8h
    raddhn      v29.8b, v17.8h, v9.8h
    raddhn      v30.8b, v18.8h, v10.8h
    raddhn      v31.8b, v19.8h, v11.8h
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head
    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    fetch_mask_pixblock
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm

generate_composite_function_single_scanline \
    pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \
    pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

/******************************************************************************/

/* over_8888_n_8888 = out_reverse (dest * ~src_a) plus the masked source,
   added in the tail with saturation. */

.macro pixman_composite_over_8888_n_8888_process_pixblock_head
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
.endm

.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
    uqadd       v28.8b, v0.8b, v28.8b
    uqadd       v29.8b, v1.8b, v29.8b
    uqadd       v30.8b, v2.8b, v30.8b
    uqadd       v31.8b, v3.8b, v31.8b
.endm

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    pixman_composite_over_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    pixman_composite_over_8888_n_8888_process_pixblock_head
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm

.macro pixman_composite_over_8888_n_8888_init
    /* broadcast alpha byte of the solid mask (w6) into v15 */
    mov         v15.s[0], w6
    dup         v15.8b, v15.b[3]
.endm

.macro pixman_composite_over_8888_n_8888_cleanup
.endm

generate_composite_function \
    pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    pixman_composite_over_8888_n_8888_init, \
    pixman_composite_over_8888_n_8888_cleanup, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

/******************************************************************************/

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head
    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    pixman_composite_over_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    fetch_mask_pixblock
    pixman_composite_over_8888_n_8888_process_pixblock_head
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm

generate_composite_function \
    pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

generate_composite_function_single_scanline \
    pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    12 /* mask_basereg */

/******************************************************************************/

/* TODO: expand macros and do better instructions scheduling */
.macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head
    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
    pixman_composite_over_8888_n_8888_process_pixblock_tail
    fetch_src_pixblock
    cache_preload 8, 8
    fetch_mask_pixblock
    pixman_composite_over_8888_n_8888_process_pixblock_head
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
.endm

generate_composite_function \
    pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \
    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    5, /* prefetch distance */ \
    default_init_need_all_regs, \
    default_cleanup_need_all_regs, \
    pixman_composite_over_8888_n_8888_process_pixblock_head, \
    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
    pixman_composite_over_8888_8_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    4, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    15 /* mask_basereg */

/******************************************************************************/

/* src_0888_0888: plain 24bpp copy (load in tail_head, store as-is). */

.macro pixman_composite_src_0888_0888_process_pixblock_head
.endm

.macro pixman_composite_src_0888_0888_process_pixblock_tail
.endm

.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
    st3         {v0.8b, v1.8b, v2.8b}, [DST_W], #24
    fetch_src_pixblock
    cache_preload 8, 8
.endm

generate_composite_function \
    pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0888_0888_process_pixblock_head, \
    pixman_composite_src_0888_0888_process_pixblock_tail, \
    pixman_composite_src_0888_0888_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_0888_8888_rev: 24bpp to 32bpp with the v0/v2 channel planes swapped
   (via v31) and a zeroed fourth channel (v3, cleared in init). */

.macro pixman_composite_src_0888_8888_rev_process_pixblock_head
    mov         v31.8b, v2.8b
    mov         v2.8b, v0.8b
    mov         v0.8b, v31.8b
.endm

.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail
.endm

.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head
    st4         {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], #32
    fetch_src_pixblock
    mov         v31.8b, v2.8b
    mov         v2.8b, v0.8b
    mov         v0.8b, v31.8b
    cache_preload 8, 8
.endm

.macro pixman_composite_src_0888_8888_rev_init
    eor         v3.8b, v3.8b, v3.8b
.endm

generate_composite_function \
    pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    pixman_composite_src_0888_8888_rev_init, \
    default_cleanup, \
    pixman_composite_src_0888_8888_rev_process_pixblock_head, \
    pixman_composite_src_0888_8888_rev_process_pixblock_tail, \
    pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \
    0, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_0888_0565_rev: planar 24bpp to r5g6b5 packing via ushll/sli/sri. */

.macro pixman_composite_src_0888_0565_rev_process_pixblock_head
    ushll       v8.8h, v1.8b, #7
    sli         v8.8h, v8.8h, #1
    ushll       v9.8h, v2.8b, #7
    sli         v9.8h, v9.8h, #1
.endm

.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail
    ushll       v14.8h, v0.8b, #7
    sli         v14.8h, v14.8h, #1
    sri         v14.8h, v8.8h, #5
    sri         v14.8h, v9.8h, #11
    mov         v28.d[0], v14.d[0]
    mov         v29.d[0], v14.d[1]
.endm

.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head
    ushll       v14.8h, v0.8b, #7
    sli         v14.8h, v14.8h, #1
    fetch_src_pixblock
    sri         v14.8h, v8.8h, #5
    sri         v14.8h, v9.8h, #11
    mov         v28.d[0], v14.d[0]
    mov         v29.d[0], v14.d[1]
    ushll       v8.8h, v1.8b, #7
    sli         v8.8h, v8.8h, #1
    st1         {v14.8h}, [DST_W], #16
    ushll       v9.8h, v2.8b, #7
    sli         v9.8h, v9.8h, #1
.endm

generate_composite_function \
    pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \
    FLAG_DST_WRITEONLY, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_0888_0565_rev_process_pixblock_head, \
    pixman_composite_src_0888_0565_rev_process_pixblock_tail, \
    pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/*
 * src_pixbuf_8888: multiply the colour channels by alpha (v3) and swap
 * the v3/v31 output planes.  The three-mov sequence through v30 emulates
 * a v3 <-> v31 swap: v30 saves old v31, v31 gets old v3, v3 gets saved
 * old v31 from v30.
 */

.macro pixman_composite_src_pixbuf_8888_process_pixblock_head
    umull       v8.8h, v3.8b, v0.8b
    umull       v9.8h, v3.8b, v1.8b
    umull       v10.8h, v3.8b, v2.8b
.endm

.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail
    urshr       v11.8h, v8.8h, #8
    mov         v30.8b, v31.8b
    mov         v31.8b, v3.8b
    mov         v3.8b, v30.8b
    urshr       v12.8h, v9.8h, #8
    urshr       v13.8h, v10.8h, #8
    raddhn      v30.8b, v11.8h, v8.8h
    raddhn      v29.8b, v12.8h, v9.8h
    raddhn      v28.8b, v13.8h, v10.8h
.endm

.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head
    urshr       v11.8h, v8.8h, #8
    mov         v30.8b, v31.8b
    mov         v31.8b, v3.8b
    /* BUGFIX: was "mov v3.8b, v31.8b", which copied the just-written v31
       (old v3) back onto v3 so the swap never happened; complete the swap
       from the saved copy in v30, matching the _tail macro above and the
       rpixbuf variants below. */
    mov         v3.8b, v30.8b
    urshr       v12.8h, v9.8h, #8
    urshr       v13.8h, v10.8h, #8
    fetch_src_pixblock
    raddhn      v30.8b, v11.8h, v8.8h
    PF add, PF_X, PF_X, #8
    PF tst, PF_CTL, #0xF
    PF beq, 10f
    PF add, PF_X, PF_X, #8
    PF sub, PF_CTL, PF_CTL, #1
10:
    raddhn      v29.8b, v12.8h, v9.8h
    raddhn      v28.8b, v13.8h, v10.8h
    umull       v8.8h, v3.8b, v0.8b
    umull       v9.8h, v3.8b, v1.8b
    umull       v10.8h, v3.8b, v2.8b
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF cmp, PF_X, ORIG_W
    PF lsl, DUMMY, PF_X, src_bpp_shift
    PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY]
    PF ble, 10f
    PF sub, PF_X, PF_X, ORIG_W
    PF subs, PF_CTL, PF_CTL, #0x10
    PF ble, 10f
    PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb, DUMMY, [PF_SRC, DUMMY]
    PF add, PF_SRC, PF_SRC, #1
10:
.endm

generate_composite_function \
    pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
    8, /* number of pixels, processed in a single block */ \
    10, /* prefetch distance */ \
    default_init, \
    default_cleanup, \
    pixman_composite_src_pixbuf_8888_process_pixblock_head, \
    pixman_composite_src_pixbuf_8888_process_pixblock_tail, \
    pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \
    28, /* dst_w_basereg */ \
    0, /* dst_r_basereg */ \
    0, /* src_basereg */ \
    0 /* mask_basereg */

/******************************************************************************/

/* src_rpixbuf_8888: as src_pixbuf_8888 but with the premultiplied
   results written to v28/v29/v30 in the opposite channel order. */

.macro pixman_composite_src_rpixbuf_8888_process_pixblock_head
    umull       v8.8h, v3.8b, v0.8b
    umull       v9.8h, v3.8b, v1.8b
    umull       v10.8h, v3.8b, v2.8b
.endm

.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail
    urshr       v11.8h, v8.8h, #8
    mov         v30.8b, v31.8b
    mov         v31.8b, v3.8b
    mov         v3.8b, v30.8b
    urshr       v12.8h, v9.8h, #8
    urshr       v13.8h, v10.8h, #8
    raddhn      v28.8b, v11.8h, v8.8h
    raddhn      v29.8b, v12.8h, v9.8h
    raddhn      v30.8b, v13.8h, v10.8h
.endm

.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head
    urshr       v11.8h, v8.8h, #8
    mov         v30.8b, v31.8b
    mov         v31.8b, v3.8b
    mov         v3.8b, v30.8b
    urshr       v12.8h, v9.8h, #8
    urshr       v13.8h, v10.8h, #8
    fetch_src_pixblock
    raddhn      v28.8b, v11.8h, v8.8h
    PF add, PF_X, PF_X, #8
    PF tst, PF_CTL, #0xF
    PF beq, 10f
    PF add, PF_X, PF_X, #8
    PF sub, PF_CTL, PF_CTL, #1
10:
    raddhn      v29.8b, v12.8h, v9.8h
    raddhn      v30.8b, v13.8h, v10.8h
    umull       v8.8h, v3.8b, v0.8b
    umull       v9.8h, v3.8b, v1.8b
    umull       v10.8h, v3.8b, v2.8b
    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
    PF cmp, PF_X, ORIG_W
    PF lsl, DUMMY, PF_X, src_bpp_shift
    PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY]
    PF ble, 10f
    PF sub, PF_X, PF_X, ORIG_W
    PF subs, PF_CTL, PF_CTL, #0x10
    PF ble, 10f
    PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb, DUMMY, [PF_SRC, DUMMY]
    PF add, PF_SRC, PF_SRC, #1
10:
.endm
generate_composite_function \ pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 10, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_src_rpixbuf_8888_process_pixblock_head, \ pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \ pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 0, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_8_0565_process_pixblock_head /* mask is in v15 */ mov v4.d[0], v8.d[0] mov v4.d[1], v9.d[0] mov v13.d[0], v10.d[0] mov v13.d[1], v11.d[0] convert_0565_to_x888 v4, v2, v1, v0 convert_0565_to_x888 v13, v6, v5, v4 /* source pixel data is in {v0, v1, v2, XX} */ /* destination pixel data is in {v4, v5, v6, XX} */ mvn v7.8b, v15.8b umull v10.8h, v15.8b, v2.8b umull v9.8h, v15.8b, v1.8b umull v8.8h, v15.8b, v0.8b umull v11.8h, v7.8b, v4.8b umull v12.8h, v7.8b, v5.8b umull v13.8h, v7.8b, v6.8b urshr v19.8h, v10.8h, #8 urshr v18.8h, v9.8h, #8 urshr v17.8h, v8.8h, #8 raddhn v2.8b, v10.8h, v19.8h raddhn v1.8b, v9.8h, v18.8h raddhn v0.8b, v8.8h, v17.8h .endm .macro pixman_composite_over_0565_8_0565_process_pixblock_tail urshr v17.8h, v11.8h, #8 urshr v18.8h, v12.8h, #8 urshr v19.8h, v13.8h, #8 raddhn v28.8b, v17.8h, v11.8h raddhn v29.8b, v18.8h, v12.8h raddhn v30.8b, v19.8h, v13.8h uqadd v0.8b, v0.8b, v28.8b uqadd v1.8b, v1.8b, v29.8b uqadd v2.8b, v2.8b, v30.8b /* 32bpp result is in {v0, v1, v2, XX} */ convert_8888_to_0565 v2, v1, v0, v14, v30, v13 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_over_0565_8_0565_process_pixblock_tail fetch_src_pixblock ld1 {v10.4h, 
v11.4h}, [DST_R], #16 cache_preload 8, 8 pixman_composite_over_0565_8_0565_process_pixblock_head st1 {v14.8h}, [DST_W], #16 .endm generate_composite_function \ pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_over_0565_n_0565_init mov v15.s[0], w6 dup v15.8b, v15.b[3] .endm .macro pixman_composite_over_0565_n_0565_cleanup .endm generate_composite_function \ pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ pixman_composite_over_0565_n_0565_init, \ pixman_composite_over_0565_n_0565_cleanup, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_add_0565_8_0565_process_pixblock_head /* mask is in v15 */ mov v4.d[0], v8.d[0] mov v4.d[1], v9.d[0] mov v13.d[0], v10.d[0] mov v13.d[1], v11.d[0] convert_0565_to_x888 v4, v2, v1, v0 convert_0565_to_x888 v13, v6, v5, v4 /* source pixel data is in {v0, v1, v2, XX} */ /* destination pixel data is in {v4, v5, v6, XX} */ umull v9.8h, v15.8b, v2.8b umull v8.8h, v15.8b, v1.8b umull v7.8h, v15.8b, v0.8b urshr v12.8h, v9.8h, #8 urshr v11.8h, 
v8.8h, #8 urshr v10.8h, v7.8h, #8 raddhn v2.8b, v9.8h, v12.8h raddhn v1.8b, v8.8h, v11.8h raddhn v0.8b, v7.8h, v10.8h .endm .macro pixman_composite_add_0565_8_0565_process_pixblock_tail uqadd v0.8b, v0.8b, v4.8b uqadd v1.8b, v1.8b, v5.8b uqadd v2.8b, v2.8b, v6.8b /* 32bpp result is in {v0, v1, v2, XX} */ convert_8888_to_0565 v2, v1, v0, v14, v30, v13 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head fetch_mask_pixblock pixman_composite_add_0565_8_0565_process_pixblock_tail fetch_src_pixblock ld1 {v10.4h, v11.4h}, [DST_R], #16 cache_preload 8, 8 pixman_composite_add_0565_8_0565_process_pixblock_head st1 {v14.8h}, [DST_W], #16 .endm generate_composite_function \ pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_add_0565_8_0565_process_pixblock_head, \ pixman_composite_add_0565_8_0565_process_pixblock_tail, \ pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_0565_process_pixblock_head /* mask is in v15 */ mov v12.d[0], v10.d[0] mov v12.d[1], v11.d[0] convert_0565_to_x888 v12, v6, v5, v4 /* destination pixel data is in {v4, v5, v6, xx} */ mvn v24.8b, v15.8b /* get inverted alpha */ /* now do alpha blending */ umull v8.8h, v24.8b, v4.8b umull v9.8h, v24.8b, v5.8b umull v10.8h, v24.8b, v6.8b .endm .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail urshr v11.8h, v8.8h, #8 urshr v12.8h, v9.8h, #8 urshr v13.8h, v10.8h, #8 raddhn v0.8b, v11.8h, v8.8h raddhn v1.8b, v12.8h, v9.8h raddhn v2.8b, v13.8h, v10.8h /* 32bpp 
result is in {v0, v1, v2, XX} */ convert_8888_to_0565 v2, v1, v0, v14, v12, v3 mov v28.d[0], v14.d[0] mov v29.d[0], v14.d[1] .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_0565_process_pixblock_tail ld1 {v10.4h, v11.4h}, [DST_R], #16 cache_preload 8, 8 pixman_composite_out_reverse_8_0565_process_pixblock_head st1 {v14.8h}, [DST_W], #16 .endm generate_composite_function \ pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_out_reverse_8_0565_process_pixblock_head, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail, \ pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 15, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ .macro pixman_composite_out_reverse_8_8888_process_pixblock_head /* src is in v0 */ /* destination pixel data is in {v4, v5, v6, v7} */ mvn v1.8b, v0.8b /* get inverted alpha */ /* now do alpha blending */ umull v8.8h, v1.8b, v4.8b umull v9.8h, v1.8b, v5.8b umull v10.8h, v1.8b, v6.8b umull v11.8h, v1.8b, v7.8b .endm .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail urshr v14.8h, v8.8h, #8 urshr v15.8h, v9.8h, #8 urshr v12.8h, v10.8h, #8 urshr v13.8h, v11.8h, #8 raddhn v28.8b, v14.8h, v8.8h raddhn v29.8b, v15.8h, v9.8h raddhn v30.8b, v12.8h, v10.8h raddhn v31.8b, v13.8h, v11.8h /* 32bpp result is in {v28, v29, v30, v31} */ .endm /* TODO: expand macros and do better instructions scheduling */ .macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head fetch_src_pixblock pixman_composite_out_reverse_8_8888_process_pixblock_tail ld4 {v4.8b, v5.8b, v6.8b, 
v7.8b}, [DST_R], #32 cache_preload 8, 8 pixman_composite_out_reverse_8_8888_process_pixblock_head st4 {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32 .endm generate_composite_function \ pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ 5, /* prefetch distance */ \ default_init, \ default_cleanup, \ pixman_composite_out_reverse_8_8888_process_pixblock_head, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail, \ pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 0 /* mask_basereg */ /******************************************************************************/ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_8888_process_pixblock_head, \ pixman_composite_over_8888_8888_process_pixblock_tail, \ pixman_composite_over_8888_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_over_8888_0565_process_pixblock_head, \ pixman_composite_over_8888_0565_process_pixblock_tail, \ pixman_composite_over_8888_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 0, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ 
pixman_composite_src_8888_0565_process_pixblock_head, \ pixman_composite_src_8888_0565_process_pixblock_tail, \ pixman_composite_src_8888_0565_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \ FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init, \ default_cleanup, \ pixman_composite_src_0565_8888_process_pixblock_head, \ pixman_composite_src_0565_8888_process_pixblock_tail, \ pixman_composite_src_0565_8888_process_pixblock_tail_head generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \ FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_8888_8_0565_process_pixblock_head, \ pixman_composite_over_8888_8_0565_process_pixblock_tail, \ pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 4, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 24 /* mask_basereg */ generate_composite_function_nearest_scanline \ pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \ FLAG_DST_READWRITE, \ 8, /* number of pixels, processed in a single block */ \ default_init_need_all_regs, \ default_cleanup_need_all_regs, \ pixman_composite_over_0565_8_0565_process_pixblock_head, \ pixman_composite_over_0565_8_0565_process_pixblock_tail, \ pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \ 28, /* dst_w_basereg */ \ 10, /* dst_r_basereg */ \ 8, /* src_basereg */ \ 15 /* mask_basereg */ /******************************************************************************/ /* * Bilinear scaling support code which tries to provide pixel fetching, color * format conversion, and interpolation as separate macros which can be used * as the basic building blocks for constructing bilinear 
scanline functions. */ .macro bilinear_load_8888 reg1, reg2, tmp asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 ld1 {\()\reg1\().2s}, [TMP1], STRIDE ld1 {\()\reg2\().2s}, [TMP1] .endm .macro bilinear_load_0565 reg1, reg2, tmp asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 ld1 {\()\reg2\().s}[0], [TMP1], STRIDE ld1 {\()\reg2\().s}[1], [TMP1] convert_four_0565_to_x888_packed \reg2, \reg1, \reg2, \tmp .endm .macro bilinear_load_and_vertical_interpolate_two_8888 \ acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2 bilinear_load_8888 \reg1, \reg2, \tmp1 umull \()\acc1\().8h, \()\reg1\().8b, v28.8b umlal \()\acc1\().8h, \()\reg2\().8b, v29.8b bilinear_load_8888 \reg3, \reg4, \tmp2 umull \()\acc2\().8h, \()\reg3\().8b, v28.8b umlal \()\acc2\().8h, \()\reg4\().8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_8888 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi, \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \xacc1, \xacc2, \xreg1, \xreg2, \xreg3, \xreg4, \xacc2lo, \xacc2hi bilinear_load_and_vertical_interpolate_two_8888 \ \yacc1, \yacc2, \yreg1, \yreg2, \yreg3, \yreg4, \yacc2lo, \yacc2hi .endm .macro vzip reg1, reg2 umov TMP4, v31.d[0] zip1 v31.8b, \reg1, \reg2 zip2 \reg2, \reg1, \reg2 mov \reg1, v31.8b mov v31.d[0], TMP4 .endm .macro vuzp reg1, reg2 umov TMP4, v31.d[0] uzp1 v31.8b, \reg1, \reg2 uzp2 \reg2, \reg1, \reg2 mov \reg1, v31.8b mov v31.d[0], TMP4 .endm .macro bilinear_load_and_vertical_interpolate_two_0565 \ acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {\()\acc2\().s}[0], [TMP1], STRIDE ld1 {\()\acc2\().s}[2], [TMP2], STRIDE ld1 {\()\acc2\().s}[1], [TMP1] ld1 {\()\acc2\().s}[3], [TMP2] convert_0565_to_x888 \acc2, \reg3, \reg2, \reg1 vzip \()\reg1\().8b, \()\reg3\().8b vzip \()\reg2\().8b, \()\reg4\().8b vzip \()\reg3\().8b, 
\()\reg4\().8b vzip \()\reg1\().8b, \()\reg2\().8b umull \()\acc1\().8h, \()\reg1\().8b, v28.8b umlal \()\acc1\().8h, \()\reg2\().8b, v29.8b umull \()\acc2\().8h, \()\reg3\().8b, v28.8b umlal \()\acc2\().8h, \()\reg4\().8b, v29.8b .endm .macro bilinear_load_and_vertical_interpolate_four_0565 \ xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi, \ yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {\()\xacc2\().s}[0], [TMP1], STRIDE ld1 {\()\xacc2\().s}[2], [TMP2], STRIDE ld1 {\()\xacc2\().s}[1], [TMP1] ld1 {\()\xacc2\().s}[3], [TMP2] convert_0565_to_x888 \xacc2, \xreg3, \xreg2, \xreg1 asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #1 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #1 ld1 {\()\yacc2\().s}[0], [TMP1], STRIDE vzip \()\xreg1\().8b, \()\xreg3\().8b ld1 {\()\yacc2\().s}[2], [TMP2], STRIDE vzip \()\xreg2\().8b, \()\xreg4\().8b ld1 {\()\yacc2\().s}[1], [TMP1] vzip \()\xreg3\().8b, \()\xreg4\().8b ld1 {\()\yacc2\().s}[3], [TMP2] vzip \()\xreg1\().8b, \()\xreg2\().8b convert_0565_to_x888 \yacc2, \yreg3, \yreg2, \yreg1 umull \()\xacc1\().8h, \()\xreg1\().8b, v28.8b vzip \()\yreg1\().8b, \()\yreg3\().8b umlal \()\xacc1\().8h, \()\xreg2\().8b, v29.8b vzip \()\yreg2\().8b, \()\yreg4\().8b umull \()\xacc2\().8h, \()\xreg3\().8b, v28.8b vzip \()\yreg3\().8b, \()\yreg4\().8b umlal \()\xacc2\().8h, \()\xreg4\().8b, v29.8b vzip \()\yreg1\().8b, \()\yreg2\().8b umull \()\yacc1\().8h, \()\yreg1\().8b, v28.8b umlal \()\yacc1\().8h, \()\yreg2\().8b, v29.8b umull \()\yacc2\().8h, \()\yreg3\().8b, v28.8b umlal \()\yacc2\().8h, \()\yreg4\().8b, v29.8b .endm .macro bilinear_store_8888 numpix, tmp1, tmp2 .if \numpix == 4 st1 {v0.2s, v1.2s}, [OUT], #16 .elseif \numpix == 2 st1 {v0.2s}, [OUT], #8 .elseif \numpix == 1 st1 {v0.s}[0], [OUT], #4 .else .error bilinear_store_8888 \numpix is unsupported .endif .endm .macro 
bilinear_store_0565 numpix, tmp1, tmp2 vuzp v0.8b, v1.8b vuzp v2.8b, v3.8b vuzp v1.8b, v3.8b vuzp v0.8b, v2.8b convert_8888_to_0565 v2, v1, v0, v1, \tmp1, \tmp2 .if \numpix == 4 st1 {v1.4h}, [OUT], #8 .elseif \numpix == 2 st1 {v1.s}[0], [OUT], #4 .elseif \numpix == 1 st1 {v1.h}[0], [OUT], #2 .else .error bilinear_store_0565 \numpix is unsupported .endif .endm .macro bilinear_interpolate_last_pixel src_fmt, dst_fmt bilinear_load_\()\src_fmt v0, v1, v2 umull v2.8h, v0.8b, v28.8b umlal v2.8h, v1.8b, v29.8b /* 5 cycles bubble */ ushll v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v2.4h, v15.h[0] umlal2 v0.4s, v2.8h, v15.h[0] /* 5 cycles bubble */ shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) /* 3 cycles bubble */ xtn v0.8b, v0.8h /* 1 cycle bubble */ bilinear_store_\()\dst_fmt 1, v3, v4 .endm .macro bilinear_interpolate_two_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_two_\()\src_fmt \ v1, v11, v2, v3, v20, v21, v22, v23 ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h xtn v0.8b, v0.8h bilinear_store_\()\dst_fmt 2, v3, v4 .endm .macro bilinear_interpolate_four_pixels src_fmt, dst_fmt bilinear_load_and_vertical_interpolate_four_\()\src_fmt \ v1, v11, v14, v20, v16, v17, v22, v23, \ v3, v9, v24, v25, v26, v27, v18, v19 prfm PREFETCH_MODE, [TMP1, PF_OFFS] sub TMP1, TMP1, STRIDE ushll v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v1.4h, v15.h[0] umlal2 v0.4s, v1.8h, v15.h[0] ushll v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v10.4s, v11.4h, v15.h[4] umlal2 v10.4s, v11.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v3.4h, 
#BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v3.4h, v15.h[0] umlal2 v2.4s, v3.8h, v15.h[0] ushll v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS prfm PREFETCH_MODE, [TMP2, PF_OFFS] umlsl v8.4s, v9.4h, v15.h[4] umlal2 v8.4s, v9.8h, v15.h[4] add v12.8h, v12.8h, v13.8h shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) xtn v0.8b, v0.8h xtn v1.8b, v2.8h add v12.8h, v12.8h, v13.8h bilinear_store_\()\dst_fmt 4, v3, v4 .endm .macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt\()_head .else bilinear_interpolate_four_pixels \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail .endif .endm .macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_four_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail_head .else bilinear_interpolate_four_pixels \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt\()_head .else bilinear_interpolate_four_pixels_head \src_fmt, \dst_fmt bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail .else 
bilinear_interpolate_four_pixels_tail \src_fmt, \dst_fmt .endif .endm .macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt .ifdef have_bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt bilinear_interpolate_eight_pixels_\()\src_fmt\()_\()\dst_fmt\()_tail_head .else bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt .endif .endm .set BILINEAR_FLAG_UNROLL_4, 0 .set BILINEAR_FLAG_UNROLL_8, 1 .set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2 /* * Main template macro for generating NEON optimized bilinear scanline * functions. * * Bilinear scanline scaler macro template uses the following arguments: * fname - name of the function to generate * src_fmt - source color format (8888 or 0565) * dst_fmt - destination color format (8888 or 0565) * bpp_shift - (1 << bpp_shift) is the size of source pixel in bytes * prefetch_distance - prefetch in the source image by that many * pixels ahead */ .macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \ src_bpp_shift, dst_bpp_shift, \ prefetch_distance, flags pixman_asm_function \fname OUT .req x0 TOP .req x1 BOTTOM .req x2 WT .req x3 WB .req x4 X .req x5 UX .req x6 WIDTH .req x7 TMP1 .req x8 TMP2 .req x9 PF_OFFS .req x10 TMP3 .req x11 TMP4 .req x12 STRIDE .req x13 sxtw x3, w3 sxtw x4, w4 sxtw x5, w5 sxtw x6, w6 sxtw x7, w7 stp x29, x30, [sp, -16]! 
mov x29, sp sub sp, sp, 112 /* push all registers */ sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32 stp x8, x9, [x29, -80] stp x10, x11, [x29, -96] stp x12, x13, [x29, -112] mov PF_OFFS, #\prefetch_distance mul PF_OFFS, PF_OFFS, UX subs STRIDE, BOTTOM, TOP .unreq BOTTOM cmp WIDTH, #0 ble 300f dup v12.8h, w5 dup v13.8h, w6 dup v28.8b, w3 dup v29.8b, w4 mov v25.d[0], v12.d[1] mov v26.d[0], v13.d[0] add v25.4h, v25.4h, v26.4h mov v12.d[1], v25.d[0] /* ensure good destination alignment */ cmp WIDTH, #1 blt 100f tst OUT, #(1 << \dst_bpp_shift) beq 100f ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h bilinear_interpolate_last_pixel \src_fmt, \dst_fmt sub WIDTH, WIDTH, #1 100: add v13.8h, v13.8h, v13.8h ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h cmp WIDTH, #2 blt 100f tst OUT, #(1 << (\dst_bpp_shift + 1)) beq 100f bilinear_interpolate_two_pixels \src_fmt, \dst_fmt sub WIDTH, WIDTH, #2 100: .if ((\flags) & BILINEAR_FLAG_UNROLL_8) != 0 /*********** 8 pixels per iteration *****************/ cmp WIDTH, #4 blt 100f tst OUT, #(1 << (\dst_bpp_shift + 2)) beq 100f bilinear_interpolate_four_pixels \src_fmt, \dst_fmt sub WIDTH, WIDTH, #4 100: subs WIDTH, WIDTH, #8 blt 100f asr PF_OFFS, PF_OFFS, #(16 - \src_bpp_shift) bilinear_interpolate_eight_pixels_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #8 blt 500f 1000: bilinear_interpolate_eight_pixels_tail_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #8 bge 1000b 500: bilinear_interpolate_eight_pixels_tail \src_fmt, \dst_fmt 100: tst WIDTH, #4 beq 200f bilinear_interpolate_four_pixels \src_fmt, \dst_fmt 200: .else /*********** 4 pixels per iteration *****************/ subs WIDTH, WIDTH, #4 blt 100f asr PF_OFFS, PF_OFFS, #(16 - \src_bpp_shift) bilinear_interpolate_four_pixels_head \src_fmt, \dst_fmt subs WIDTH, WIDTH, #4 blt 500f 1000: bilinear_interpolate_four_pixels_tail_head \src_fmt, \dst_fmt subs 
WIDTH, WIDTH, #4 bge 1000b 500: bilinear_interpolate_four_pixels_tail \src_fmt, \dst_fmt 100: /****************************************************/ .endif /* handle the remaining trailing pixels */ tst WIDTH, #2 beq 200f bilinear_interpolate_two_pixels \src_fmt, \dst_fmt 200: tst WIDTH, #1 beq 300f bilinear_interpolate_last_pixel \src_fmt, \dst_fmt 300: sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32 ldp x8, x9, [x29, -80] ldp x10, x11, [x29, -96] ldp x12, x13, [x29, -104] mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret .unreq OUT .unreq TOP .unreq WT .unreq WB .unreq X .unreq UX .unreq WIDTH .unreq TMP1 .unreq TMP2 .unreq PF_OFFS .unreq TMP3 .unreq TMP4 .unreq STRIDE pixman_end_asm_function .endm /*****************************************************************************/ .set have_bilinear_interpolate_four_pixels_8888_8888, 1 .macro bilinear_interpolate_four_pixels_8888_8888_head asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 ld1 {v22.2s}, [TMP1], STRIDE ld1 {v23.2s}, [TMP1] asr TMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 umull v8.8h, v22.8b, v28.8b umlal v8.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP2], STRIDE ld1 {v23.2s}, [TMP2] asr TMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umull v9.8h, v22.8b, v28.8b umlal v9.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP3], STRIDE ld1 {v23.2s}, [TMP3] umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS umlsl v0.4s, v8.4h, v15.h[0] umlal2 v0.4s, v8.8h, v15.h[0] prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v1.4s, v9.4h, v15.h[4] .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 
- BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v10.4h, v15.h[0] umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] add v12.8h, v12.8h, v13.8h shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) shrn2 v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) xtn v6.8b, v0.8h xtn v7.8b, v2.8h add v12.8h, v12.8h, v13.8h st1 {v6.2s, v7.2s}, [OUT], #16 .endm .macro bilinear_interpolate_four_pixels_8888_8888_tail_head asr TMP1, X, #16 add X, X, UX add TMP1, TOP, TMP1, lsl #2 asr TMP2, X, #16 add X, X, UX add TMP2, TOP, TMP2, lsl #2 umlal2 v1.4s, v9.8h, v15.h[4] ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ushll v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS umlsl v2.4s, v10.4h, v15.h[0] umlal2 v2.4s, v10.8h, v15.h[0] ushll v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS ld1 {v20.2s}, [TMP1], STRIDE umlsl v3.4s, v11.4h, v15.h[4] umlal2 v3.4s, v11.8h, v15.h[4] ld1 {v21.2s}, [TMP1] umull v8.8h, v20.8b, v28.8b umlal v8.8h, v21.8b, v29.8b shrn v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn2 v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS) shrn v4.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS) ld1 {v22.2s}, [TMP2], STRIDE shrn2 v4.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS) add v12.8h, v12.8h, v13.8h ld1 {v23.2s}, [TMP2] umull v9.8h, v22.8b, v28.8b asr TMP3, X, #16 add X, X, UX add TMP3, TOP, TMP3, lsl #2 asr TMP4, X, #16 add X, X, UX add TMP4, TOP, TMP4, lsl #2 umlal v9.8h, v23.8b, v29.8b ld1 {v22.2s}, [TMP3], STRIDE ushr v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS) ld1 {v23.2s}, [TMP3] umull v10.8h, v22.8b, v28.8b umlal v10.8h, v23.8b, v29.8b xtn v6.8b, v0.8h ushll v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS xtn v7.8b, v4.8h umlsl v0.4s, v8.4h, v15.h[0] umlal2 
v0.4s, v8.8h, v15.h[0] prfm PREFETCH_MODE, [TMP4, PF_OFFS] ld1 {v16.2s}, [TMP4], STRIDE add v12.8h, v12.8h, v13.8h ld1 {v17.2s}, [TMP4] prfm PREFETCH_MODE, [TMP4, PF_OFFS] umull v11.8h, v16.8b, v28.8b umlal v11.8h, v17.8b, v29.8b st1 {v6.2s, v7.2s}, [OUT], #16 ushll v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS umlsl v1.4s, v9.4h, v15.h[4] .endm /*****************************************************************************/ generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \ 2, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \ 2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \ 1, 2, 28, BILINEAR_FLAG_UNROLL_4 generate_bilinear_scanline_func \ pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \ 1, 1, 28, BILINEAR_FLAG_UNROLL_4 ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-arma64-neon-asm.h0000664000175000017500000013032414712446423020665 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Nokia Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) */ /* * This file contains a macro ('generate_composite_function') which can * construct 2D image processing functions, based on a common template. * Any combinations of source, destination and mask images with 8bpp, * 16bpp, 24bpp, 32bpp color formats are supported. * * This macro takes care of: * - handling of leading and trailing unaligned pixels * - doing most of the work related to L2 cache preload * - encourages the use of software pipelining for better instructions * scheduling * * The user of this macro has to provide some configuration parameters * (bit depths for the images, prefetch distance, etc.) and a set of * macros, which should implement basic code chunks responsible for * pixels processing. See 'pixman-armv8-neon-asm.S' file for the usage * examples. * * TODO: * - try overlapped pixel method (from Ian Rickards) when processing * exactly two blocks of pixels * - maybe add an option to do reverse scanline processing */ #include "pixman-arm-asm.h" /* * Bit flags for 'generate_composite_function' macro which are used * to tune generated functions behavior. */ .set FLAG_DST_WRITEONLY, 0 .set FLAG_DST_READWRITE, 1 .set FLAG_DEINTERLEAVE_32BPP, 2 /* * Constants for selecting preferable prefetch type. 
 */
.set PREFETCH_TYPE_NONE,       0 /* No prefetch at all */
.set PREFETCH_TYPE_SIMPLE,     1 /* A simple, fixed-distance-ahead prefetch */
.set PREFETCH_TYPE_ADVANCED,   2 /* Advanced fine-grained prefetch */

/*
 * prefetch mode
 * available modes are:
 * pldl1keep
 * pldl1strm
 * pldl2keep
 * pldl2strm
 * pldl3keep
 * pldl3strm
 */
#define PREFETCH_MODE pldl1keep

/*
 * Definitions of supplementary pixld/pixst macros (for partial load/store of
 * pixel data).
 */

/* Load/store one 64-bit vector register, post-incrementing the address by 8. */
.macro pixldst1 op, elem_size, reg1, mem_operand, abits
    \op {v\()\reg1\().\()\elem_size}, [\()\mem_operand\()], #8
.endm

/* Load/store two consecutive 64-bit vector registers (16 bytes). */
.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
    \op {v\()\reg1\().\()\elem_size, v\()\reg2\().\()\elem_size}, [\()\mem_operand\()], #16
.endm

/* Load/store four consecutive 64-bit vector registers (32 bytes). */
.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
    \op {v\()\reg1\().\()\elem_size, v\()\reg2\().\()\elem_size, v\()\reg3\().\()\elem_size, v\()\reg4\().\()\elem_size}, [\()\mem_operand\()], #32
.endm

/* Load/store a single lane 'idx' of one register ('bytes' bytes of memory). */
.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits, bytes
    \op {v\()\reg1\().\()\elem_size}[\idx], [\()\mem_operand\()], #\()\bytes\()
.endm

/* Three-register variant (24 bytes), used for 24bpp (3 bytes/pixel) data. */
.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
    \op {v\()\reg1\().\()\elem_size, v\()\reg2\().\()\elem_size, v\()\reg3\().\()\elem_size}, [\()\mem_operand\()], #24
.endm

/* Single-lane three-register variant: one 24bpp pixel (3 bytes). */
.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
    \op {v\()\reg1\().\()\elem_size, v\()\reg2\().\()\elem_size, v\()\reg3\().\()\elem_size}[\idx], [\()\mem_operand\()], #3
.endm

/*
 * Dispatch a partial load/store of 'numbytes' bytes to the helper macros
 * above, choosing lane granularity from 'elem_size' and (for 4- and 2-byte
 * transfers) from RESPECT_STRICT_ALIGNMENT.
 */
.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
.if \numbytes == 32
.if \elem_size==32
    pixldst4 \op, 2s, %(\basereg+4), %(\basereg+5), \
                      %(\basereg+6), %(\basereg+7), \mem_operand, \abits
.elseif \elem_size==16
    pixldst4 \op, 4h, %(\basereg+4), %(\basereg+5), \
                      %(\basereg+6), %(\basereg+7), \mem_operand, \abits
.else
    pixldst4 \op, 8b, %(\basereg+4), %(\basereg+5), \
                      %(\basereg+6), %(\basereg+7), \mem_operand, \abits
.endif
.elseif \numbytes == 16
.if \elem_size==32
    pixldst2 \op, 2s, %(\basereg+2), %(\basereg+3), \mem_operand, \abits
.elseif \elem_size==16
    pixldst2 \op, 4h, %(\basereg+2), %(\basereg+3), \mem_operand, \abits
.else
    pixldst2 \op, 8b, %(\basereg+2), %(\basereg+3), \mem_operand, \abits
.endif
.elseif \numbytes == 8
.if \elem_size==32
    pixldst1 \op, 2s, %(\basereg+1), \mem_operand, \abits
.elseif \elem_size==16
    pixldst1 \op, 4h, %(\basereg+1), \mem_operand, \abits
.else
    pixldst1 \op, 8b, %(\basereg+1), \mem_operand, \abits
.endif
.elseif \numbytes == 4
.if !RESPECT_STRICT_ALIGNMENT || (\elem_size == 32)
    pixldst0 \op, s, %(\basereg+0), 1, \mem_operand, \abits, 4
.elseif \elem_size == 16
    pixldst0 \op, h, %(\basereg+0), 2, \mem_operand, \abits, 2
    pixldst0 \op, h, %(\basereg+0), 3, \mem_operand, \abits, 2
.else
    pixldst0 \op, b, %(\basereg+0), 4, \mem_operand, \abits, 1
    pixldst0 \op, b, %(\basereg+0), 5, \mem_operand, \abits, 1
    pixldst0 \op, b, %(\basereg+0), 6, \mem_operand, \abits, 1
    pixldst0 \op, b, %(\basereg+0), 7, \mem_operand, \abits, 1
.endif
.elseif \numbytes == 2
.if !RESPECT_STRICT_ALIGNMENT || (\elem_size == 16)
    pixldst0 \op, h, %(\basereg+0), 1, \mem_operand, \abits, 2
.else
    pixldst0 \op, b, %(\basereg+0), 2, \mem_operand, \abits, 1
    pixldst0 \op, b, %(\basereg+0), 3, \mem_operand, \abits, 1
.endif
.elseif \numbytes == 1
    pixldst0 \op, b, %(\basereg+0), 1, \mem_operand, \abits, 1
.else
.error "unsupported size: \numbytes"
.endif
.endm

/* Load 'numpix' pixels of 'bpp' bits each from 'mem_operand'. */
.macro pixld numpix, bpp, basereg, mem_operand, abits=0
.if \bpp > 0
.if (\bpp == 32) && (\numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    /* ld4 deinterleaves the four channels into separate registers */
    pixldst4 ld4, 8b, %(\basereg+4), %(\basereg+5), \
                      %(\basereg+6), %(\basereg+7), \mem_operand, \abits
.elseif (\bpp == 24) && (\numpix == 8)
    pixldst3 ld3, 8b, %(\basereg+3), %(\basereg+4), %(\basereg+5), \mem_operand
.elseif (\bpp == 24) && (\numpix == 4)
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 4, \mem_operand
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 5, \mem_operand
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 6, \mem_operand
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 7, \mem_operand
.elseif (\bpp == 24) && (\numpix == 2)
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 2, \mem_operand
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 3, \mem_operand
.elseif (\bpp == 24) && (\numpix == 1)
    pixldst30 ld3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 1, \mem_operand
.else
    pixldst %(\numpix * \bpp / 8), ld1, %(\bpp), \basereg, \mem_operand, \abits
.endif
.endif
.endm

/* Store 'numpix' pixels of 'bpp' bits each to 'mem_operand'. */
.macro pixst numpix, bpp, basereg, mem_operand, abits=0
.if \bpp > 0
.if (\bpp == 32) && (\numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    /* st4 re-interleaves per-channel registers back into packed pixels */
    pixldst4 st4, 8b, %(\basereg+4), %(\basereg+5), \
                      %(\basereg+6), %(\basereg+7), \mem_operand, \abits
.elseif (\bpp == 24) && (\numpix == 8)
    pixldst3 st3, 8b, %(\basereg+3), %(\basereg+4), %(\basereg+5), \mem_operand
.elseif (\bpp == 24) && (\numpix == 4)
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 4, \mem_operand
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 5, \mem_operand
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 6, \mem_operand
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 7, \mem_operand
.elseif (\bpp == 24) && (\numpix == 2)
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 2, \mem_operand
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 3, \mem_operand
.elseif (\bpp == 24) && (\numpix == 1)
    pixldst30 st3, b, %(\basereg+0), %(\basereg+1), %(\basereg+2), 1, \mem_operand
.elseif \numpix * \bpp == 32 && \abits == 32
    pixldst 4, st1, 32, \basereg, \mem_operand, \abits
.elseif \numpix * \bpp == 16 && \abits == 16
    pixldst 2, st1, 16, \basereg, \mem_operand, \abits
.else
    pixldst %(\numpix * \bpp / 8), st1, %(\bpp), \basereg, \mem_operand, \abits
.endif
.endif
.endm

/* Like pixld, but passes the largest usable alignment hint (capped at 128). */
.macro pixld_a numpix, bpp, basereg, mem_operand
.if (\bpp * \numpix) <= 128
    pixld \numpix, \bpp, \basereg, \mem_operand, %(\bpp * \numpix)
.else
    pixld \numpix, \bpp, \basereg, \mem_operand, 128
.endif
.endm

/* Like pixst, but passes the largest usable alignment hint (capped at 128). */
.macro pixst_a numpix, bpp, basereg, mem_operand
.if (\bpp * \numpix) <= 128
    pixst \numpix, \bpp, \basereg, \mem_operand, %(\bpp * \numpix)
.else
    pixst \numpix, \bpp, \basereg, \mem_operand, 128
.endif
.endm

/*
 * Pixel fetcher for nearest scaling (needs TMP1, TMP2, VX, UNIT_X register
 * aliases to be defined)
 */
.macro pixld1_s elem_size, reg1, mem_operand
.if \elem_size == 16
    /* For each lane: pixel index = VX >> 16; then advance VX by UNIT_X
     * and wrap it back into [0, SRC_WIDTH_FIXED) by repeated subtraction. */
    asr         TMP1, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP1, \mem_operand, TMP1, lsl #1
    asr         TMP2, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP2, \mem_operand, TMP2, lsl #1
    ld1         {v\()\reg1\().h}[0], [TMP1]
    asr         TMP1, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP1, \mem_operand, TMP1, lsl #1
    ld1         {v\()\reg1\().h}[1], [TMP2]
    asr         TMP2, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP2, \mem_operand, TMP2, lsl #1
    ld1         {v\()\reg1\().h}[2], [TMP1]
    ld1         {v\()\reg1\().h}[3], [TMP2]
.elseif \elem_size == 32
    asr         TMP1, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP1, \mem_operand, TMP1, lsl #2
    asr         TMP2, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP2, \mem_operand, TMP2, lsl #2
    ld1         {v\()\reg1\().s}[0], [TMP1]
    ld1         {v\()\reg1\().s}[1], [TMP2]
.else
    .error "unsupported"
.endif
.endm

/* Fetch two registers worth of nearest-scaled pixels. */
.macro pixld2_s elem_size, reg1, reg2, mem_operand
.if 0 /* \elem_size == 32 */
    /* disabled alternative path (kept for reference); no VX wrapping here */
    mov         TMP1, VX, asr #16
    add         VX, VX, UNIT_X, asl #1
    add         TMP1, \mem_operand, TMP1, asl #2
    mov         TMP2, VX, asr #16
    sub         VX, VX, UNIT_X
    add         TMP2, \mem_operand, TMP2, asl #2
    ld1         {v\()\reg1\().s}[0], [TMP1]
    mov         TMP1, VX, asr #16
    add         VX, VX, UNIT_X, asl #1
    add         TMP1, \mem_operand, TMP1, asl #2
    ld1         {v\()\reg2\().s}[0], [TMP2, :32]
    mov         TMP2, VX, asr #16
    add         VX, VX, UNIT_X
    add         TMP2, \mem_operand, TMP2, asl #2
    ld1         {v\()\reg1\().s}[1], [TMP1]
    ld1         {v\()\reg2\().s}[1], [TMP2]
.else
    pixld1_s    \elem_size, \reg1, \mem_operand
    pixld1_s    \elem_size, \reg2, \mem_operand
.endif
.endm

/* Fetch a single lane of one nearest-scaled pixel into lane 'idx'. */
.macro pixld0_s elem_size, reg1, idx, mem_operand
.if \elem_size == 16
    asr         TMP1, VX, #16
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP1, \mem_operand, TMP1, lsl #1
    ld1         {v\()\reg1\().h}[\idx], [TMP1]
.elseif \elem_size == 32
    asr         DUMMY, VX, #16
    mov         TMP1, DUMMY
    adds        VX, VX, UNIT_X
    bmi         55f
5:  subs        VX, VX, SRC_WIDTH_FIXED
    bpl         5b
55:
    add         TMP1, \mem_operand, TMP1, lsl #2
    ld1         {v\()\reg1\().s}[\idx], [TMP1]
.endif
.endm

/* Nearest-scaling counterpart of 'pixldst': dispatch by byte count. */
.macro pixld_s_internal numbytes, elem_size, basereg, mem_operand
.if \numbytes == 32
    pixld2_s    \elem_size, %(\basereg+4), %(\basereg+5), \mem_operand
    pixld2_s    \elem_size, %(\basereg+6), %(\basereg+7), \mem_operand
    pixdeinterleave \elem_size, %(\basereg+4)
.elseif \numbytes == 16
    pixld2_s    \elem_size, %(\basereg+2), %(\basereg+3), \mem_operand
.elseif \numbytes == 8
    pixld1_s    \elem_size, %(\basereg+1), \mem_operand
.elseif \numbytes == 4
.if \elem_size == 32
    pixld0_s    \elem_size, %(\basereg+0), 1, \mem_operand
.elseif \elem_size == 16
    pixld0_s    \elem_size, %(\basereg+0), 2, \mem_operand
    pixld0_s    \elem_size, %(\basereg+0), 3, \mem_operand
.else
    pixld0_s    \elem_size, %(\basereg+0), 4, \mem_operand
    pixld0_s    \elem_size, %(\basereg+0), 5, \mem_operand
    pixld0_s    \elem_size, %(\basereg+0), 6, \mem_operand
    pixld0_s    \elem_size, %(\basereg+0), 7, \mem_operand
.endif
.elseif \numbytes == 2
.if \elem_size == 16
    pixld0_s    \elem_size, %(\basereg+0), 1, \mem_operand
.else
    pixld0_s    \elem_size, %(\basereg+0), 2, \mem_operand
    pixld0_s    \elem_size, %(\basereg+0), 3, \mem_operand
.endif
.elseif \numbytes == 1
    pixld0_s    \elem_size, %(\basereg+0), 1, \mem_operand
.else
    .error "unsupported size: \numbytes"
.endif
.endm

/* Nearest-scaling counterpart of 'pixld'. */
.macro pixld_s numpix, bpp, basereg, mem_operand
.if \bpp > 0
    pixld_s_internal %(\numpix * \bpp / 8), %(\bpp), \basereg, \mem_operand
.endif
.endm

/* Unzip (deinterleave) two 8x8-bit registers; v16 is saved/restored via DUMMY. */
.macro vuzp8 reg1, reg2
    umov        DUMMY, v16.d[0]
    uzp1        v16.8b,          v\()\reg1\().8b, v\()\reg2\().8b
    uzp2        v\()\reg2\().8b, v\()\reg1\().8b, v\()\reg2\().8b
    mov         v\()\reg1\().8b, v16.8b
    mov         v16.d[0], DUMMY
.endm

/* Zip (interleave) two 8x8-bit registers; v16 is saved/restored via DUMMY. */
.macro vzip8 reg1, reg2
    umov        DUMMY, v16.d[0]
    zip1        v16.8b,          v\()\reg1\().8b, v\()\reg2\().8b
    zip2        v\()\reg2\().8b, v\()\reg1\().8b, v\()\reg2\().8b
    mov         v\()\reg1\().8b, v16.8b
    mov         v16.d[0], DUMMY
.endm

/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
.macro pixdeinterleave bpp, basereg
.if (\bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    vuzp8 %(\basereg+0), %(\basereg+1)
    vuzp8 %(\basereg+2), %(\basereg+3)
    vuzp8 %(\basereg+1), %(\basereg+3)
    vuzp8 %(\basereg+0), %(\basereg+2)
.endif
.endm

/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
.macro pixinterleave bpp, basereg
.if (\bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
    vzip8 %(\basereg+0), %(\basereg+2)
    vzip8 %(\basereg+1), %(\basereg+3)
    vzip8 %(\basereg+2), %(\basereg+3)
    vzip8 %(\basereg+0), %(\basereg+1)
.endif
.endm

/*
 * This is a macro for implementing cache preload. The main idea is that
 * cache preload logic is mostly independent from the rest of pixels
 * processing code. It starts at the top left pixel and moves forward
 * across pixels and can jump across scanlines. Prefetch distance is
 * handled in an 'incremental' way: it starts from 0 and advances to the
 * optimal distance over time. After reaching optimal prefetch distance,
 * it is kept constant. There are some checks which prevent prefetching
 * unneeded pixel lines below the image (but it still can prefetch a bit
 * more data on the right side of the image - not a big issue and may
 * be actually helpful when rendering text glyphs). Additional trick is
 * the use of LDR instruction for prefetch instead of PLD when moving to
 * the next line, the point is that we have a high chance of getting TLB
 * miss in this case, and PLD would be useless.
 *
 * This sounds like it may introduce a noticeable overhead (when working with
 * fully cached data).
 * But in reality, due to having a separate pipeline and
 * instruction queue for NEON unit in ARM Cortex-A8, normal ARM code can
 * execute simultaneously with NEON and be completely shadowed by it. Thus
 * we get no performance overhead at all (*). This looks like a very nice
 * feature of Cortex-A8, if used wisely. We don't have a hardware prefetcher,
 * but still can implement some rather advanced prefetch logic in software
 * for almost zero cost!
 *
 * (*) The overhead of the prefetcher is visible when running some trivial
 * pixels processing like simple copy. Anyway, having prefetch is a must
 * when working with the graphics data.
 */

/* Emit instruction '\a \x' only when the ADVANCED prefetcher is selected. */
.macro PF a, x:vararg
.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
    \a \x
.endif
.endm

/*
 * Advance the software prefetcher by 'std_increment' pixels (plus an extra
 * 'boost_increment' while PF_CTL still has boost budget in its low nibble),
 * issue PRFM for the src/dst/mask scanlines, and step the PF_* pointers to
 * the next scanline (via LDRSB touch) once PF_X passes ORIG_W.
 */
.macro cache_preload std_increment, boost_increment
.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
.if \std_increment != 0
    PF add, PF_X, PF_X, #\std_increment
.endif
    PF tst, PF_CTL, #0xF
    PF beq, 71f
    PF add, PF_X, PF_X, #\boost_increment
    PF sub, PF_CTL, PF_CTL, #1
71:
    PF cmp, PF_X, ORIG_W
.if src_bpp_shift >= 0
    PF lsl, DUMMY, PF_X, #src_bpp_shift
    PF prfm, PREFETCH_MODE, [PF_SRC, DUMMY]
.endif
.if dst_r_bpp != 0
    PF lsl, DUMMY, PF_X, #dst_bpp_shift
    PF prfm, PREFETCH_MODE, [PF_DST, DUMMY]
.endif
.if mask_bpp_shift >= 0
    PF lsl, DUMMY, PF_X, #mask_bpp_shift
    PF prfm, PREFETCH_MODE, [PF_MASK, DUMMY]
.endif
    PF ble, 71f
    PF sub, PF_X, PF_X, ORIG_W
    PF subs, PF_CTL, PF_CTL, #0x10
71:
    PF ble, 72f
    /* touch the next scanline with a real load (LDRSB) to pull the TLB entry */
.if src_bpp_shift >= 0
    PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift
    PF ldrsb, DUMMY, [PF_SRC, DUMMY]
    PF add, PF_SRC, PF_SRC, #1
.endif
.if dst_r_bpp != 0
    PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift
    PF ldrsb, DUMMY, [PF_DST, DUMMY]
    PF add, PF_DST, PF_DST, #1
.endif
.if mask_bpp_shift >= 0
    PF lsl, DUMMY, MASK_STRIDE, #mask_bpp_shift
    PF ldrsb, DUMMY, [PF_MASK, DUMMY]
    PF add, PF_MASK, PF_MASK, #1
.endif
72:
.endif
.endm

/* Fixed-distance PRFM for the SIMPLE prefetch type. */
.macro cache_preload_simple
.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
.if src_bpp > 0
    prfm PREFETCH_MODE, [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
.endif
.if dst_r_bpp > 0
    prfm PREFETCH_MODE, [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
.endif
.if mask_bpp > 0
    prfm PREFETCH_MODE, [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
.endif
.endif
.endm

/* Load one pixblock of mask pixels into the mask register block. */
.macro fetch_mask_pixblock
    pixld       pixblock_size, mask_bpp, \
                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
.endm

/*
 * Macro which is used to process leading pixels until destination
 * pointer is properly aligned (at 16 bytes boundary). When destination
 * buffer uses 16bpp format, this is unnecessary, or even pointless.
 */
.macro ensure_destination_ptr_alignment process_pixblock_head, \
                                        process_pixblock_tail, \
                                        process_pixblock_tail_head
.if dst_w_bpp != 24
    tst         DST_R, #0xF
    beq         52f

.if src_bpp > 0 || mask_bpp > 0 || dst_r_bpp > 0
.irp lowbit, 1, 2, 4, 8, 16
.if (dst_w_bpp <= (\lowbit * 8)) && ((\lowbit * 8) < (pixblock_size * dst_w_bpp))
.if \lowbit < 16 /* we don't need more than 16-byte alignment */
    tst         DST_R, #\lowbit
    beq         51f
.endif
    pixld_src   (\lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
    pixld       (\lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
.if dst_r_bpp > 0
    pixld_a     (\lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
.else
    add         DST_R, DST_R, #\lowbit
.endif
    PF add, PF_X, PF_X, #(\lowbit * 8 / dst_w_bpp)
    sub         W, W, #(\lowbit * 8 / dst_w_bpp)
51:
.endif
.endr
.endif
    pixdeinterleave src_bpp, src_basereg
    pixdeinterleave mask_bpp, mask_basereg
    pixdeinterleave dst_r_bpp, dst_r_basereg

    \process_pixblock_head
    cache_preload 0, pixblock_size
    cache_preload_simple
    \process_pixblock_tail

    pixinterleave dst_w_bpp, dst_w_basereg

.irp lowbit, 1, 2, 4, 8, 16
.if (dst_w_bpp <= (\lowbit * 8)) && ((\lowbit * 8) < (pixblock_size * dst_w_bpp))
.if \lowbit < 16 /* we don't need more than 16-byte alignment */
    tst         DST_W, #\lowbit
    beq         51f
.endif
.if src_bpp == 0 && mask_bpp == 0 && dst_r_bpp == 0
    sub         W, W, #(\lowbit * 8 / dst_w_bpp)
.endif
    pixst_a     (\lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
51:
.endif
.endr
.endif
52:
.endm

/*
 * Special code for processing up to (pixblock_size - 1) remaining
 * trailing pixels. As SIMD processing performs operation on
 * pixblock_size pixels, anything smaller than this has to be loaded
 * and stored in a special way. Loading and storing of pixel data is
 * performed in such a way that we fill some 'slots' in the NEON
 * registers (some slots naturally are unused), then perform compositing
 * operation as usual. In the end, the data is taken from these 'slots'
 * and saved to memory.
 *
 * cache_preload_flag - allows suppressing prefetch when set to 0
 * dst_aligned_flag   - selects whether destination buffer is aligned
 */
.macro process_trailing_pixels cache_preload_flag, \
                               dst_aligned_flag, \
                               process_pixblock_head, \
                               process_pixblock_tail, \
                               process_pixblock_tail_head
    tst         W, #(pixblock_size - 1)
    beq         52f
.if src_bpp > 0 || mask_bpp > 0 || dst_r_bpp > 0
.irp chunk_size, 16, 8, 4, 2, 1
.if pixblock_size > \chunk_size
    tst         W, #\chunk_size
    beq         51f
    pixld_src   \chunk_size, src_bpp, src_basereg, SRC
    pixld       \chunk_size, mask_bpp, mask_basereg, MASK
.if \dst_aligned_flag != 0
    pixld_a     \chunk_size, dst_r_bpp, dst_r_basereg, DST_R
.else
    pixld       \chunk_size, dst_r_bpp, dst_r_basereg, DST_R
.endif
.if \cache_preload_flag != 0
    PF add, PF_X, PF_X, #\chunk_size
.endif
51:
.endif
.endr
.endif
    pixdeinterleave src_bpp, src_basereg
    pixdeinterleave mask_bpp, mask_basereg
    pixdeinterleave dst_r_bpp, dst_r_basereg

    \process_pixblock_head
.if \cache_preload_flag != 0
    cache_preload 0, pixblock_size
    cache_preload_simple
.endif
    \process_pixblock_tail
    pixinterleave dst_w_bpp, dst_w_basereg
.irp chunk_size, 16, 8, 4, 2, 1
.if pixblock_size > \chunk_size
    tst         W, #\chunk_size
    beq         51f
.if \dst_aligned_flag != 0
    pixst_a     \chunk_size, dst_w_bpp, dst_w_basereg, DST_W
.else
    pixst       \chunk_size, dst_w_bpp, dst_w_basereg, DST_W
.endif
51:
.endif
.endr
52:
.endm

/*
 * Macro, which performs all the needed operations to switch to the next
 * scanline and start the next loop iteration
unless all the scanlines * are already processed. */ .macro advance_to_next_scanline start_of_loop_label mov W, ORIG_W add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift .if src_bpp != 0 add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift .endif .if mask_bpp != 0 add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift .endif .if (dst_w_bpp != 24) sub DST_W, DST_W, W, lsl #dst_bpp_shift .endif .if (src_bpp != 24) && (src_bpp != 0) sub SRC, SRC, W, lsl #src_bpp_shift .endif .if (mask_bpp != 24) && (mask_bpp != 0) sub MASK, MASK, W, lsl #mask_bpp_shift .endif subs H, H, #1 mov DST_R, DST_W bge \start_of_loop_label .endm /* * Registers are allocated in the following way by default: * v0, v1, v2, v3 - reserved for loading source pixel data * v4, v5, v6, v7 - reserved for loading destination pixel data * v24, v25, v26, v27 - reserved for loading mask pixel data * v28, v29, v30, v31 - final destination pixel data for writeback to memory */ .macro generate_composite_function fname, \ src_bpp_, \ mask_bpp_, \ dst_w_bpp_, \ flags, \ pixblock_size_, \ prefetch_distance, \ init, \ cleanup, \ process_pixblock_head, \ process_pixblock_tail, \ process_pixblock_tail_head, \ dst_w_basereg_ = 28, \ dst_r_basereg_ = 4, \ src_basereg_ = 0, \ mask_basereg_ = 24 pixman_asm_function \fname stp x29, x30, [sp, -16]! mov x29, sp sub sp, sp, 232 /* push all registers */ sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32 stp x8, x9, [x29, -80] stp x10, x11, [x29, -96] stp x12, x13, [x29, -112] stp x14, x15, [x29, -128] stp x16, x17, [x29, -144] stp x18, x19, [x29, -160] stp x20, x21, [x29, -176] stp x22, x23, [x29, -192] stp x24, x25, [x29, -208] stp x26, x27, [x29, -224] str x28, [x29, -232] /* * Select prefetch type for this function. If prefetch distance is * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch * has to be used instead of ADVANCED. 
*/ .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT .if \prefetch_distance == 0 .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE .elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \ ((\src_bpp_ == 24) || (\mask_bpp_ == 24) || (\dst_w_bpp_ == 24)) .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE .endif /* * Make some macro arguments globally visible and accessible * from other macros */ .set src_bpp, \src_bpp_ .set mask_bpp, \mask_bpp_ .set dst_w_bpp, \dst_w_bpp_ .set pixblock_size, \pixblock_size_ .set dst_w_basereg, \dst_w_basereg_ .set dst_r_basereg, \dst_r_basereg_ .set src_basereg, \src_basereg_ .set mask_basereg, \mask_basereg_ .macro pixld_src x:vararg pixld \x .endm .macro fetch_src_pixblock pixld_src pixblock_size, src_bpp, \ (src_basereg - pixblock_size * src_bpp / 64), SRC .endm /* * Assign symbolic names to registers */ W .req x0 /* width (is updated during processing) */ H .req x1 /* height (is updated during processing) */ DST_W .req x2 /* destination buffer pointer for writes */ DST_STRIDE .req x3 /* destination image stride */ SRC .req x4 /* source buffer pointer */ SRC_STRIDE .req x5 /* source image stride */ MASK .req x6 /* mask pointer */ MASK_STRIDE .req x7 /* mask stride */ DST_R .req x8 /* destination buffer pointer for reads */ PF_CTL .req x9 /* combined lines counter and prefetch */ /* distance increment counter */ PF_X .req x10 /* pixel index in a scanline for current */ /* pretetch position */ PF_SRC .req x11 /* pointer to source scanline start */ /* for prefetch purposes */ PF_DST .req x12 /* pointer to destination scanline start */ /* for prefetch purposes */ PF_MASK .req x13 /* pointer to mask scanline start */ /* for prefetch purposes */ ORIG_W .req x14 /* saved original width */ DUMMY .req x15 /* temporary register */ sxtw x0, w0 sxtw x1, w1 sxtw x3, w3 sxtw x5, w5 sxtw x7, w7 .set mask_bpp_shift, -1 .if src_bpp == 32 .set src_bpp_shift, 2 .elseif src_bpp == 24 .set src_bpp_shift, 0 .elseif src_bpp == 16 .set src_bpp_shift, 1 
.elseif src_bpp == 8 .set src_bpp_shift, 0 .elseif src_bpp == 0 .set src_bpp_shift, -1 .else .error "requested src bpp (src_bpp) is not supported" .endif .if mask_bpp == 32 .set mask_bpp_shift, 2 .elseif mask_bpp == 24 .set mask_bpp_shift, 0 .elseif mask_bpp == 8 .set mask_bpp_shift, 0 .elseif mask_bpp == 0 .set mask_bpp_shift, -1 .else .error "requested mask bpp (mask_bpp) is not supported" .endif .if dst_w_bpp == 32 .set dst_bpp_shift, 2 .elseif dst_w_bpp == 24 .set dst_bpp_shift, 0 .elseif dst_w_bpp == 16 .set dst_bpp_shift, 1 .elseif dst_w_bpp == 8 .set dst_bpp_shift, 0 .else .error "requested dst bpp (dst_w_bpp) is not supported" .endif .if (((\flags) & FLAG_DST_READWRITE) != 0) .set dst_r_bpp, dst_w_bpp .else .set dst_r_bpp, 0 .endif .if (((\flags) & FLAG_DEINTERLEAVE_32BPP) != 0) .set DEINTERLEAVE_32BPP_ENABLED, 1 .else .set DEINTERLEAVE_32BPP_ENABLED, 0 .endif .if \prefetch_distance < 0 || \prefetch_distance > 15 .error "invalid prefetch distance (\prefetch_distance)" .endif PF mov, PF_X, #0 mov DST_R, DST_W .if src_bpp == 24 sub SRC_STRIDE, SRC_STRIDE, W sub SRC_STRIDE, SRC_STRIDE, W, lsl #1 .endif .if mask_bpp == 24 sub MASK_STRIDE, MASK_STRIDE, W sub MASK_STRIDE, MASK_STRIDE, W, lsl #1 .endif .if dst_w_bpp == 24 sub DST_STRIDE, DST_STRIDE, W sub DST_STRIDE, DST_STRIDE, W, lsl #1 .endif /* * Setup advanced prefetcher initial state */ PF mov, PF_SRC, SRC PF mov, PF_DST, DST_R PF mov, PF_MASK, MASK /* PF_CTL = \prefetch_distance | ((h - 1) << 4) */ PF lsl, DUMMY, H, #4 PF mov, PF_CTL, DUMMY PF add, PF_CTL, PF_CTL, #(\prefetch_distance - 0x10) \init subs H, H, #1 mov ORIG_W, W blt 9f cmp W, #(pixblock_size * 2) blt 800f /* * This is the start of the pipelined loop, which if optimized for * long scanlines */ 0: ensure_destination_ptr_alignment \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head /* Implement "head (tail_head) ... 
(tail_head) tail" loop pattern */ pixld_a pixblock_size, dst_r_bpp, \ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R fetch_src_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK PF add, PF_X, PF_X, #pixblock_size \process_pixblock_head cache_preload 0, pixblock_size cache_preload_simple subs W, W, #(pixblock_size * 2) blt 200f 100: \process_pixblock_tail_head cache_preload_simple subs W, W, #pixblock_size bge 100b 200: \process_pixblock_tail pixst_a pixblock_size, dst_w_bpp, \ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W /* Process the remaining trailing pixels in the scanline */ process_trailing_pixels 1, 1, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head advance_to_next_scanline 0b \cleanup 1000: /* pop all registers */ sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x8, x9, [x29, -80] ldp x10, x11, [x29, -96] ldp x12, x13, [x29, -112] ldp x14, x15, [x29, -128] ldp x16, x17, [x29, -144] ldp x18, x19, [x29, -160] ldp x20, x21, [x29, -176] ldp x22, x23, [x29, -192] ldp x24, x25, [x29, -208] ldp x26, x27, [x29, -224] ldr x28, [x29, -232] mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret /* exit */ /* * This is the start of the loop, designed to process images with small width * (less than pixblock_size * 2 pixels). In this case neither pipelining * nor prefetch are used. 
*/ 800: .if src_bpp_shift >= 0 PF lsl, DUMMY, SRC_STRIDE, #src_bpp_shift PF prfm, PREFETCH_MODE, [SRC, DUMMY] .endif .if dst_r_bpp != 0 PF lsl, DUMMY, DST_STRIDE, #dst_bpp_shift PF prfm, PREFETCH_MODE, [DST_R, DUMMY] .endif .if mask_bpp_shift >= 0 PF lsl, DUMMY, MASK_STRIDE, #mask_bpp_shift PF prfm, PREFETCH_MODE, [MASK, DUMMY] .endif /* Process exactly pixblock_size pixels if needed */ tst W, #pixblock_size beq 100f pixld pixblock_size, dst_r_bpp, \ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R fetch_src_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK \process_pixblock_head \process_pixblock_tail pixst pixblock_size, dst_w_bpp, \ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W 100: /* Process the remaining trailing pixels in the scanline */ process_trailing_pixels 0, 0, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head advance_to_next_scanline 800b 9: \cleanup /* pop all registers */ sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x8, x9, [x29, -80] ldp x10, x11, [x29, -96] ldp x12, x13, [x29, -112] ldp x14, x15, [x29, -128] ldp x16, x17, [x29, -144] ldp x18, x19, [x29, -160] ldp x20, x21, [x29, -176] ldp x22, x23, [x29, -192] ldp x24, x25, [x29, -208] ldp x26, x27, [x29, -224] ldr x28, [x29, -232] mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret /* exit */ .purgem fetch_src_pixblock .purgem pixld_src .unreq SRC .unreq MASK .unreq DST_R .unreq DST_W .unreq ORIG_W .unreq W .unreq H .unreq SRC_STRIDE .unreq DST_STRIDE .unreq MASK_STRIDE .unreq PF_CTL .unreq PF_X .unreq PF_SRC .unreq PF_DST .unreq PF_MASK .unreq DUMMY pixman_end_asm_function .endm /* * A simplified variant of function generation template for a single * scanline processing (for implementing pixman combine functions) */ .macro generate_composite_function_scanline use_nearest_scaling, \ fname, \ src_bpp_, \ mask_bpp_, \ dst_w_bpp_, \ flags, \ 
pixblock_size_, \ init, \ cleanup, \ process_pixblock_head, \ process_pixblock_tail, \ process_pixblock_tail_head, \ dst_w_basereg_ = 28, \ dst_r_basereg_ = 4, \ src_basereg_ = 0, \ mask_basereg_ = 24 pixman_asm_function \fname .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE /* * Make some macro arguments globally visible and accessible * from other macros */ .set src_bpp, \src_bpp_ .set mask_bpp, \mask_bpp_ .set dst_w_bpp, \dst_w_bpp_ .set pixblock_size, \pixblock_size_ .set dst_w_basereg, \dst_w_basereg_ .set dst_r_basereg, \dst_r_basereg_ .set src_basereg, \src_basereg_ .set mask_basereg, \mask_basereg_ .if \use_nearest_scaling != 0 /* * Assign symbolic names to registers for nearest scaling */ W .req x0 DST_W .req x1 SRC .req x2 VX .req x3 UNIT_X .req x4 SRC_WIDTH_FIXED .req x5 MASK .req x6 TMP1 .req x8 TMP2 .req x9 DST_R .req x10 DUMMY .req x30 .macro pixld_src x:vararg pixld_s \x .endm sxtw x0, w0 sxtw x3, w3 sxtw x4, w4 sxtw x5, w5 stp x29, x30, [sp, -16]! mov x29, sp sub sp, sp, 88 sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 stp x8, x9, [x29, -80] str x10, [x29, -88] .else /* * Assign symbolic names to registers */ W .req x0 /* width (is updated during processing) */ DST_W .req x1 /* destination buffer pointer for writes */ SRC .req x2 /* source buffer pointer */ MASK .req x3 /* mask pointer */ DST_R .req x4 /* destination buffer pointer for reads */ DUMMY .req x30 .macro pixld_src x:vararg pixld \x .endm sxtw x0, w0 stp x29, x30, [sp, -16]! 
mov x29, sp sub sp, sp, 64 sub x29, x29, 64 st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 .endif .if (((\flags) & FLAG_DST_READWRITE) != 0) .set dst_r_bpp, dst_w_bpp .else .set dst_r_bpp, 0 .endif .if (((\flags) & FLAG_DEINTERLEAVE_32BPP) != 0) .set DEINTERLEAVE_32BPP_ENABLED, 1 .else .set DEINTERLEAVE_32BPP_ENABLED, 0 .endif .macro fetch_src_pixblock pixld_src pixblock_size, src_bpp, \ (src_basereg - pixblock_size * src_bpp / 64), SRC .endm \init mov DST_R, DST_W cmp W, #pixblock_size blt 800f ensure_destination_ptr_alignment \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head subs W, W, #pixblock_size blt 700f /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */ pixld_a pixblock_size, dst_r_bpp, \ (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R fetch_src_pixblock pixld pixblock_size, mask_bpp, \ (mask_basereg - pixblock_size * mask_bpp / 64), MASK \process_pixblock_head subs W, W, #pixblock_size blt 200f 100: \process_pixblock_tail_head subs W, W, #pixblock_size bge 100b 200: \process_pixblock_tail pixst_a pixblock_size, dst_w_bpp, \ (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W 700: /* Process the remaining trailing pixels in the scanline (dst aligned) */ process_trailing_pixels 0, 1, \ \process_pixblock_head, \ \process_pixblock_tail, \ \process_pixblock_tail_head \cleanup .if \use_nearest_scaling != 0 sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x8, x9, [x29, -80] ldr x10, [x29, -96] mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret /* exit */ .else sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret /* exit */ .endif 800: /* Process the remaining trailing pixels in the scanline (dst unaligned) */ process_trailing_pixels 0, 0, \ \process_pixblock_head, \ \process_pixblock_tail, 
\ \process_pixblock_tail_head \cleanup .if \use_nearest_scaling != 0 sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 ldp x8, x9, [x29, -80] ldr x10, [x29, -88] mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret /* exit */ .unreq DUMMY .unreq DST_R .unreq SRC .unreq W .unreq VX .unreq UNIT_X .unreq TMP1 .unreq TMP2 .unreq DST_W .unreq MASK .unreq SRC_WIDTH_FIXED .else sub x29, x29, 64 ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32 ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32 mov sp, x29 ldp x29, x30, [sp], 16 VERIFY_LR ret /* exit */ .unreq DUMMY .unreq SRC .unreq MASK .unreq DST_R .unreq DST_W .unreq W .endif .purgem fetch_src_pixblock .purgem pixld_src pixman_end_asm_function .endm .macro generate_composite_function_single_scanline x:vararg generate_composite_function_scanline 0, \x .endm .macro generate_composite_function_nearest_scanline x:vararg generate_composite_function_scanline 1, \x .endm /* Default prologue/epilogue, nothing special needs to be done */ .macro default_init .endm .macro default_cleanup .endm /* * Prologue/epilogue variant which additionally saves/restores v8-v15 * registers (they need to be saved/restored by callee according to ABI). * This is required if the code needs to use all the NEON registers. */ .macro default_init_need_all_regs .endm .macro default_cleanup_need_all_regs .endm /******************************************************************************/ /* * Conversion of 8 r5g6b6 pixels packed in 128-bit register (in) * into a planar a8r8g8b8 format (with a, r, g, b color components * stored into 64-bit registers out_a, out_r, out_g, out_b respectively). * * Warning: the conversion is destructive and the original * value (in) is lost. 
 */
.macro convert_0565_to_8888 in, out_a, out_r, out_g, out_b
    shrn        \()\out_r\().8b, \()\in\().8h,    #8
    shrn        \()\out_g\().8b, \()\in\().8h,    #3
    sli         \()\in\().8h,    \()\in\().8h,    #5
    movi        \()\out_a\().8b, #255 /* alpha is fully opaque */
    sri         \()\out_r\().8b, \()\out_r\().8b, #5
    sri         \()\out_g\().8b, \()\out_g\().8b, #6
    shrn        \()\out_b\().8b, \()\in\().8h,    #2
.endm

/* Same as convert_0565_to_8888, but does not produce the alpha channel. */
.macro convert_0565_to_x888 in, out_r, out_g, out_b
    shrn        \()\out_r\().8b, \()\in\().8h,    #8
    shrn        \()\out_g\().8b, \()\in\().8h,    #3
    sli         \()\in\().8h,    \()\in\().8h,    #5
    sri         \()\out_r\().8b, \()\out_r\().8b, #5
    sri         \()\out_g\().8b, \()\out_g\().8b, #6
    shrn        \()\out_b\().8b, \()\in\().8h,    #2
.endm

/*
 * Conversion from planar a8r8g8b8 format (with a, r, g, b color components
 * in 64-bit registers in_a, in_r, in_g, in_b respectively) into 8 r5g6b5
 * pixels packed in 128-bit register (out). Requires two temporary 128-bit
 * registers (tmp1, tmp2)
 */
.macro convert_8888_to_0565 in_r, in_g, in_b, out, tmp1, tmp2
    ushll       \()\tmp1\().8h, \()\in_g\().8b, #7
    shl         \()\tmp1\().8h, \()\tmp1\().8h, #1
    ushll       \()\out\().8h,  \()\in_r\().8b, #7
    shl         \()\out\().8h,  \()\out\().8h,  #1
    ushll       \()\tmp2\().8h, \()\in_b\().8b, #7
    shl         \()\tmp2\().8h, \()\tmp2\().8h, #1
    sri         \()\out\().8h,  \()\tmp1\().8h, #5
    sri         \()\out\().8h,  \()\tmp2\().8h, #11
.endm

/*
 * Conversion of four r5g6b5 pixels (in) to four x8r8g8b8 pixels
 * returned in (out0, out1) registers pair. Requires one temporary
 * 64-bit register (tmp).
 * 'out1' and 'in' may overlap, the original
 * value from 'in' is lost
 */
.macro convert_four_0565_to_x888_packed in, out0, out1, tmp
    shl         \()\out0\().4h, \()\in\().4h,   #5  /* G top 6 bits */
    shl         \()\tmp\().4h,  \()\in\().4h,   #11 /* B top 5 bits */
    sri         \()\in\().4h,   \()\in\().4h,   #5  /* R is ready \in top bits */
    sri         \()\out0\().4h, \()\out0\().4h, #6  /* G is ready \in top bits */
    sri         \()\tmp\().4h,  \()\tmp\().4h,  #5  /* B is ready \in top bits */
    ushr        \()\out1\().4h, \()\in\().4h,   #8  /* R is \in place */
    sri         \()\out0\().4h, \()\tmp\().4h,  #8  /* G \() B is \in place */
    zip1        \()\tmp\().4h,  \()\out0\().4h, \()\out1\().4h /* everything is \in place */
    zip2        \()\out1\().4h, \()\out0\().4h, \()\out1\().4h
    mov         \()\out0\().d[0], \()\tmp\().d[0]
.endm
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0
pixman-0.44.0/pixman/pixman-bits-image.c0000664000175000017500000010612514712446423020076 0ustar00mattst88mattst88/*
 * Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
 *             2005 Lars Knoll & Zack Rusin, Trolltech
 *             2008 Aaron Plattner, NVIDIA Corporation
 * Copyright © 2000 SuSE, Inc.
 * Copyright © 2007, 2009 Red Hat, Inc.
 * Copyright © 2008 André Tupinambá
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Keith Packard not be used in
 * advertising or publicity pertaining to distribution of the software without
 * specific, written prior permission.  Keith Packard makes no
 * representations about the suitability of this software for any purpose.  It
 * is provided "as is" without express or implied warranty.
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "pixman-private.h" #include "pixman-combine32.h" #include "pixman-inlines.h" #include "dither/blue-noise-64x64.h" /* Fetch functions */ static force_inline void fetch_pixel_no_alpha_32 (bits_image_t *image, int x, int y, pixman_bool_t check_bounds, void *out) { uint32_t *ret = out; if (check_bounds && (x < 0 || x >= image->width || y < 0 || y >= image->height)) *ret = 0; else *ret = image->fetch_pixel_32 (image, x, y); } static force_inline void fetch_pixel_no_alpha_float (bits_image_t *image, int x, int y, pixman_bool_t check_bounds, void *out) { argb_t *ret = out; if (check_bounds && (x < 0 || x >= image->width || y < 0 || y >= image->height)) ret->a = ret->r = ret->g = ret->b = 0.f; else *ret = image->fetch_pixel_float (image, x, y); } typedef void (* get_pixel_t) (bits_image_t *image, int x, int y, pixman_bool_t check_bounds, void *out); static force_inline void bits_image_fetch_pixel_nearest (bits_image_t *image, pixman_fixed_t x, pixman_fixed_t y, get_pixel_t get_pixel, void *out) { int x0 = pixman_fixed_to_int (x - pixman_fixed_e); int y0 = pixman_fixed_to_int (y - pixman_fixed_e); if (image->common.repeat != PIXMAN_REPEAT_NONE) { repeat (image->common.repeat, &x0, image->width); repeat (image->common.repeat, &y0, image->height); get_pixel (image, x0, y0, FALSE, out); } else { get_pixel (image, x0, y0, TRUE, out); } } static force_inline void bits_image_fetch_pixel_bilinear_32 (bits_image_t *image, pixman_fixed_t 
x, pixman_fixed_t y, get_pixel_t get_pixel, void *out) { pixman_repeat_t repeat_mode = image->common.repeat; int width = image->width; int height = image->height; int x1, y1, x2, y2; uint32_t tl, tr, bl, br; int32_t distx, disty; uint32_t *ret = out; x1 = x - pixman_fixed_1 / 2; y1 = y - pixman_fixed_1 / 2; distx = pixman_fixed_to_bilinear_weight (x1); disty = pixman_fixed_to_bilinear_weight (y1); x1 = pixman_fixed_to_int (x1); y1 = pixman_fixed_to_int (y1); x2 = x1 + 1; y2 = y1 + 1; if (repeat_mode != PIXMAN_REPEAT_NONE) { repeat (repeat_mode, &x1, width); repeat (repeat_mode, &y1, height); repeat (repeat_mode, &x2, width); repeat (repeat_mode, &y2, height); get_pixel (image, x1, y1, FALSE, &tl); get_pixel (image, x2, y1, FALSE, &tr); get_pixel (image, x1, y2, FALSE, &bl); get_pixel (image, x2, y2, FALSE, &br); } else { get_pixel (image, x1, y1, TRUE, &tl); get_pixel (image, x2, y1, TRUE, &tr); get_pixel (image, x1, y2, TRUE, &bl); get_pixel (image, x2, y2, TRUE, &br); } *ret = bilinear_interpolation (tl, tr, bl, br, distx, disty); } static force_inline void bits_image_fetch_pixel_bilinear_float (bits_image_t *image, pixman_fixed_t x, pixman_fixed_t y, get_pixel_t get_pixel, void *out) { pixman_repeat_t repeat_mode = image->common.repeat; int width = image->width; int height = image->height; int x1, y1, x2, y2; argb_t tl, tr, bl, br; float distx, disty; argb_t *ret = out; x1 = x - pixman_fixed_1 / 2; y1 = y - pixman_fixed_1 / 2; distx = ((float)pixman_fixed_fraction(x1)) / 65536.f; disty = ((float)pixman_fixed_fraction(y1)) / 65536.f; x1 = pixman_fixed_to_int (x1); y1 = pixman_fixed_to_int (y1); x2 = x1 + 1; y2 = y1 + 1; if (repeat_mode != PIXMAN_REPEAT_NONE) { repeat (repeat_mode, &x1, width); repeat (repeat_mode, &y1, height); repeat (repeat_mode, &x2, width); repeat (repeat_mode, &y2, height); get_pixel (image, x1, y1, FALSE, &tl); get_pixel (image, x2, y1, FALSE, &tr); get_pixel (image, x1, y2, FALSE, &bl); get_pixel (image, x2, y2, FALSE, &br); } else { 
get_pixel (image, x1, y1, TRUE, &tl); get_pixel (image, x2, y1, TRUE, &tr); get_pixel (image, x1, y2, TRUE, &bl); get_pixel (image, x2, y2, TRUE, &br); } *ret = bilinear_interpolation_float (tl, tr, bl, br, distx, disty); } static force_inline void accum_32(unsigned int *satot, unsigned int *srtot, unsigned int *sgtot, unsigned int *sbtot, const void *p, pixman_fixed_t f) { uint32_t pixel = *(uint32_t *)p; *srtot += (int)RED_8 (pixel) * f; *sgtot += (int)GREEN_8 (pixel) * f; *sbtot += (int)BLUE_8 (pixel) * f; *satot += (int)ALPHA_8 (pixel) * f; } static force_inline void reduce_32(unsigned int satot, unsigned int srtot, unsigned int sgtot, unsigned int sbtot, void *p) { uint32_t *ret = p; satot = (int32_t)(satot + 0x8000) / 65536; srtot = (int32_t)(srtot + 0x8000) / 65536; sgtot = (int32_t)(sgtot + 0x8000) / 65536; sbtot = (int32_t)(sbtot + 0x8000) / 65536; satot = CLIP ((int32_t)satot, 0, 0xff); srtot = CLIP ((int32_t)srtot, 0, 0xff); sgtot = CLIP ((int32_t)sgtot, 0, 0xff); sbtot = CLIP ((int32_t)sbtot, 0, 0xff); *ret = ((satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot)); } static force_inline void accum_float(unsigned int *satot, unsigned int *srtot, unsigned int *sgtot, unsigned int *sbtot, const void *p, pixman_fixed_t f) { const argb_t *pixel = p; *satot += pixel->a * f; *srtot += pixel->r * f; *sgtot += pixel->g * f; *sbtot += pixel->b * f; } static force_inline void reduce_float(unsigned int satot, unsigned int srtot, unsigned int sgtot, unsigned int sbtot, void *p) { argb_t *ret = p; ret->a = CLIP ((int32_t)satot / 65536.f, 0.f, 1.f); ret->r = CLIP ((int32_t)srtot / 65536.f, 0.f, 1.f); ret->g = CLIP ((int32_t)sgtot / 65536.f, 0.f, 1.f); ret->b = CLIP ((int32_t)sbtot / 65536.f, 0.f, 1.f); } typedef void (* accumulate_pixel_t) (unsigned int *satot, unsigned int *srtot, unsigned int *sgtot, unsigned int *sbtot, const void *pixel, pixman_fixed_t f); typedef void (* reduce_pixel_t) (unsigned int satot, unsigned int srtot, unsigned int sgtot, unsigned int 
sbtot, void *out); static force_inline void bits_image_fetch_pixel_convolution (bits_image_t *image, pixman_fixed_t x, pixman_fixed_t y, get_pixel_t get_pixel, void *out, accumulate_pixel_t accum, reduce_pixel_t reduce) { pixman_fixed_t *params = image->common.filter_params; int x_off = (params[0] - pixman_fixed_1) >> 1; int y_off = (params[1] - pixman_fixed_1) >> 1; int32_t cwidth = pixman_fixed_to_int (params[0]); int32_t cheight = pixman_fixed_to_int (params[1]); int32_t i, j, x1, x2, y1, y2; pixman_repeat_t repeat_mode = image->common.repeat; int width = image->width; int height = image->height; unsigned int srtot, sgtot, sbtot, satot; params += 2; x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off); y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off); x2 = x1 + cwidth; y2 = y1 + cheight; srtot = sgtot = sbtot = satot = 0; for (i = y1; i < y2; ++i) { for (j = x1; j < x2; ++j) { int rx = j; int ry = i; pixman_fixed_t f = *params; if (f) { /* Must be big enough to hold a argb_t */ argb_t pixel; if (repeat_mode != PIXMAN_REPEAT_NONE) { repeat (repeat_mode, &rx, width); repeat (repeat_mode, &ry, height); get_pixel (image, rx, ry, FALSE, &pixel); } else { get_pixel (image, rx, ry, TRUE, &pixel); } accum (&satot, &srtot, &sgtot, &sbtot, &pixel, f); } params++; } } reduce (satot, srtot, sgtot, sbtot, out); } static void bits_image_fetch_pixel_separable_convolution (bits_image_t *image, pixman_fixed_t x, pixman_fixed_t y, get_pixel_t get_pixel, void *out, accumulate_pixel_t accum, reduce_pixel_t reduce) { pixman_fixed_t *params = image->common.filter_params; pixman_repeat_t repeat_mode = image->common.repeat; int width = image->width; int height = image->height; int cwidth = pixman_fixed_to_int (params[0]); int cheight = pixman_fixed_to_int (params[1]); int x_phase_bits = pixman_fixed_to_int (params[2]); int y_phase_bits = pixman_fixed_to_int (params[3]); int x_phase_shift = 16 - x_phase_bits; int y_phase_shift = 16 - y_phase_bits; int x_off = ((cwidth << 16) - 
pixman_fixed_1) >> 1; int y_off = ((cheight << 16) - pixman_fixed_1) >> 1; pixman_fixed_t *y_params; unsigned int srtot, sgtot, sbtot, satot; int32_t x1, x2, y1, y2; int32_t px, py; int i, j; /* Round x and y to the middle of the closest phase before continuing. This * ensures that the convolution matrix is aligned right, since it was * positioned relative to a particular phase (and not relative to whatever * exact fraction we happen to get here). */ x = ((x >> x_phase_shift) << x_phase_shift) + ((1 << x_phase_shift) >> 1); y = ((y >> y_phase_shift) << y_phase_shift) + ((1 << y_phase_shift) >> 1); px = (x & 0xffff) >> x_phase_shift; py = (y & 0xffff) >> y_phase_shift; y_params = params + 4 + (1 << x_phase_bits) * cwidth + py * cheight; x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off); y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off); x2 = x1 + cwidth; y2 = y1 + cheight; srtot = sgtot = sbtot = satot = 0; for (i = y1; i < y2; ++i) { pixman_fixed_48_16_t fy = *y_params++; pixman_fixed_t *x_params = params + 4 + px * cwidth; if (fy) { for (j = x1; j < x2; ++j) { pixman_fixed_t fx = *x_params++; int rx = j; int ry = i; if (fx) { /* Must be big enough to hold a argb_t */ argb_t pixel; pixman_fixed_t f; if (repeat_mode != PIXMAN_REPEAT_NONE) { repeat (repeat_mode, &rx, width); repeat (repeat_mode, &ry, height); get_pixel (image, rx, ry, FALSE, &pixel); } else { get_pixel (image, rx, ry, TRUE, &pixel); } f = (fy * fx + 0x8000) >> 16; accum(&satot, &srtot, &sgtot, &sbtot, &pixel, f); } } } } reduce(satot, srtot, sgtot, sbtot, out); } static force_inline void bits_image_fetch_pixel_filtered (bits_image_t *image, pixman_bool_t wide, pixman_fixed_t x, pixman_fixed_t y, get_pixel_t get_pixel, void *out) { switch (image->common.filter) { case PIXMAN_FILTER_NEAREST: case PIXMAN_FILTER_FAST: bits_image_fetch_pixel_nearest (image, x, y, get_pixel, out); break; case PIXMAN_FILTER_BILINEAR: case PIXMAN_FILTER_GOOD: case PIXMAN_FILTER_BEST: if (wide) 
bits_image_fetch_pixel_bilinear_float (image, x, y, get_pixel, out); else bits_image_fetch_pixel_bilinear_32 (image, x, y, get_pixel, out); break; case PIXMAN_FILTER_CONVOLUTION: if (wide) { bits_image_fetch_pixel_convolution (image, x, y, get_pixel, out, accum_float, reduce_float); } else { bits_image_fetch_pixel_convolution (image, x, y, get_pixel, out, accum_32, reduce_32); } break; case PIXMAN_FILTER_SEPARABLE_CONVOLUTION: if (wide) { bits_image_fetch_pixel_separable_convolution (image, x, y, get_pixel, out, accum_float, reduce_float); } else { bits_image_fetch_pixel_separable_convolution (image, x, y, get_pixel, out, accum_32, reduce_32); } break; default: assert (0); break; } } static uint32_t * __bits_image_fetch_affine_no_alpha (pixman_iter_t * iter, pixman_bool_t wide, const uint32_t * mask) { pixman_image_t *image = iter->image; int offset = iter->x; int line = iter->y++; int width = iter->width; uint32_t * buffer = iter->buffer; const uint32_t wide_zero[4] = {0}; pixman_fixed_t x, y; pixman_fixed_t ux, uy; pixman_vector_t v; int i; get_pixel_t get_pixel = wide ? fetch_pixel_no_alpha_float : fetch_pixel_no_alpha_32; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (image->common.transform) { if (!pixman_transform_point_3d (image->common.transform, &v)) return iter->buffer; ux = image->common.transform->matrix[0][0]; uy = image->common.transform->matrix[1][0]; } else { ux = pixman_fixed_1; uy = 0; } x = v.vector[0]; y = v.vector[1]; for (i = 0; i < width; ++i) { if (!mask || (!wide && mask[i]) || (wide && memcmp(&mask[4 * i], wide_zero, 16) != 0)) { bits_image_fetch_pixel_filtered ( &image->bits, wide, x, y, get_pixel, buffer); } x += ux; y += uy; buffer += wide ? 
4 : 1; } return iter->buffer; } static uint32_t * bits_image_fetch_affine_no_alpha_32 (pixman_iter_t *iter, const uint32_t *mask) { return __bits_image_fetch_affine_no_alpha(iter, FALSE, mask); } static uint32_t * bits_image_fetch_affine_no_alpha_float (pixman_iter_t *iter, const uint32_t *mask) { return __bits_image_fetch_affine_no_alpha(iter, TRUE, mask); } /* General fetcher */ static force_inline void fetch_pixel_general_32 (bits_image_t *image, int x, int y, pixman_bool_t check_bounds, void *out) { uint32_t pixel, *ret = out; if (check_bounds && (x < 0 || x >= image->width || y < 0 || y >= image->height)) { *ret = 0; return; } pixel = image->fetch_pixel_32 (image, x, y); if (image->common.alpha_map) { uint32_t pixel_a; x -= image->common.alpha_origin_x; y -= image->common.alpha_origin_y; if (x < 0 || x >= image->common.alpha_map->width || y < 0 || y >= image->common.alpha_map->height) { pixel_a = 0; } else { pixel_a = image->common.alpha_map->fetch_pixel_32 ( image->common.alpha_map, x, y); pixel_a = ALPHA_8 (pixel_a); } pixel &= 0x00ffffff; pixel |= (pixel_a << 24); } *ret = pixel; } static force_inline void fetch_pixel_general_float (bits_image_t *image, int x, int y, pixman_bool_t check_bounds, void *out) { argb_t *ret = out; if (check_bounds && (x < 0 || x >= image->width || y < 0 || y >= image->height)) { ret->a = ret->r = ret->g = ret->b = 0; return; } *ret = image->fetch_pixel_float (image, x, y); if (image->common.alpha_map) { x -= image->common.alpha_origin_x; y -= image->common.alpha_origin_y; if (x < 0 || x >= image->common.alpha_map->width || y < 0 || y >= image->common.alpha_map->height) { ret->a = 0.f; } else { argb_t alpha; alpha = image->common.alpha_map->fetch_pixel_float ( image->common.alpha_map, x, y); ret->a = alpha.a; } } } static uint32_t * __bits_image_fetch_general (pixman_iter_t *iter, pixman_bool_t wide, const uint32_t *mask) { pixman_image_t *image = iter->image; int offset = iter->x; int line = iter->y++; int width = iter->width; 
uint32_t * buffer = iter->buffer; get_pixel_t get_pixel = wide ? fetch_pixel_general_float : fetch_pixel_general_32; const uint32_t wide_zero[4] = {0}; pixman_fixed_t x, y, w; pixman_fixed_t ux, uy, uw; pixman_vector_t v; int i; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (image->common.transform) { if (!pixman_transform_point_3d (image->common.transform, &v)) return buffer; ux = image->common.transform->matrix[0][0]; uy = image->common.transform->matrix[1][0]; uw = image->common.transform->matrix[2][0]; } else { ux = pixman_fixed_1; uy = 0; uw = 0; } x = v.vector[0]; y = v.vector[1]; w = v.vector[2]; for (i = 0; i < width; ++i) { pixman_fixed_t x0, y0; if (!mask || (!wide && mask[i]) || (wide && memcmp(&mask[4 * i], wide_zero, 16) != 0)) { if (w != 0) { x0 = ((uint64_t)x << 16) / w; y0 = ((uint64_t)y << 16) / w; } else { x0 = 0; y0 = 0; } bits_image_fetch_pixel_filtered ( &image->bits, wide, x0, y0, get_pixel, buffer); } x += ux; y += uy; w += uw; buffer += wide ? 
4 : 1; } return iter->buffer; } static uint32_t * bits_image_fetch_general_32 (pixman_iter_t *iter, const uint32_t *mask) { return __bits_image_fetch_general(iter, FALSE, mask); } static uint32_t * bits_image_fetch_general_float (pixman_iter_t *iter, const uint32_t *mask) { return __bits_image_fetch_general(iter, TRUE, mask); } static void replicate_pixel_32 (bits_image_t * bits, int x, int y, int width, uint32_t * buffer) { uint32_t color; uint32_t *end; color = bits->fetch_pixel_32 (bits, x, y); end = buffer + width; while (buffer < end) *(buffer++) = color; } static void replicate_pixel_float (bits_image_t * bits, int x, int y, int width, uint32_t * b) { argb_t color; argb_t *buffer = (argb_t *)b; argb_t *end; color = bits->fetch_pixel_float (bits, x, y); end = buffer + width; while (buffer < end) *(buffer++) = color; } static void bits_image_fetch_untransformed_repeat_none (bits_image_t *image, pixman_bool_t wide, int x, int y, int width, uint32_t * buffer) { uint32_t w; if (y < 0 || y >= image->height) { memset (buffer, 0, width * (wide? sizeof (argb_t) : 4)); return; } if (x < 0) { w = MIN (width, -x); memset (buffer, 0, w * (wide ? sizeof (argb_t) : 4)); width -= w; buffer += w * (wide? 4 : 1); x += w; } if (x < image->width) { w = MIN (width, image->width - x); if (wide) image->fetch_scanline_float (image, x, y, w, buffer, NULL); else image->fetch_scanline_32 (image, x, y, w, buffer, NULL); width -= w; buffer += w * (wide? 4 : 1); x += w; } memset (buffer, 0, width * (wide ? 
sizeof (argb_t) : 4)); } static void bits_image_fetch_untransformed_repeat_normal (bits_image_t *image, pixman_bool_t wide, int x, int y, int width, uint32_t * buffer) { uint32_t w; while (y < 0) y += image->height; while (y >= image->height) y -= image->height; if (image->width == 1) { if (wide) replicate_pixel_float (image, 0, y, width, buffer); else replicate_pixel_32 (image, 0, y, width, buffer); return; } while (width) { while (x < 0) x += image->width; while (x >= image->width) x -= image->width; w = MIN (width, image->width - x); if (wide) image->fetch_scanline_float (image, x, y, w, buffer, NULL); else image->fetch_scanline_32 (image, x, y, w, buffer, NULL); buffer += w * (wide? 4 : 1); x += w; width -= w; } } static uint32_t * bits_image_fetch_untransformed_32 (pixman_iter_t * iter, const uint32_t *mask) { pixman_image_t *image = iter->image; int x = iter->x; int y = iter->y; int width = iter->width; uint32_t * buffer = iter->buffer; if (image->common.repeat == PIXMAN_REPEAT_NONE) { bits_image_fetch_untransformed_repeat_none ( &image->bits, FALSE, x, y, width, buffer); } else { bits_image_fetch_untransformed_repeat_normal ( &image->bits, FALSE, x, y, width, buffer); } iter->y++; return buffer; } static uint32_t * bits_image_fetch_untransformed_float (pixman_iter_t * iter, const uint32_t *mask) { pixman_image_t *image = iter->image; int x = iter->x; int y = iter->y; int width = iter->width; uint32_t * buffer = iter->buffer; if (image->common.repeat == PIXMAN_REPEAT_NONE) { bits_image_fetch_untransformed_repeat_none ( &image->bits, TRUE, x, y, width, buffer); } else { bits_image_fetch_untransformed_repeat_normal ( &image->bits, TRUE, x, y, width, buffer); } iter->y++; return buffer; } typedef struct { pixman_format_code_t format; uint32_t flags; pixman_iter_get_scanline_t get_scanline_32; pixman_iter_get_scanline_t get_scanline_float; } fetcher_info_t; static const fetcher_info_t fetcher_info[] = { { PIXMAN_any, (FAST_PATH_NO_ALPHA_MAP | 
FAST_PATH_ID_TRANSFORM | FAST_PATH_NO_CONVOLUTION_FILTER | FAST_PATH_NO_PAD_REPEAT | FAST_PATH_NO_REFLECT_REPEAT), bits_image_fetch_untransformed_32, bits_image_fetch_untransformed_float }, /* Affine, no alpha */ { PIXMAN_any, (FAST_PATH_NO_ALPHA_MAP | FAST_PATH_HAS_TRANSFORM | FAST_PATH_AFFINE_TRANSFORM), bits_image_fetch_affine_no_alpha_32, bits_image_fetch_affine_no_alpha_float, }, /* General */ { PIXMAN_any, 0, bits_image_fetch_general_32, bits_image_fetch_general_float, }, { PIXMAN_null }, }; static void bits_image_property_changed (pixman_image_t *image) { _pixman_bits_image_setup_accessors (&image->bits); } void _pixman_bits_image_src_iter_init (pixman_image_t *image, pixman_iter_t *iter) { pixman_format_code_t format = image->common.extended_format_code; uint32_t flags = image->common.flags; const fetcher_info_t *info; for (info = fetcher_info; info->format != PIXMAN_null; ++info) { if ((info->format == format || info->format == PIXMAN_any) && (info->flags & flags) == info->flags) { if (iter->iter_flags & ITER_NARROW) { iter->get_scanline = info->get_scanline_32; } else { iter->get_scanline = info->get_scanline_float; } return; } } /* Just in case we somehow didn't find a scanline function */ iter->get_scanline = _pixman_iter_get_scanline_noop; } static uint32_t * dest_get_scanline_narrow (pixman_iter_t *iter, const uint32_t *mask) { pixman_image_t *image = iter->image; int x = iter->x; int y = iter->y; int width = iter->width; uint32_t * buffer = iter->buffer; image->bits.fetch_scanline_32 (&image->bits, x, y, width, buffer, mask); if (image->common.alpha_map) { uint32_t *alpha; if ((alpha = malloc (width * sizeof (uint32_t)))) { int i; x -= image->common.alpha_origin_x; y -= image->common.alpha_origin_y; image->common.alpha_map->fetch_scanline_32 ( image->common.alpha_map, x, y, width, alpha, mask); for (i = 0; i < width; ++i) { buffer[i] &= ~0xff000000; buffer[i] |= (alpha[i] & 0xff000000); } free (alpha); } } return iter->buffer; } static uint32_t * 
dest_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask) { bits_image_t * image = &iter->image->bits; int x = iter->x; int y = iter->y; int width = iter->width; argb_t * buffer = (argb_t *)iter->buffer; image->fetch_scanline_float ( image, x, y, width, (uint32_t *)buffer, mask); if (image->common.alpha_map) { argb_t *alpha; if ((alpha = malloc (width * sizeof (argb_t)))) { int i; x -= image->common.alpha_origin_x; y -= image->common.alpha_origin_y; image->common.alpha_map->fetch_scanline_float ( image->common.alpha_map, x, y, width, (uint32_t *)alpha, mask); for (i = 0; i < width; ++i) buffer[i].a = alpha[i].a; free (alpha); } } return iter->buffer; } static void dest_write_back_narrow (pixman_iter_t *iter) { bits_image_t * image = &iter->image->bits; int x = iter->x; int y = iter->y; int width = iter->width; const uint32_t *buffer = iter->buffer; image->store_scanline_32 (image, x, y, width, buffer); if (image->common.alpha_map) { x -= image->common.alpha_origin_x; y -= image->common.alpha_origin_y; image->common.alpha_map->store_scanline_32 ( image->common.alpha_map, x, y, width, buffer); } iter->y++; } static float dither_factor_blue_noise_64 (int x, int y) { float m = dither_blue_noise_64x64[((y & 0x3f) << 6) | (x & 0x3f)]; return m * (1. / 4096.f) + (1. / 8192.f); } static float dither_factor_bayer_8 (int x, int y) { uint32_t m; y ^= x; /* Compute reverse(interleave(xor(x mod n, y mod n), x mod n)) * Here n = 8 and `mod n` is the bottom 3 bits. */ m = ((y & 0x1) << 5) | ((x & 0x1) << 4) | ((y & 0x2) << 2) | ((x & 0x2) << 1) | ((y & 0x4) >> 1) | ((x & 0x4) >> 2); /* m is in range [0, 63]. We scale it to [0, 63.0f/64.0f], then * shift it to to [1.0f/128.0f, 127.0f/128.0f] so that 0 < d < 1. * This ensures exact values are not changed by dithering. 
*/ return (float)(m) * (1 / 64.0f) + (1.0f / 128.0f); } typedef float (* dither_factor_t)(int x, int y); static force_inline float dither_apply_channel (float f, float d, float s) { /* float_to_unorm splits the [0, 1] segment in (1 << n_bits) * subsections of equal length; however unorm_to_float does not * map to the center of those sections. In fact, pixel value u is * mapped to: * * u u u 1 * -------------- = ---------- + -------------- * ---------- * 2^n_bits - 1 2^n_bits 2^n_bits - 1 2^n_bits * * Hence if f = u / (2^n_bits - 1) is exactly representable on a * n_bits palette, all the numbers between * * u * ---------- = f - f * 2^n_bits = f + (0 - f) * 2^n_bits * 2^n_bits * * and * * u + 1 * ---------- = f - (f - 1) * 2^n_bits = f + (1 - f) * 2^n_bits * 2^n_bits * * are also mapped back to u. * * Hence the following calculation ensures that we add as much * noise as possible without perturbing values which are exactly * representable in the target colorspace. Note that this corresponds to * mixing the original color with noise with a ratio of `1 / 2^n_bits`. 
*/ return f + (d - f) * s; } static force_inline float dither_compute_scale (int n_bits) { // No dithering for wide formats if (n_bits == 0 || n_bits >= 32) return 0.f; return 1.f / (float)(1 << n_bits); } static const uint32_t * dither_apply_ordered (pixman_iter_t *iter, dither_factor_t factor) { bits_image_t *image = &iter->image->bits; int x = iter->x + image->dither_offset_x; int y = iter->y + image->dither_offset_y; int width = iter->width; argb_t *buffer = (argb_t *)iter->buffer; pixman_format_code_t format = image->format; int a_size = PIXMAN_FORMAT_A (format); int r_size = PIXMAN_FORMAT_R (format); int g_size = PIXMAN_FORMAT_G (format); int b_size = PIXMAN_FORMAT_B (format); float a_scale = dither_compute_scale (a_size); float r_scale = dither_compute_scale (r_size); float g_scale = dither_compute_scale (g_size); float b_scale = dither_compute_scale (b_size); int i; float d; for (i = 0; i < width; ++i) { d = factor (x + i, y); buffer->a = dither_apply_channel (buffer->a, d, a_scale); buffer->r = dither_apply_channel (buffer->r, d, r_scale); buffer->g = dither_apply_channel (buffer->g, d, g_scale); buffer->b = dither_apply_channel (buffer->b, d, b_scale); buffer++; } return iter->buffer; } static void dest_write_back_wide (pixman_iter_t *iter) { bits_image_t * image = &iter->image->bits; int x = iter->x; int y = iter->y; int width = iter->width; const uint32_t *buffer = iter->buffer; switch (image->dither) { case PIXMAN_DITHER_NONE: break; case PIXMAN_DITHER_GOOD: case PIXMAN_DITHER_BEST: case PIXMAN_DITHER_ORDERED_BLUE_NOISE_64: buffer = dither_apply_ordered (iter, dither_factor_blue_noise_64); break; case PIXMAN_DITHER_FAST: case PIXMAN_DITHER_ORDERED_BAYER_8: buffer = dither_apply_ordered (iter, dither_factor_bayer_8); break; } image->store_scanline_float (image, x, y, width, buffer); if (image->common.alpha_map) { x -= image->common.alpha_origin_x; y -= image->common.alpha_origin_y; image->common.alpha_map->store_scanline_float ( image->common.alpha_map, 
x, y, width, buffer); } iter->y++; } void _pixman_bits_image_dest_iter_init (pixman_image_t *image, pixman_iter_t *iter) { if (iter->iter_flags & ITER_NARROW) { if ((iter->iter_flags & (ITER_IGNORE_RGB | ITER_IGNORE_ALPHA)) == (ITER_IGNORE_RGB | ITER_IGNORE_ALPHA)) { iter->get_scanline = _pixman_iter_get_scanline_noop; } else { iter->get_scanline = dest_get_scanline_narrow; } iter->write_back = dest_write_back_narrow; } else { iter->get_scanline = dest_get_scanline_wide; iter->write_back = dest_write_back_wide; } } static uint32_t * create_bits (pixman_format_code_t format, int width, int height, int * rowstride_bytes, pixman_bool_t clear) { int stride; size_t buf_size; int bpp; /* what follows is a long-winded way, avoiding any possibility of integer * overflows, of saying: * stride = ((width * bpp + 0x1f) >> 5) * sizeof (uint32_t); */ bpp = PIXMAN_FORMAT_BPP (format); if (_pixman_multiply_overflows_int (width, bpp)) return NULL; stride = width * bpp; if (_pixman_addition_overflows_int (stride, 0x1f)) return NULL; stride += 0x1f; stride >>= 5; stride *= sizeof (uint32_t); if (_pixman_multiply_overflows_size (height, stride)) return NULL; buf_size = (size_t)height * stride; if (rowstride_bytes) *rowstride_bytes = stride; if (clear) return calloc (1, buf_size); else return malloc (buf_size); } pixman_bool_t _pixman_bits_image_init (pixman_image_t * image, pixman_format_code_t format, int width, int height, uint32_t * bits, int rowstride, pixman_bool_t clear) { uint32_t *free_me = NULL; if (PIXMAN_FORMAT_BPP (format) == 128) return_val_if_fail(!(rowstride % 4), FALSE); if (!bits && width && height) { int rowstride_bytes; free_me = bits = create_bits (format, width, height, &rowstride_bytes, clear); if (!bits) return FALSE; rowstride = rowstride_bytes / (int) sizeof (uint32_t); } _pixman_image_init (image); image->type = BITS; image->bits.format = format; image->bits.width = width; image->bits.height = height; image->bits.bits = bits; image->bits.free_me = free_me; 
image->bits.dither = PIXMAN_DITHER_NONE; image->bits.dither_offset_x = 0; image->bits.dither_offset_y = 0; image->bits.read_func = NULL; image->bits.write_func = NULL; image->bits.rowstride = rowstride; image->bits.indexed = NULL; image->common.property_changed = bits_image_property_changed; _pixman_image_reset_clip_region (image); return TRUE; } static pixman_image_t * create_bits_image_internal (pixman_format_code_t format, int width, int height, uint32_t * bits, int rowstride_bytes, pixman_bool_t clear) { pixman_image_t *image; /* must be a whole number of uint32_t's */ return_val_if_fail ( bits == NULL || (rowstride_bytes % sizeof (uint32_t)) == 0, NULL); return_val_if_fail (PIXMAN_FORMAT_BPP (format) >= PIXMAN_FORMAT_DEPTH (format), NULL); image = _pixman_image_allocate (); if (!image) return NULL; if (!_pixman_bits_image_init (image, format, width, height, bits, rowstride_bytes / (int) sizeof (uint32_t), clear)) { free (image); return NULL; } return image; } /* If bits is NULL, a buffer will be allocated and initialized to 0 */ PIXMAN_EXPORT pixman_image_t * pixman_image_create_bits (pixman_format_code_t format, int width, int height, uint32_t * bits, int rowstride_bytes) { return create_bits_image_internal ( format, width, height, bits, rowstride_bytes, TRUE); } /* If bits is NULL, a buffer will be allocated and _not_ initialized */ PIXMAN_EXPORT pixman_image_t * pixman_image_create_bits_no_clear (pixman_format_code_t format, int width, int height, uint32_t * bits, int rowstride_bytes) { return create_bits_image_internal ( format, width, height, bits, rowstride_bytes, FALSE); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-combine-float.c0000664000175000017500000007763114712446423020605 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2010, 2012 Soren Sandmann Pedersen * Copyright Âİ 2010, 2012 Red Hat, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Soren Sandmann Pedersen (sandmann@cs.au.dk) */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include "pixman-private.h" #include "pixman-combine-float.h" /* Workaround for http://gcc.gnu.org/PR54965 */ /* GCC 4.6 has problems with force_inline, so just use normal inline instead */ #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 6) #undef force_inline #define force_inline __inline__ #endif typedef float (* combine_channel_t) (float sa, float s, float da, float d); static force_inline void combine_inner (pixman_bool_t component, float *dest, const float *src, const float *mask, int n_pixels, combine_channel_t combine_a, combine_channel_t combine_c) { int i; if (!mask) { for (i = 0; i < 4 * n_pixels; i += 4) { float sa = src[i + 0]; float sr = src[i + 1]; float sg = src[i + 2]; float sb = src[i + 3]; float da = dest[i + 0]; float dr = dest[i + 1]; float dg = dest[i + 2]; float db = dest[i + 3]; dest[i + 0] = combine_a (sa, sa, da, da); dest[i + 1] = combine_c (sa, sr, da, dr); dest[i + 2] = combine_c (sa, sg, da, dg); dest[i + 3] = combine_c (sa, sb, da, db); } } else { for (i = 0; i < 4 * n_pixels; i += 4) { float sa, sr, sg, sb; float ma, mr, mg, mb; float da, dr, dg, db; sa = src[i + 0]; sr = src[i + 1]; sg = src[i + 2]; sb = src[i + 3]; if (component) { ma = mask[i + 0]; mr = mask[i + 1]; mg = mask[i + 2]; mb = mask[i + 3]; sr *= mr; sg *= mg; sb *= mb; ma *= sa; mr *= sa; mg *= sa; mb *= sa; sa = ma; } else { ma = mask[i + 0]; sa *= ma; sr *= ma; sg *= ma; sb *= ma; ma = mr = mg = mb = sa; } da = dest[i + 0]; dr = dest[i + 1]; dg = dest[i + 2]; db = dest[i + 3]; dest[i + 0] = combine_a (ma, sa, da, da); dest[i + 1] = combine_c (mr, sr, da, dr); dest[i + 2] = combine_c (mg, sg, da, dg); dest[i + 3] = combine_c (mb, sb, da, db); } } } #define MAKE_COMBINER(name, component, combine_a, combine_c) \ static void \ combine_ ## name ## _float (pixman_implementation_t *imp, \ pixman_op_t op, \ float *dest, \ const float *src, 
\ const float *mask, \ int n_pixels) \ { \ combine_inner (component, dest, src, mask, n_pixels, \ combine_a, combine_c); \ } #define MAKE_COMBINERS(name, combine_a, combine_c) \ MAKE_COMBINER(name ## _ca, TRUE, combine_a, combine_c) \ MAKE_COMBINER(name ## _u, FALSE, combine_a, combine_c) /* * Porter/Duff operators */ #define CLAMP(f) \ (((f) < 0)? 0 : (((f) > 1.0) ? 1.0 : (f))) static force_inline float get_factor (combine_factor_t factor, float sa, float da) { float f = -1; switch (factor) { case ZERO: f = 0.0f; break; case ONE: f = 1.0f; break; case SRC_ALPHA: f = sa; break; case DEST_ALPHA: f = da; break; case INV_SA: f = 1 - sa; break; case INV_DA: f = 1 - da; break; case SA_OVER_DA: if (FLOAT_IS_ZERO (da)) f = 1.0f; else f = CLAMP (sa / da); break; case DA_OVER_SA: if (FLOAT_IS_ZERO (sa)) f = 1.0f; else f = CLAMP (da / sa); break; case INV_SA_OVER_DA: if (FLOAT_IS_ZERO (da)) f = 1.0f; else f = CLAMP ((1.0f - sa) / da); break; case INV_DA_OVER_SA: if (FLOAT_IS_ZERO (sa)) f = 1.0f; else f = CLAMP ((1.0f - da) / sa); break; case ONE_MINUS_SA_OVER_DA: if (FLOAT_IS_ZERO (da)) f = 0.0f; else f = CLAMP (1.0f - sa / da); break; case ONE_MINUS_DA_OVER_SA: if (FLOAT_IS_ZERO (sa)) f = 0.0f; else f = CLAMP (1.0f - da / sa); break; case ONE_MINUS_INV_DA_OVER_SA: if (FLOAT_IS_ZERO (sa)) f = 0.0f; else f = CLAMP (1.0f - (1.0f - da) / sa); break; case ONE_MINUS_INV_SA_OVER_DA: if (FLOAT_IS_ZERO (da)) f = 0.0f; else f = CLAMP (1.0f - (1.0f - sa) / da); break; } return f; } #define MAKE_PD_COMBINERS(name, a, b) \ static float \ pd_combine_ ## name (float sa, float s, float da, float d) \ { \ const float fa = get_factor (a, sa, da); \ const float fb = get_factor (b, sa, da); \ \ return MIN (1.0f, s * fa + d * fb); \ } \ \ MAKE_COMBINERS(name, pd_combine_ ## name, pd_combine_ ## name) MAKE_PD_COMBINERS (clear, ZERO, ZERO) MAKE_PD_COMBINERS (src, ONE, ZERO) MAKE_PD_COMBINERS (dst, ZERO, ONE) MAKE_PD_COMBINERS (over, ONE, INV_SA) MAKE_PD_COMBINERS (over_reverse, INV_DA, ONE) 
MAKE_PD_COMBINERS (in, DEST_ALPHA, ZERO) MAKE_PD_COMBINERS (in_reverse, ZERO, SRC_ALPHA) MAKE_PD_COMBINERS (out, INV_DA, ZERO) MAKE_PD_COMBINERS (out_reverse, ZERO, INV_SA) MAKE_PD_COMBINERS (atop, DEST_ALPHA, INV_SA) MAKE_PD_COMBINERS (atop_reverse, INV_DA, SRC_ALPHA) MAKE_PD_COMBINERS (xor, INV_DA, INV_SA) MAKE_PD_COMBINERS (add, ONE, ONE) MAKE_PD_COMBINERS (saturate, INV_DA_OVER_SA, ONE) MAKE_PD_COMBINERS (disjoint_clear, ZERO, ZERO) MAKE_PD_COMBINERS (disjoint_src, ONE, ZERO) MAKE_PD_COMBINERS (disjoint_dst, ZERO, ONE) MAKE_PD_COMBINERS (disjoint_over, ONE, INV_SA_OVER_DA) MAKE_PD_COMBINERS (disjoint_over_reverse, INV_DA_OVER_SA, ONE) MAKE_PD_COMBINERS (disjoint_in, ONE_MINUS_INV_DA_OVER_SA, ZERO) MAKE_PD_COMBINERS (disjoint_in_reverse, ZERO, ONE_MINUS_INV_SA_OVER_DA) MAKE_PD_COMBINERS (disjoint_out, INV_DA_OVER_SA, ZERO) MAKE_PD_COMBINERS (disjoint_out_reverse, ZERO, INV_SA_OVER_DA) MAKE_PD_COMBINERS (disjoint_atop, ONE_MINUS_INV_DA_OVER_SA, INV_SA_OVER_DA) MAKE_PD_COMBINERS (disjoint_atop_reverse, INV_DA_OVER_SA, ONE_MINUS_INV_SA_OVER_DA) MAKE_PD_COMBINERS (disjoint_xor, INV_DA_OVER_SA, INV_SA_OVER_DA) MAKE_PD_COMBINERS (conjoint_clear, ZERO, ZERO) MAKE_PD_COMBINERS (conjoint_src, ONE, ZERO) MAKE_PD_COMBINERS (conjoint_dst, ZERO, ONE) MAKE_PD_COMBINERS (conjoint_over, ONE, ONE_MINUS_SA_OVER_DA) MAKE_PD_COMBINERS (conjoint_over_reverse, ONE_MINUS_DA_OVER_SA, ONE) MAKE_PD_COMBINERS (conjoint_in, DA_OVER_SA, ZERO) MAKE_PD_COMBINERS (conjoint_in_reverse, ZERO, SA_OVER_DA) MAKE_PD_COMBINERS (conjoint_out, ONE_MINUS_DA_OVER_SA, ZERO) MAKE_PD_COMBINERS (conjoint_out_reverse, ZERO, ONE_MINUS_SA_OVER_DA) MAKE_PD_COMBINERS (conjoint_atop, DA_OVER_SA, ONE_MINUS_SA_OVER_DA) MAKE_PD_COMBINERS (conjoint_atop_reverse, ONE_MINUS_DA_OVER_SA, SA_OVER_DA) MAKE_PD_COMBINERS (conjoint_xor, ONE_MINUS_DA_OVER_SA, ONE_MINUS_SA_OVER_DA) /* * PDF blend modes: * * The following blend modes have been taken from the PDF ISO 32000 * specification, which at this point in time is available 
from * * http://www.adobe.com/devnet/pdf/pdf_reference.html * * The specific documents of interest are the PDF spec itself: * * http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/PDF32000_2008.pdf * * chapters 11.3.5 and 11.3.6 and a later supplement for Adobe Acrobat * 9.1 and Reader 9.1: * * http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/adobe_supplement_iso32000_1.pdf * * that clarifies the specifications for blend modes ColorDodge and * ColorBurn. * * The formula for computing the final pixel color given in 11.3.6 is: * * Îħr — Cr = (1 – Îħs) — Îħb — Cb + (1 – Îħb) — Îħs — Cs + Îħb — Îħs — B(Cb, Cs) * * with B() is the blend function. When B(Cb, Cs) = Cs, this formula * reduces to the regular OVER operator. * * Cs and Cb are not premultiplied, so in our implementation we instead * use: * * cr = (1 – Îħs) — cb + (1 – Îħb) — cs + Îħb — Îħs — B (cb/Îħb, cs/Îħs) * * where cr, cs, and cb are premultiplied colors, and where the * * Îħb — Îħs — B(cb/Îħb, cs/Îħs) * * part is first arithmetically simplified under the assumption that Îħb * and Îħs are not 0, and then updated to produce a meaningful result when * they are. 
* * For all the blend mode operators, the alpha channel is given by * * Îħr = Îħs + Îħb + Îħb — Îħs */ #define MAKE_SEPARABLE_PDF_COMBINERS(name) \ static float \ combine_ ## name ## _a (float sa, float s, float da, float d) \ { \ return da + sa - da * sa; \ } \ \ static float \ combine_ ## name ## _c (float sa, float s, float da, float d) \ { \ float f = (1 - sa) * d + (1 - da) * s; \ \ return f + blend_ ## name (sa, s, da, d); \ } \ \ MAKE_COMBINERS (name, combine_ ## name ## _a, combine_ ## name ## _c) /* * Multiply * * ad * as * B(d / ad, s / as) * = ad * as * d/ad * s/as * = d * s * */ static force_inline float blend_multiply (float sa, float s, float da, float d) { return d * s; } /* * Screen * * ad * as * B(d/ad, s/as) * = ad * as * (d/ad + s/as - s/as * d/ad) * = ad * s + as * d - s * d */ static force_inline float blend_screen (float sa, float s, float da, float d) { return d * sa + s * da - s * d; } /* * Overlay * * ad * as * B(d/ad, s/as) * = ad * as * Hardlight (s, d) * = if (d / ad < 0.5) * as * ad * Multiply (s/as, 2 * d/ad) * else * as * ad * Screen (s/as, 2 * d / ad - 1) * = if (d < 0.5 * ad) * as * ad * s/as * 2 * d /ad * else * as * ad * (s/as + 2 * d / ad - 1 - s / as * (2 * d / ad - 1)) * = if (2 * d < ad) * 2 * s * d * else * ad * s + 2 * as * d - as * ad - ad * s * (2 * d / ad - 1) * = if (2 * d < ad) * 2 * s * d * else * as * ad - 2 * (ad - d) * (as - s) */ static force_inline float blend_overlay (float sa, float s, float da, float d) { if (2 * d < da) return 2 * s * d; else return sa * da - 2 * (da - d) * (sa - s); } /* * Darken * * ad * as * B(d/ad, s/as) * = ad * as * MIN(d/ad, s/as) * = MIN (as * d, ad * s) */ static force_inline float blend_darken (float sa, float s, float da, float d) { s = s * da; d = d * sa; if (s > d) return d; else return s; } /* * Lighten * * ad * as * B(d/ad, s/as) * = ad * as * MAX(d/ad, s/as) * = MAX (as * d, ad * s) */ static force_inline float blend_lighten (float sa, float s, float da, float d) { s = s * da; 
d = d * sa; if (s > d) return s; else return d; } /* * Color dodge * * ad * as * B(d/ad, s/as) * = if d/ad = 0 * ad * as * 0 * else if (d/ad >= (1 - s/as) * ad * as * 1 * else * ad * as * ((d/ad) / (1 - s/as)) * = if d = 0 * 0 * elif as * d >= ad * (as - s) * ad * as * else * as * (as * d / (as - s)) * */ static force_inline float blend_color_dodge (float sa, float s, float da, float d) { if (FLOAT_IS_ZERO (d)) return 0.0f; else if (d * sa >= sa * da - s * da) return sa * da; else if (FLOAT_IS_ZERO (sa - s)) return sa * da; else return sa * sa * d / (sa - s); } /* * Color burn * * We modify the first clause "if d = 1" to "if d >= 1" since with * premultiplied colors d > 1 can actually happen. * * ad * as * B(d/ad, s/as) * = if d/ad >= 1 * ad * as * 1 * elif (1 - d/ad) >= s/as * ad * as * 0 * else * ad * as * (1 - ((1 - d/ad) / (s/as))) * = if d >= ad * ad * as * elif as * ad - as * d >= ad * s * 0 * else * ad * as - as * as * (ad - d) / s */ static force_inline float blend_color_burn (float sa, float s, float da, float d) { if (d >= da) return sa * da; else if (sa * (da - d) >= s * da) return 0.0f; else if (FLOAT_IS_ZERO (s)) return 0.0f; else return sa * (da - sa * (da - d) / s); } /* * Hard light * * ad * as * B(d/ad, s/as) * = if (s/as <= 0.5) * ad * as * Multiply (d/ad, 2 * s/as) * else * ad * as * Screen (d/ad, 2 * s/as - 1) * = if 2 * s <= as * ad * as * d/ad * 2 * s / as * else * ad * as * (d/ad + (2 * s/as - 1) + d/ad * (2 * s/as - 1)) * = if 2 * s <= as * 2 * s * d * else * as * ad - 2 * (ad - d) * (as - s) */ static force_inline float blend_hard_light (float sa, float s, float da, float d) { if (2 * s < sa) return 2 * s * d; else return sa * da - 2 * (da - d) * (sa - s); } /* * Soft light * * ad * as * B(d/ad, s/as) * = if (s/as <= 0.5) * ad * as * (d/ad - (1 - 2 * s/as) * d/ad * (1 - d/ad)) * else if (d/ad <= 0.25) * ad * as * (d/ad + (2 * s/as - 1) * ((((16 * d/ad - 12) * d/ad + 4) * d/ad) - d/ad)) * else * ad * as * (d/ad + (2 * s/as - 1) * sqrt 
(d/ad)) * = if (2 * s <= as) * d * as - d * (ad - d) * (as - 2 * s) / ad; * else if (4 * d <= ad) * (2 * s - as) * d * ((16 * d / ad - 12) * d / ad + 3); * else * d * as + (sqrt (d * ad) - d) * (2 * s - as); */ static force_inline float blend_soft_light (float sa, float s, float da, float d) { if (2 * s <= sa) { if (FLOAT_IS_ZERO (da)) return d * sa; else return d * sa - d * (da - d) * (sa - 2 * s) / da; } else { if (FLOAT_IS_ZERO (da)) { return d * sa; } else { if (4 * d <= da) return d * sa + (2 * s - sa) * d * ((16 * d / da - 12) * d / da + 3); else return d * sa + (sqrtf (d * da) - d) * (2 * s - sa); } } } /* * Difference * * ad * as * B(s/as, d/ad) * = ad * as * abs (s/as - d/ad) * = if (s/as <= d/ad) * ad * as * (d/ad - s/as) * else * ad * as * (s/as - d/ad) * = if (ad * s <= as * d) * as * d - ad * s * else * ad * s - as * d */ static force_inline float blend_difference (float sa, float s, float da, float d) { float dsa = d * sa; float sda = s * da; if (sda < dsa) return dsa - sda; else return sda - dsa; } /* * Exclusion * * ad * as * B(s/as, d/ad) * = ad * as * (d/ad + s/as - 2 * d/ad * s/as) * = as * d + ad * s - 2 * s * d */ static force_inline float blend_exclusion (float sa, float s, float da, float d) { return s * da + d * sa - 2 * d * s; } MAKE_SEPARABLE_PDF_COMBINERS (multiply) MAKE_SEPARABLE_PDF_COMBINERS (screen) MAKE_SEPARABLE_PDF_COMBINERS (overlay) MAKE_SEPARABLE_PDF_COMBINERS (darken) MAKE_SEPARABLE_PDF_COMBINERS (lighten) MAKE_SEPARABLE_PDF_COMBINERS (color_dodge) MAKE_SEPARABLE_PDF_COMBINERS (color_burn) MAKE_SEPARABLE_PDF_COMBINERS (hard_light) MAKE_SEPARABLE_PDF_COMBINERS (soft_light) MAKE_SEPARABLE_PDF_COMBINERS (difference) MAKE_SEPARABLE_PDF_COMBINERS (exclusion) /* * PDF nonseperable blend modes are implemented using the following functions * to operate in Hsl space, with Cmax, Cmid, Cmin referring to the max, mid * and min value of the red, green and blue components. 
* * LUM (C) = 0.3 — Cred + 0.59 — Cgreen + 0.11 — Cblue * * clip_color (C): * l = LUM (C) * min = Cmin * max = Cmax * if n < 0.0 * C = l + (((C – l) — l) ⁄ (l – min)) * if x > 1.0 * C = l + (((C – l) — (1 – l) ) ⁄ (max – l)) * return C * * set_lum (C, l): * d = l – LUM (C) * C += d * return clip_color (C) * * SAT (C) = CH_MAX (C) - CH_MIN (C) * * set_sat (C, s): * if Cmax > Cmin * Cmid = ( ( ( Cmid – Cmin ) — s ) ⁄ ( Cmax – Cmin ) ) * Cmax = s * else * Cmid = Cmax = 0.0 * Cmin = 0.0 * return C */ /* For premultiplied colors, we need to know what happens when C is * multiplied by a real number. LUM and SAT are linear: * * LUM (r — C) = r — LUM (C) SAT (r * C) = r * SAT (C) * * If we extend clip_color with an extra argument a and change * * if x >= 1.0 * * into * * if x >= a * * then clip_color is also linear: * * r * clip_color (C, a) = clip_color (r * C, r * a); * * for positive r. * * Similarly, we can extend set_lum with an extra argument that is just passed * on to clip_color: * * r * set_lum (C, l, a) * * = r — clip_color (C + l - LUM (C), a) * * = clip_color (r * C + r — l - r * LUM (C), r * a) * * = set_lum (r * C, r * l, r * a) * * Finally, set_sat: * * r * set_sat (C, s) = set_sat (x * C, r * s) * * The above holds for all non-zero x, because the x'es in the fraction for * C_mid cancel out. Specifically, it holds for x = r: * * r * set_sat (C, s) = set_sat (r * C, r * s) * */ typedef struct { float r; float g; float b; } rgb_t; static force_inline float minf (float a, float b) { return a < b? a : b; } static force_inline float maxf (float a, float b) { return a > b? 
a : b; } static force_inline float channel_min (const rgb_t *c) { return minf (minf (c->r, c->g), c->b); } static force_inline float channel_max (const rgb_t *c) { return maxf (maxf (c->r, c->g), c->b); } static force_inline float get_lum (const rgb_t *c) { return c->r * 0.3f + c->g * 0.59f + c->b * 0.11f; } static force_inline float get_sat (const rgb_t *c) { return channel_max (c) - channel_min (c); } static void clip_color (rgb_t *color, float a) { float l = get_lum (color); float n = channel_min (color); float x = channel_max (color); float t; if (n < 0.0f) { t = l - n; if (FLOAT_IS_ZERO (t)) { color->r = 0.0f; color->g = 0.0f; color->b = 0.0f; } else { color->r = l + (((color->r - l) * l) / t); color->g = l + (((color->g - l) * l) / t); color->b = l + (((color->b - l) * l) / t); } } if (x > a) { t = x - l; if (FLOAT_IS_ZERO (t)) { color->r = a; color->g = a; color->b = a; } else { color->r = l + (((color->r - l) * (a - l) / t)); color->g = l + (((color->g - l) * (a - l) / t)); color->b = l + (((color->b - l) * (a - l) / t)); } } } static void set_lum (rgb_t *color, float sa, float l) { float d = l - get_lum (color); color->r = color->r + d; color->g = color->g + d; color->b = color->b + d; clip_color (color, sa); } static void set_sat (rgb_t *src, float sat) { float *max, *mid, *min; float t; if (src->r > src->g) { if (src->r > src->b) { max = &(src->r); if (src->g > src->b) { mid = &(src->g); min = &(src->b); } else { mid = &(src->b); min = &(src->g); } } else { max = &(src->b); mid = &(src->r); min = &(src->g); } } else { if (src->r > src->b) { max = &(src->g); mid = &(src->r); min = &(src->b); } else { min = &(src->r); if (src->g > src->b) { max = &(src->g); mid = &(src->b); } else { max = &(src->b); mid = &(src->g); } } } t = *max - *min; if (FLOAT_IS_ZERO (t)) { *mid = *max = 0.0f; } else { *mid = ((*mid - *min) * sat) / t; *max = sat; } *min = 0.0f; } /* Hue: * * as * ad * B(s/as, d/as) * = as * ad * set_lum (set_sat (s/as, SAT (d/ad)), LUM (d/ad), 1) * 
= set_lum (set_sat (ad * s, as * SAT (d)), as * LUM (d), as * ad) * */ static force_inline void blend_hsl_hue (rgb_t *res, const rgb_t *dest, float da, const rgb_t *src, float sa) { res->r = src->r * da; res->g = src->g * da; res->b = src->b * da; set_sat (res, get_sat (dest) * sa); set_lum (res, sa * da, get_lum (dest) * sa); } /* * Saturation * * as * ad * B(s/as, d/ad) * = as * ad * set_lum (set_sat (d/ad, SAT (s/as)), LUM (d/ad), 1) * = set_lum (as * ad * set_sat (d/ad, SAT (s/as)), * as * LUM (d), as * ad) * = set_lum (set_sat (as * d, ad * SAT (s), as * LUM (d), as * ad)) */ static force_inline void blend_hsl_saturation (rgb_t *res, const rgb_t *dest, float da, const rgb_t *src, float sa) { res->r = dest->r * sa; res->g = dest->g * sa; res->b = dest->b * sa; set_sat (res, get_sat (src) * da); set_lum (res, sa * da, get_lum (dest) * sa); } /* * Color * * as * ad * B(s/as, d/as) * = as * ad * set_lum (s/as, LUM (d/ad), 1) * = set_lum (s * ad, as * LUM (d), as * ad) */ static force_inline void blend_hsl_color (rgb_t *res, const rgb_t *dest, float da, const rgb_t *src, float sa) { res->r = src->r * da; res->g = src->g * da; res->b = src->b * da; set_lum (res, sa * da, get_lum (dest) * sa); } /* * Luminosity * * as * ad * B(s/as, d/ad) * = as * ad * set_lum (d/ad, LUM (s/as), 1) * = set_lum (as * d, ad * LUM (s), as * ad) */ static force_inline void blend_hsl_luminosity (rgb_t *res, const rgb_t *dest, float da, const rgb_t *src, float sa) { res->r = dest->r * sa; res->g = dest->g * sa; res->b = dest->b * sa; set_lum (res, sa * da, get_lum (src) * da); } #define MAKE_NON_SEPARABLE_PDF_COMBINERS(name) \ static void \ combine_ ## name ## _u_float (pixman_implementation_t *imp, \ pixman_op_t op, \ float *dest, \ const float *src, \ const float *mask, \ int n_pixels) \ { \ int i; \ \ for (i = 0; i < 4 * n_pixels; i += 4) \ { \ float sa, da; \ rgb_t sc, dc, rc; \ \ sa = src[i + 0]; \ sc.r = src[i + 1]; \ sc.g = src[i + 2]; \ sc.b = src[i + 3]; \ \ da = dest[i + 0]; \ 
dc.r = dest[i + 1]; \ dc.g = dest[i + 2]; \ dc.b = dest[i + 3]; \ \ if (mask) \ { \ float ma = mask[i + 0]; \ \ /* Component alpha is not supported for HSL modes */ \ sa *= ma; \ sc.r *= ma; \ sc.g *= ma; \ sc.b *= ma; \ } \ \ blend_ ## name (&rc, &dc, da, &sc, sa); \ \ dest[i + 0] = sa + da - sa * da; \ dest[i + 1] = (1 - sa) * dc.r + (1 - da) * sc.r + rc.r; \ dest[i + 2] = (1 - sa) * dc.g + (1 - da) * sc.g + rc.g; \ dest[i + 3] = (1 - sa) * dc.b + (1 - da) * sc.b + rc.b; \ } \ } MAKE_NON_SEPARABLE_PDF_COMBINERS(hsl_hue) MAKE_NON_SEPARABLE_PDF_COMBINERS(hsl_saturation) MAKE_NON_SEPARABLE_PDF_COMBINERS(hsl_color) MAKE_NON_SEPARABLE_PDF_COMBINERS(hsl_luminosity) void _pixman_setup_combiner_functions_float (pixman_implementation_t *imp) { /* Unified alpha */ imp->combine_float[PIXMAN_OP_CLEAR] = combine_clear_u_float; imp->combine_float[PIXMAN_OP_SRC] = combine_src_u_float; imp->combine_float[PIXMAN_OP_DST] = combine_dst_u_float; imp->combine_float[PIXMAN_OP_OVER] = combine_over_u_float; imp->combine_float[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_u_float; imp->combine_float[PIXMAN_OP_IN] = combine_in_u_float; imp->combine_float[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_u_float; imp->combine_float[PIXMAN_OP_OUT] = combine_out_u_float; imp->combine_float[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_u_float; imp->combine_float[PIXMAN_OP_ATOP] = combine_atop_u_float; imp->combine_float[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_u_float; imp->combine_float[PIXMAN_OP_XOR] = combine_xor_u_float; imp->combine_float[PIXMAN_OP_ADD] = combine_add_u_float; imp->combine_float[PIXMAN_OP_SATURATE] = combine_saturate_u_float; /* Disjoint, unified */ imp->combine_float[PIXMAN_OP_DISJOINT_CLEAR] = combine_disjoint_clear_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_SRC] = combine_disjoint_src_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_DST] = combine_disjoint_dst_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_u_float; 
imp->combine_float[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_disjoint_over_reverse_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_u_float; imp->combine_float[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_u_float; /* Conjoint, unified */ imp->combine_float[PIXMAN_OP_CONJOINT_CLEAR] = combine_conjoint_clear_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_SRC] = combine_conjoint_src_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_DST] = combine_conjoint_dst_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_u_float; imp->combine_float[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_u_float; /* PDF operators, unified */ imp->combine_float[PIXMAN_OP_MULTIPLY] = combine_multiply_u_float; imp->combine_float[PIXMAN_OP_SCREEN] = combine_screen_u_float; imp->combine_float[PIXMAN_OP_OVERLAY] = combine_overlay_u_float; imp->combine_float[PIXMAN_OP_DARKEN] = combine_darken_u_float; imp->combine_float[PIXMAN_OP_LIGHTEN] = 
combine_lighten_u_float; imp->combine_float[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_u_float; imp->combine_float[PIXMAN_OP_COLOR_BURN] = combine_color_burn_u_float; imp->combine_float[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_u_float; imp->combine_float[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_u_float; imp->combine_float[PIXMAN_OP_DIFFERENCE] = combine_difference_u_float; imp->combine_float[PIXMAN_OP_EXCLUSION] = combine_exclusion_u_float; imp->combine_float[PIXMAN_OP_HSL_HUE] = combine_hsl_hue_u_float; imp->combine_float[PIXMAN_OP_HSL_SATURATION] = combine_hsl_saturation_u_float; imp->combine_float[PIXMAN_OP_HSL_COLOR] = combine_hsl_color_u_float; imp->combine_float[PIXMAN_OP_HSL_LUMINOSITY] = combine_hsl_luminosity_u_float; /* Component alpha combiners */ imp->combine_float_ca[PIXMAN_OP_CLEAR] = combine_clear_ca_float; imp->combine_float_ca[PIXMAN_OP_SRC] = combine_src_ca_float; imp->combine_float_ca[PIXMAN_OP_DST] = combine_dst_ca_float; imp->combine_float_ca[PIXMAN_OP_OVER] = combine_over_ca_float; imp->combine_float_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_IN] = combine_in_ca_float; imp->combine_float_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_OUT] = combine_out_ca_float; imp->combine_float_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_ATOP] = combine_atop_ca_float; imp->combine_float_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_XOR] = combine_xor_ca_float; imp->combine_float_ca[PIXMAN_OP_ADD] = combine_add_ca_float; imp->combine_float_ca[PIXMAN_OP_SATURATE] = combine_saturate_ca_float; /* Disjoint CA */ imp->combine_float_ca[PIXMAN_OP_DISJOINT_CLEAR] = combine_disjoint_clear_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_SRC] = combine_disjoint_src_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_DST] = combine_disjoint_dst_ca_float; 
imp->combine_float_ca[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_disjoint_over_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_ca_float; /* Conjoint CA */ imp->combine_float_ca[PIXMAN_OP_CONJOINT_CLEAR] = combine_conjoint_clear_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_SRC] = combine_conjoint_src_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_DST] = combine_conjoint_dst_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_ca_float; imp->combine_float_ca[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_ca_float; /* PDF operators CA */ imp->combine_float_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_ca_float; imp->combine_float_ca[PIXMAN_OP_SCREEN] = combine_screen_ca_float; 
imp->combine_float_ca[PIXMAN_OP_OVERLAY] = combine_overlay_ca_float; imp->combine_float_ca[PIXMAN_OP_DARKEN] = combine_darken_ca_float; imp->combine_float_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_ca_float; imp->combine_float_ca[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_ca_float; imp->combine_float_ca[PIXMAN_OP_COLOR_BURN] = combine_color_burn_ca_float; imp->combine_float_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_ca_float; imp->combine_float_ca[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_ca_float; imp->combine_float_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_ca_float; imp->combine_float_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_ca_float; /* It is not clear that these make sense, so make them noops for now */ imp->combine_float_ca[PIXMAN_OP_HSL_HUE] = combine_dst_u_float; imp->combine_float_ca[PIXMAN_OP_HSL_SATURATION] = combine_dst_u_float; imp->combine_float_ca[PIXMAN_OP_HSL_COLOR] = combine_dst_u_float; imp->combine_float_ca[PIXMAN_OP_HSL_LUMINOSITY] = combine_dst_u_float; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-combine-float.h0000664000175000017500000000345014712446423020576 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2010, 2012 Soren Sandmann Pedersen * Copyright Âİ 2010, 2012 Red Hat, Inc. 
* Copyright Âİ 2024 Filip Wasil, Samsung Electronics * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Soren Sandmann Pedersen (sandmann@cs.au.dk) */ #ifndef __PIXMAN_COMBINE_FLOAT_H__ #define __PIXMAN_COMBINE_FLOAT_H__ /* * Porter/Duff operators */ typedef enum { ZERO, ONE, SRC_ALPHA, DEST_ALPHA, INV_SA, INV_DA, SA_OVER_DA, DA_OVER_SA, INV_SA_OVER_DA, INV_DA_OVER_SA, ONE_MINUS_SA_OVER_DA, ONE_MINUS_DA_OVER_SA, ONE_MINUS_INV_DA_OVER_SA, ONE_MINUS_INV_SA_OVER_DA } combine_factor_t; #endif /*__PIXMAN_COMBINE_FLOAT_H__*/././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-combine32.c0000664000175000017500000007152514712446423017643 0ustar00mattst88mattst88/* * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. 
* 2005 Lars Knoll & Zack Rusin, Trolltech * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" #include "pixman-combine32.h" /* component alpha helper functions */ static void combine_mask_ca (uint32_t *src, uint32_t *mask) { uint32_t a = *mask; uint32_t x; uint16_t xa; if (!a) { *(src) = 0; return; } x = *(src); if (a == ~0) { x = x >> A_SHIFT; x |= x << G_SHIFT; x |= x << R_SHIFT; *(mask) = x; return; } xa = x >> A_SHIFT; UN8x4_MUL_UN8x4 (x, a); *(src) = x; UN8x4_MUL_UN8 (a, xa); *(mask) = a; } static void combine_mask_value_ca (uint32_t *src, const uint32_t *mask) { uint32_t a = *mask; uint32_t x; if (!a) { *(src) = 0; return; } if (a == ~0) return; x = *(src); UN8x4_MUL_UN8x4 (x, a); *(src) = x; } static void combine_mask_alpha_ca (const uint32_t *src, uint32_t *mask) { uint32_t a = *(mask); uint32_t x; if (!a) return; x = *(src) >> A_SHIFT; if (x == MASK) return; if (a == ~0) { x |= x << G_SHIFT; x |= x << R_SHIFT; *(mask) = x; return; } UN8x4_MUL_UN8 (a, x); *(mask) = a; } /* * There are two ways of handling alpha -- either as a single unified value or * a separate value for each component, hence each macro must have two * versions. The unified alpha version has a 'u' at the end of the name, * the component version has a 'ca'. Similarly, functions which deal with * this difference will have two versions using the same convention. 
*/ static force_inline uint32_t combine_mask (const uint32_t *src, const uint32_t *mask, int i) { uint32_t s, m; if (mask) { m = *(mask + i) >> A_SHIFT; if (!m) return 0; } s = *(src + i); if (mask) UN8x4_MUL_UN8 (s, m); return s; } static void combine_clear (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { memset (dest, 0, width * sizeof (uint32_t)); } static void combine_dst (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { return; } static void combine_src_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; if (!mask) { memcpy (dest, src, width * sizeof (uint32_t)); } else { for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); *(dest + i) = s; } } } static void combine_over_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; if (!mask) { for (i = 0; i < width; ++i) { uint32_t s = *(src + i); uint32_t a = ALPHA_8 (s); if (a == 0xFF) { *(dest + i) = s; } else if (s) { uint32_t d = *(dest + i); uint32_t ia = a ^ 0xFF; UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); *(dest + i) = d; } } } else { for (i = 0; i < width; ++i) { uint32_t m = ALPHA_8 (*(mask + i)); if (m == 0xFF) { uint32_t s = *(src + i); uint32_t a = ALPHA_8 (s); if (a == 0xFF) { *(dest + i) = s; } else if (s) { uint32_t d = *(dest + i); uint32_t ia = a ^ 0xFF; UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); *(dest + i) = d; } } else if (m) { uint32_t s = *(src + i); if (s) { uint32_t d = *(dest + i); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8_ADD_UN8x4 (d, ALPHA_8 (~s), s); *(dest + i) = d; } } } } } static void combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask 
(src, mask, i); uint32_t d = *(dest + i); uint32_t ia = ALPHA_8 (~*(dest + i)); UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d); *(dest + i) = s; } } static void combine_in_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t a = ALPHA_8 (*(dest + i)); UN8x4_MUL_UN8 (s, a); *(dest + i) = s; } } static void combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); uint32_t a = ALPHA_8 (s); UN8x4_MUL_UN8 (d, a); *(dest + i) = d; } } static void combine_out_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t a = ALPHA_8 (~*(dest + i)); UN8x4_MUL_UN8 (s, a); *(dest + i) = s; } } static void combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); uint32_t a = ALPHA_8 (~s); UN8x4_MUL_UN8 (d, a); *(dest + i) = d; } } static void combine_atop_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); uint32_t dest_a = ALPHA_8 (d); uint32_t src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia); *(dest + i) = s; } } static void combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { 
uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); uint32_t src_a = ALPHA_8 (s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a); *(dest + i) = s; } } static void combine_xor_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); uint32_t src_ia = ALPHA_8 (~s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia); *(dest + i) = s; } } static void combine_add_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); UN8x4_ADD_UN8x4 (d, s); *(dest + i) = d; } } /* * PDF blend modes: * * The following blend modes have been taken from the PDF ISO 32000 * specification, which at this point in time is available from * * http://www.adobe.com/devnet/pdf/pdf_reference.html * * The specific documents of interest are the PDF spec itself: * * http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/PDF32000_2008.pdf * * chapters 11.3.5 and 11.3.6 and a later supplement for Adobe Acrobat * 9.1 and Reader 9.1: * * http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/adobe_supplement_iso32000_1.pdf * * that clarifies the specifications for blend modes ColorDodge and * ColorBurn. * * The formula for computing the final pixel color given in 11.3.6 is: * * Îħr — Cr = (1 – Îħs) — Îħb — Cb + (1 – Îħb) — Îħs — Cs + Îħb — Îħs — B(Cb, Cs) * * with B() is the blend function. When B(Cb, Cs) = Cs, this formula * reduces to the regular OVER operator. 
* * Cs and Cb are not premultiplied, so in our implementation we instead * use: * * cr = (1 – Îħs) — cb + (1 – Îħb) — cs + Îħb — Îħs — B (cb/Îħb, cs/Îħs) * * where cr, cs, and cb are premultiplied colors, and where the * * Îħb — Îħs — B(cb/Îħb, cs/Îħs) * * part is first arithmetically simplified under the assumption that Îħb * and Îħs are not 0, and then updated to produce a meaningful result when * they are. * * For all the blend mode operators, the alpha channel is given by * * Îħr = Îħs + Îħb + Îħb — Îħs */ /* * Multiply * * ad * as * B(d / ad, s / as) * = ad * as * d/ad * s/as * = d * s * */ static void combine_multiply_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = combine_mask (src, mask, i); uint32_t d = *(dest + i); uint32_t ss = s; uint32_t src_ia = ALPHA_8 (~s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (ss, dest_ia, d, src_ia); UN8x4_MUL_UN8x4 (d, s); UN8x4_ADD_UN8x4 (d, ss); *(dest + i) = d; } } static void combine_multiply_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t m = *(mask + i); uint32_t s = *(src + i); uint32_t d = *(dest + i); uint32_t r = d; uint32_t dest_ia = ALPHA_8 (~d); combine_mask_ca (&s, &m); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (r, ~m, s, dest_ia); UN8x4_MUL_UN8x4 (d, s); UN8x4_ADD_UN8x4 (r, d); *(dest + i) = r; } } #define CLAMP(v, low, high) \ do \ { \ if (v < (low)) \ v = (low); \ if (v > (high)) \ v = (high); \ } while (0) #define PDF_SEPARABLE_BLEND_MODE(name) \ static void \ combine_ ## name ## _u (pixman_implementation_t *imp, \ pixman_op_t op, \ uint32_t * dest, \ const uint32_t * src, \ const uint32_t * mask, \ int width) \ { \ int i; \ for (i = 0; i < width; ++i) \ { \ uint32_t s = combine_mask (src, mask, i); \ uint32_t d = *(dest + i); \ uint8_t sa = 
ALPHA_8 (s); \ uint8_t isa = ~sa; \ uint8_t da = ALPHA_8 (d); \ uint8_t ida = ~da; \ uint32_t ra, rr, rg, rb; \ \ ra = da * 0xff + sa * 0xff - sa * da; \ rr = isa * RED_8 (d) + ida * RED_8 (s); \ rg = isa * GREEN_8 (d) + ida * GREEN_8 (s); \ rb = isa * BLUE_8 (d) + ida * BLUE_8 (s); \ \ rr += blend_ ## name (RED_8 (d), da, RED_8 (s), sa); \ rg += blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), sa); \ rb += blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), sa); \ \ CLAMP (ra, 0, 255 * 255); \ CLAMP (rr, 0, 255 * 255); \ CLAMP (rg, 0, 255 * 255); \ CLAMP (rb, 0, 255 * 255); \ \ ra = DIV_ONE_UN8 (ra); \ rr = DIV_ONE_UN8 (rr); \ rg = DIV_ONE_UN8 (rg); \ rb = DIV_ONE_UN8 (rb); \ \ *(dest + i) = ra << 24 | rr << 16 | rg << 8 | rb; \ } \ } \ \ static void \ combine_ ## name ## _ca (pixman_implementation_t *imp, \ pixman_op_t op, \ uint32_t * dest, \ const uint32_t * src, \ const uint32_t * mask, \ int width) \ { \ int i; \ for (i = 0; i < width; ++i) \ { \ uint32_t m = *(mask + i); \ uint32_t s = *(src + i); \ uint32_t d = *(dest + i); \ uint8_t da = ALPHA_8 (d); \ uint8_t ida = ~da; \ uint32_t ra, rr, rg, rb; \ uint8_t ira, iga, iba; \ \ combine_mask_ca (&s, &m); \ \ ira = ~RED_8 (m); \ iga = ~GREEN_8 (m); \ iba = ~BLUE_8 (m); \ \ ra = da * 0xff + ALPHA_8 (s) * 0xff - ALPHA_8 (s) * da; \ rr = ira * RED_8 (d) + ida * RED_8 (s); \ rg = iga * GREEN_8 (d) + ida * GREEN_8 (s); \ rb = iba * BLUE_8 (d) + ida * BLUE_8 (s); \ \ rr += blend_ ## name (RED_8 (d), da, RED_8 (s), RED_8 (m)); \ rg += blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), GREEN_8 (m)); \ rb += blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), BLUE_8 (m)); \ \ CLAMP (ra, 0, 255 * 255); \ CLAMP (rr, 0, 255 * 255); \ CLAMP (rg, 0, 255 * 255); \ CLAMP (rb, 0, 255 * 255); \ \ ra = DIV_ONE_UN8 (ra); \ rr = DIV_ONE_UN8 (rr); \ rg = DIV_ONE_UN8 (rg); \ rb = DIV_ONE_UN8 (rb); \ \ *(dest + i) = ra << 24 | rr << 16 | rg << 8 | rb; \ } \ } /* * Screen * * ad * as * B(d/ad, s/as) * = ad * as * (d/ad + s/as - s/as * d/ad) * = ad * s + as * 
d - s * d */ static inline int32_t blend_screen (int32_t d, int32_t ad, int32_t s, int32_t as) { return s * ad + d * as - s * d; } PDF_SEPARABLE_BLEND_MODE (screen) /* * Overlay * * ad * as * B(d/ad, s/as) * = ad * as * Hardlight (s, d) * = if (d / ad < 0.5) * as * ad * Multiply (s/as, 2 * d/ad) * else * as * ad * Screen (s/as, 2 * d / ad - 1) * = if (d < 0.5 * ad) * as * ad * s/as * 2 * d /ad * else * as * ad * (s/as + 2 * d / ad - 1 - s / as * (2 * d / ad - 1)) * = if (2 * d < ad) * 2 * s * d * else * ad * s + 2 * as * d - as * ad - ad * s * (2 * d / ad - 1) * = if (2 * d < ad) * 2 * s * d * else * as * ad - 2 * (ad - d) * (as - s) */ static inline int32_t blend_overlay (int32_t d, int32_t ad, int32_t s, int32_t as) { uint32_t r; if (2 * d < ad) r = 2 * s * d; else r = as * ad - 2 * (ad - d) * (as - s); return r; } PDF_SEPARABLE_BLEND_MODE (overlay) /* * Darken * * ad * as * B(d/ad, s/as) * = ad * as * MIN(d/ad, s/as) * = MIN (as * d, ad * s) */ static inline int32_t blend_darken (int32_t d, int32_t ad, int32_t s, int32_t as) { s = ad * s; d = as * d; return s > d ? d : s; } PDF_SEPARABLE_BLEND_MODE (darken) /* * Lighten * * ad * as * B(d/ad, s/as) * = ad * as * MAX(d/ad, s/as) * = MAX (as * d, ad * s) */ static inline int32_t blend_lighten (int32_t d, int32_t ad, int32_t s, int32_t as) { s = ad * s; d = as * d; return s > d ? 
s : d; } PDF_SEPARABLE_BLEND_MODE (lighten) /* * Hard light * * ad * as * B(d/ad, s/as) * = if (s/as <= 0.5) * ad * as * Multiply (d/ad, 2 * s/as) * else * ad * as * Screen (d/ad, 2 * s/as - 1) * = if 2 * s <= as * ad * as * d/ad * 2 * s / as * else * ad * as * (d/ad + (2 * s/as - 1) + d/ad * (2 * s/as - 1)) * = if 2 * s <= as * 2 * s * d * else * as * ad - 2 * (ad - d) * (as - s) */ static inline int32_t blend_hard_light (int32_t d, int32_t ad, int32_t s, int32_t as) { if (2 * s < as) return 2 * s * d; else return as * ad - 2 * (ad - d) * (as - s); } PDF_SEPARABLE_BLEND_MODE (hard_light) /* * Difference * * ad * as * B(s/as, d/ad) * = ad * as * abs (s/as - d/ad) * = if (s/as <= d/ad) * ad * as * (d/ad - s/as) * else * ad * as * (s/as - d/ad) * = if (ad * s <= as * d) * as * d - ad * s * else * ad * s - as * d */ static inline int32_t blend_difference (int32_t d, int32_t ad, int32_t s, int32_t as) { int32_t das = d * as; int32_t sad = s * ad; if (sad < das) return das - sad; else return sad - das; } PDF_SEPARABLE_BLEND_MODE (difference) /* * Exclusion * * ad * as * B(s/as, d/ad) * = ad * as * (d/ad + s/as - 2 * d/ad * s/as) * = as * d + ad * s - 2 * s * d */ /* This can be made faster by writing it directly and not using * PDF_SEPARABLE_BLEND_MODE, but that's a performance optimization */ static inline int32_t blend_exclusion (int32_t d, int32_t ad, int32_t s, int32_t as) { return s * ad + d * as - 2 * d * s; } PDF_SEPARABLE_BLEND_MODE (exclusion) #undef PDF_SEPARABLE_BLEND_MODE /* Component alpha combiners */ static void combine_clear_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { memset (dest, 0, width * sizeof(uint32_t)); } static void combine_src_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = *(src + i); uint32_t m = *(mask + i); combine_mask_value_ca (&s, 
&m); *(dest + i) = s; } } static void combine_over_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t a; combine_mask_ca (&s, &m); a = ~m; if (a) { uint32_t d = *(dest + i); UN8x4_MUL_UN8x4_ADD_UN8x4 (d, a, s); s = d; } *(dest + i) = s; } } static void combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t d = *(dest + i); uint32_t a = ~d >> A_SHIFT; if (a) { uint32_t s = *(src + i); uint32_t m = *(mask + i); UN8x4_MUL_UN8x4 (s, m); UN8x4_MUL_UN8_ADD_UN8x4 (s, a, d); *(dest + i) = s; } } } static void combine_in_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t d = *(dest + i); uint16_t a = d >> A_SHIFT; uint32_t s = 0; if (a) { uint32_t m = *(mask + i); s = *(src + i); combine_mask_value_ca (&s, &m); if (a != MASK) UN8x4_MUL_UN8 (s, a); } *(dest + i) = s; } } static void combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t a; combine_mask_alpha_ca (&s, &m); a = m; if (a != ~0) { uint32_t d = 0; if (a) { d = *(dest + i); UN8x4_MUL_UN8x4 (d, a); } *(dest + i) = d; } } } static void combine_out_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t d = *(dest + i); uint16_t a = ~d >> A_SHIFT; uint32_t s = 0; if (a) { uint32_t m = *(mask + i); s = *(src + i); combine_mask_value_ca (&s, &m); if (a != MASK) UN8x4_MUL_UN8 (s, a); } *(dest + i) = s; } } 
static void combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t a; combine_mask_alpha_ca (&s, &m); a = ~m; if (a != ~0) { uint32_t d = 0; if (a) { d = *(dest + i); UN8x4_MUL_UN8x4 (d, a); } *(dest + i) = d; } } } static void combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t d = *(dest + i); uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t ad; uint16_t as = d >> A_SHIFT; combine_mask_ca (&s, &m); ad = ~m; UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as); *(dest + i) = d; } } static void combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t d = *(dest + i); uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t ad; uint16_t as = ~d >> A_SHIFT; combine_mask_ca (&s, &m); ad = m; UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as); *(dest + i) = d; } } static void combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t d = *(dest + i); uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t ad; uint16_t as = ~d >> A_SHIFT; combine_mask_ca (&s, &m); ad = ~m; UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as); *(dest + i) = d; } } static void combine_add_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; for (i = 0; i < width; ++i) { uint32_t s = *(src + i); uint32_t m = *(mask + i); uint32_t d = *(dest + i); combine_mask_value_ca (&s, &m); UN8x4_ADD_UN8x4 (d, s); *(dest + i) = d; } } void 
_pixman_setup_combiner_functions_32 (pixman_implementation_t *imp) { /* Unified alpha */ imp->combine_32[PIXMAN_OP_CLEAR] = combine_clear; imp->combine_32[PIXMAN_OP_SRC] = combine_src_u; imp->combine_32[PIXMAN_OP_DST] = combine_dst; imp->combine_32[PIXMAN_OP_OVER] = combine_over_u; imp->combine_32[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_u; imp->combine_32[PIXMAN_OP_IN] = combine_in_u; imp->combine_32[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_u; imp->combine_32[PIXMAN_OP_OUT] = combine_out_u; imp->combine_32[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_u; imp->combine_32[PIXMAN_OP_ATOP] = combine_atop_u; imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_u; imp->combine_32[PIXMAN_OP_XOR] = combine_xor_u; imp->combine_32[PIXMAN_OP_ADD] = combine_add_u; imp->combine_32[PIXMAN_OP_MULTIPLY] = combine_multiply_u; imp->combine_32[PIXMAN_OP_SCREEN] = combine_screen_u; imp->combine_32[PIXMAN_OP_OVERLAY] = combine_overlay_u; imp->combine_32[PIXMAN_OP_DARKEN] = combine_darken_u; imp->combine_32[PIXMAN_OP_LIGHTEN] = combine_lighten_u; imp->combine_32[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_u; imp->combine_32[PIXMAN_OP_DIFFERENCE] = combine_difference_u; imp->combine_32[PIXMAN_OP_EXCLUSION] = combine_exclusion_u; /* Component alpha combiners */ imp->combine_32_ca[PIXMAN_OP_CLEAR] = combine_clear_ca; imp->combine_32_ca[PIXMAN_OP_SRC] = combine_src_ca; /* dest */ imp->combine_32_ca[PIXMAN_OP_OVER] = combine_over_ca; imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_ca; imp->combine_32_ca[PIXMAN_OP_IN] = combine_in_ca; imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_ca; imp->combine_32_ca[PIXMAN_OP_OUT] = combine_out_ca; imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_ca; imp->combine_32_ca[PIXMAN_OP_ATOP] = combine_atop_ca; imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_ca; imp->combine_32_ca[PIXMAN_OP_XOR] = combine_xor_ca; imp->combine_32_ca[PIXMAN_OP_ADD] = combine_add_ca; 
imp->combine_32_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_ca; imp->combine_32_ca[PIXMAN_OP_SCREEN] = combine_screen_ca; imp->combine_32_ca[PIXMAN_OP_OVERLAY] = combine_overlay_ca; imp->combine_32_ca[PIXMAN_OP_DARKEN] = combine_darken_ca; imp->combine_32_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_ca; imp->combine_32_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_ca; imp->combine_32_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_ca; imp->combine_32_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_ca; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-combine32.h0000664000175000017500000001565514712446423017652 0ustar00mattst88mattst88#define COMPONENT_SIZE 8 #define MASK 0xff #define ONE_HALF 0x80 #define A_SHIFT 8 * 3 #define R_SHIFT 8 * 2 #define G_SHIFT 8 #define A_MASK 0xff000000 #define R_MASK 0xff0000 #define G_MASK 0xff00 #define RB_MASK 0xff00ff #define AG_MASK 0xff00ff00 #define RB_ONE_HALF 0x800080 #define RB_MASK_PLUS_ONE 0x1000100 #define ALPHA_8(x) ((x) >> A_SHIFT) #define RED_8(x) (((x) >> R_SHIFT) & MASK) #define GREEN_8(x) (((x) >> G_SHIFT) & MASK) #define BLUE_8(x) ((x) & MASK) /* * ARMv6 has UQADD8 instruction, which implements unsigned saturated * addition for 8-bit values packed in 32-bit registers. It is very useful * for UN8x4_ADD_UN8x4, UN8_rb_ADD_UN8_rb and ADD_UN8 macros (which would * otherwise need a lot of arithmetic operations to simulate this operation). * Since most of the major ARM linux distros are built for ARMv7, we are * much less dependent on runtime CPU detection and can get practical * benefits from conditional compilation here for a lot of users. 
*/ #if defined(USE_GCC_INLINE_ASM) && defined(__arm__) && \ !defined(__aarch64__) && (!defined(__thumb__) || defined(__thumb2__)) #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \ defined(__ARM_ARCH_6M__) || defined(__ARM_ARCH_7__) || \ defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \ defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__) static force_inline uint32_t un8x4_add_un8x4 (uint32_t x, uint32_t y) { uint32_t t; asm ("uqadd8 %0, %1, %2" : "=r" (t) : "%r" (x), "r" (y)); return t; } #define UN8x4_ADD_UN8x4(x, y) \ ((x) = un8x4_add_un8x4 ((x), (y))) #define UN8_rb_ADD_UN8_rb(x, y, t) \ ((t) = un8x4_add_un8x4 ((x), (y)), (x) = (t)) #define ADD_UN8(x, y, t) \ ((t) = (x), un8x4_add_un8x4 ((t), (y))) #endif #endif /*****************************************************************************/ /* * Helper macros. */ #define MUL_UN8(a, b, t) \ ((t) = (a) * (uint16_t)(b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT )) #define DIV_UN8(a, b) \ (((uint16_t) (a) * MASK + ((b) / 2)) / (b)) #ifndef ADD_UN8 #define ADD_UN8(x, y, t) \ ((t) = (x) + (y), \ (uint32_t) (uint8_t) ((t) | (0 - ((t) >> G_SHIFT)))) #endif #define DIV_ONE_UN8(x) \ (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT) /* * The methods below use some tricks to be able to do two color * components at the same time. 
*/ /* * x_rb = (x_rb * a) / 255 */ #define UN8_rb_MUL_UN8(x, a, t) \ do \ { \ t = ((x) & RB_MASK) * (a); \ t += RB_ONE_HALF; \ x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \ x &= RB_MASK; \ } while (0) /* * x_rb = min (x_rb + y_rb, 255) */ #ifndef UN8_rb_ADD_UN8_rb #define UN8_rb_ADD_UN8_rb(x, y, t) \ do \ { \ t = ((x) + (y)); \ t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \ x = (t & RB_MASK); \ } while (0) #endif /* * x_rb = (x_rb * a_rb) / 255 */ #define UN8_rb_MUL_UN8_rb(x, a, t) \ do \ { \ t = (x & MASK) * (a & MASK); \ t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \ t += RB_ONE_HALF; \ t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \ x = t & RB_MASK; \ } while (0) /* * x_c = (x_c * a) / 255 */ #define UN8x4_MUL_UN8(x, a) \ do \ { \ uint32_t r1__, r2__, t__; \ \ r1__ = (x); \ UN8_rb_MUL_UN8 (r1__, (a), t__); \ \ r2__ = (x) >> G_SHIFT; \ UN8_rb_MUL_UN8 (r2__, (a), t__); \ \ (x) = r1__ | (r2__ << G_SHIFT); \ } while (0) /* * x_c = (x_c * a) / 255 + y_c */ #define UN8x4_MUL_UN8_ADD_UN8x4(x, a, y) \ do \ { \ uint32_t r1__, r2__, r3__, t__; \ \ r1__ = (x); \ r2__ = (y) & RB_MASK; \ UN8_rb_MUL_UN8 (r1__, (a), t__); \ UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \ \ r2__ = (x) >> G_SHIFT; \ r3__ = ((y) >> G_SHIFT) & RB_MASK; \ UN8_rb_MUL_UN8 (r2__, (a), t__); \ UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ \ (x) = r1__ | (r2__ << G_SHIFT); \ } while (0) /* * x_c = (x_c * a + y_c * b) / 255 */ #define UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8(x, a, y, b) \ do \ { \ uint32_t r1__, r2__, r3__, t__; \ \ r1__ = (x); \ r2__ = (y); \ UN8_rb_MUL_UN8 (r1__, (a), t__); \ UN8_rb_MUL_UN8 (r2__, (b), t__); \ UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \ \ r2__ = ((x) >> G_SHIFT); \ r3__ = ((y) >> G_SHIFT); \ UN8_rb_MUL_UN8 (r2__, (a), t__); \ UN8_rb_MUL_UN8 (r3__, (b), t__); \ UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ \ (x) = r1__ | (r2__ << G_SHIFT); \ } while (0) /* * x_c = (x_c * a_c) / 255 */ #define UN8x4_MUL_UN8x4(x, a) \ do \ { \ uint32_t r1__, r2__, r3__, t__; \ \ r1__ = (x); \ r2__ = (a); \ 
UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \ \ r2__ = (x) >> G_SHIFT; \ r3__ = (a) >> G_SHIFT; \ UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \ \ (x) = r1__ | (r2__ << G_SHIFT); \ } while (0) /* * x_c = (x_c * a_c) / 255 + y_c */ #define UN8x4_MUL_UN8x4_ADD_UN8x4(x, a, y) \ do \ { \ uint32_t r1__, r2__, r3__, t__; \ \ r1__ = (x); \ r2__ = (a); \ UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \ r2__ = (y) & RB_MASK; \ UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \ \ r2__ = ((x) >> G_SHIFT); \ r3__ = ((a) >> G_SHIFT); \ UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \ r3__ = ((y) >> G_SHIFT) & RB_MASK; \ UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ \ (x) = r1__ | (r2__ << G_SHIFT); \ } while (0) /* * x_c = (x_c * a_c + y_c * b) / 255 */ #define UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8(x, a, y, b) \ do \ { \ uint32_t r1__, r2__, r3__, t__; \ \ r1__ = (x); \ r2__ = (a); \ UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \ r2__ = (y); \ UN8_rb_MUL_UN8 (r2__, (b), t__); \ UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \ \ r2__ = (x) >> G_SHIFT; \ r3__ = (a) >> G_SHIFT; \ UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \ r3__ = (y) >> G_SHIFT; \ UN8_rb_MUL_UN8 (r3__, (b), t__); \ UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ \ x = r1__ | (r2__ << G_SHIFT); \ } while (0) /* x_c = min(x_c + y_c, 255) */ #ifndef UN8x4_ADD_UN8x4 #define UN8x4_ADD_UN8x4(x, y) \ do \ { \ uint32_t r1__, r2__, r3__, t__; \ \ r1__ = (x) & RB_MASK; \ r2__ = (y) & RB_MASK; \ UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \ \ r2__ = ((x) >> G_SHIFT) & RB_MASK; \ r3__ = ((y) >> G_SHIFT) & RB_MASK; \ UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \ \ x = r1__ | (r2__ << G_SHIFT); \ } while (0) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-compiler.h0000664000175000017500000001433714712446423017677 0ustar00mattst88mattst88/* Pixman uses some non-standard compiler features. 
This file ensures * they exist * * The features are: * * FUNC must be defined to expand to the current function * PIXMAN_EXPORT should be defined to whatever is required to * export functions from a shared library * limits limits for various types must be defined * inline must be defined * force_inline must be defined */ #if defined (__GNUC__) # define FUNC ((const char*) (__PRETTY_FUNCTION__)) #elif defined (__sun) || (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) # define FUNC ((const char*) (__func__)) #else # define FUNC ((const char*) ("???")) #endif #if defined (__GNUC__) # define unlikely(expr) __builtin_expect ((expr), 0) #else # define unlikely(expr) (expr) #endif #if defined (__GNUC__) # define MAYBE_UNUSED __attribute__((unused)) #else # define MAYBE_UNUSED #endif #ifndef INT16_MIN # define INT16_MIN (-32767-1) #endif #ifndef INT16_MAX # define INT16_MAX (32767) #endif #ifndef INT32_MIN # define INT32_MIN (-2147483647-1) #endif #ifndef INT32_MAX # define INT32_MAX (2147483647) #endif #ifndef UINT32_MIN # define UINT32_MIN (0) #endif #ifndef UINT32_MAX # define UINT32_MAX (4294967295U) #endif #ifndef INT64_MIN # define INT64_MIN (-9223372036854775807-1) #endif #ifndef INT64_MAX # define INT64_MAX (9223372036854775807) #endif #ifndef SIZE_MAX # define SIZE_MAX ((size_t)-1) #endif #ifndef M_PI # define M_PI 3.14159265358979323846 #endif #ifdef _MSC_VER /* 'inline' is available only in C++ in MSVC */ # define inline __inline # define force_inline __forceinline # define noinline __declspec(noinline) #elif defined __GNUC__ || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) # define inline __inline__ # define force_inline __inline__ __attribute__ ((__always_inline__)) # define noinline __attribute__((noinline)) #else # ifndef force_inline # define force_inline inline # endif # ifndef noinline # define noinline # endif #endif /* GCC visibility */ #if defined(__GNUC__) && __GNUC__ >= 4 && !defined(_WIN32) # define PIXMAN_EXPORT __attribute__ 
((visibility("default"))) /* Sun Studio 8 visibility */ #elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550) # define PIXMAN_EXPORT __global #elif defined (_MSC_VER) || defined(__MINGW32__) # define PIXMAN_EXPORT PIXMAN_API #else # define PIXMAN_EXPORT #endif /* member offsets */ #define CONTAINER_OF(type, member, data) \ ((type *)(((uint8_t *)data) - offsetof (type, member))) /* TLS */ #if defined(PIXMAN_NO_TLS) # define PIXMAN_DEFINE_THREAD_LOCAL(type, name) \ static type name; # define PIXMAN_GET_THREAD_LOCAL(name) \ (&name) #elif defined(TLS) # define PIXMAN_DEFINE_THREAD_LOCAL(type, name) \ static TLS type name; # define PIXMAN_GET_THREAD_LOCAL(name) \ (&name) #elif defined(__MINGW32__) # define _NO_W32_PSEUDO_MODIFIERS # include # define PIXMAN_DEFINE_THREAD_LOCAL(type, name) \ static volatile int tls_ ## name ## _initialized = 0; \ static void *tls_ ## name ## _mutex = NULL; \ static unsigned tls_ ## name ## _index; \ \ static type * \ tls_ ## name ## _alloc (void) \ { \ type *value = calloc (1, sizeof (type)); \ if (value) \ TlsSetValue (tls_ ## name ## _index, value); \ return value; \ } \ \ static force_inline type * \ tls_ ## name ## _get (void) \ { \ type *value; \ if (!tls_ ## name ## _initialized) \ { \ if (!tls_ ## name ## _mutex) \ { \ void *mutex = CreateMutexA (NULL, 0, NULL); \ if (InterlockedCompareExchangePointer ( \ &tls_ ## name ## _mutex, mutex, NULL) != NULL) \ { \ CloseHandle (mutex); \ } \ } \ WaitForSingleObject (tls_ ## name ## _mutex, 0xFFFFFFFF); \ if (!tls_ ## name ## _initialized) \ { \ tls_ ## name ## _index = TlsAlloc (); \ tls_ ## name ## _initialized = 1; \ } \ ReleaseMutex (tls_ ## name ## _mutex); \ } \ if (tls_ ## name ## _index == 0xFFFFFFFF) \ return NULL; \ value = TlsGetValue (tls_ ## name ## _index); \ if (!value) \ value = tls_ ## name ## _alloc (); \ return value; \ } # define PIXMAN_GET_THREAD_LOCAL(name) \ tls_ ## name ## _get () #elif defined(_MSC_VER) # define PIXMAN_DEFINE_THREAD_LOCAL(type, name) \ static 
__declspec(thread) type name; # define PIXMAN_GET_THREAD_LOCAL(name) \ (&name) #elif defined(HAVE_PTHREADS) #include # define PIXMAN_DEFINE_THREAD_LOCAL(type, name) \ static pthread_once_t tls_ ## name ## _once_control = PTHREAD_ONCE_INIT; \ static pthread_key_t tls_ ## name ## _key; \ \ static void \ tls_ ## name ## _destroy_value (void *value) \ { \ free (value); \ } \ \ static void \ tls_ ## name ## _make_key (void) \ { \ pthread_key_create (&tls_ ## name ## _key, \ tls_ ## name ## _destroy_value); \ } \ \ static type * \ tls_ ## name ## _alloc (void) \ { \ type *value = calloc (1, sizeof (type)); \ if (value) \ pthread_setspecific (tls_ ## name ## _key, value); \ return value; \ } \ \ static force_inline type * \ tls_ ## name ## _get (void) \ { \ type *value = NULL; \ if (pthread_once (&tls_ ## name ## _once_control, \ tls_ ## name ## _make_key) == 0) \ { \ value = pthread_getspecific (tls_ ## name ## _key); \ if (!value) \ value = tls_ ## name ## _alloc (); \ } \ return value; \ } # define PIXMAN_GET_THREAD_LOCAL(name) \ tls_ ## name ## _get () #else # error "Unknown thread local support for this system. Pixman will not work with multiple threads. Define PIXMAN_NO_TLS to acknowledge and accept this limitation and compile pixman without thread-safety support." #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-conical-gradient.c0000664000175000017500000001303614712446423021256 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. 
* 2005 Lars Knoll & Zack Rusin, Trolltech * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" static force_inline double coordinates_to_parameter (double x, double y, double angle) { double t; t = atan2 (y, x) + angle; while (t < 0) t += 2 * M_PI; while (t >= 2 * M_PI) t -= 2 * M_PI; return 1 - t * (1 / (2 * M_PI)); /* Scale t to [0, 1] and * make rotation CCW */ } static uint32_t * conical_get_scanline (pixman_iter_t *iter, const uint32_t *mask, int Bpp, pixman_gradient_walker_write_t write_pixel) { pixman_image_t *image = iter->image; int x = iter->x; int y = iter->y; int width = iter->width; uint32_t *buffer = iter->buffer; gradient_t *gradient = (gradient_t *)image; conical_gradient_t *conical = (conical_gradient_t *)image; uint32_t *end = buffer + width * (Bpp / 4); pixman_gradient_walker_t walker; pixman_bool_t affine = TRUE; double cx = 1.; double cy = 0.; double cz = 0.; double rx = x + 0.5; double ry = y + 0.5; double rz = 1.; _pixman_gradient_walker_init (&walker, gradient, image->common.repeat); if (image->common.transform) { pixman_vector_t v; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (!pixman_transform_point_3d (image->common.transform, &v)) return iter->buffer; cx = image->common.transform->matrix[0][0] / 65536.; cy = image->common.transform->matrix[1][0] / 65536.; cz = image->common.transform->matrix[2][0] / 65536.; rx = v.vector[0] / 65536.; ry = v.vector[1] / 65536.; rz = v.vector[2] / 65536.; affine = image->common.transform->matrix[2][0] == 0 && v.vector[2] == pixman_fixed_1; } if (affine) { rx -= conical->center.x / 65536.; ry -= conical->center.y / 65536.; while (buffer < end) { if (!mask || *mask++) { double t = coordinates_to_parameter (rx, ry, conical->angle); write_pixel (&walker, (pixman_fixed_48_16_t)pixman_double_to_fixed (t), buffer); } buffer += (Bpp / 4); rx += cx; ry += cy; } } else { 
while (buffer < end) { double x, y; if (!mask || *mask++) { double t; if (rz != 0) { x = rx / rz; y = ry / rz; } else { x = y = 0.; } x -= conical->center.x / 65536.; y -= conical->center.y / 65536.; t = coordinates_to_parameter (x, y, conical->angle); write_pixel (&walker, (pixman_fixed_48_16_t)pixman_double_to_fixed (t), buffer); } buffer += (Bpp / 4); rx += cx; ry += cy; rz += cz; } } iter->y++; return iter->buffer; } static uint32_t * conical_get_scanline_narrow (pixman_iter_t *iter, const uint32_t *mask) { return conical_get_scanline (iter, mask, 4, _pixman_gradient_walker_write_narrow); } static uint32_t * conical_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask) { return conical_get_scanline (iter, NULL, 16, _pixman_gradient_walker_write_wide); } void _pixman_conical_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter) { if (iter->iter_flags & ITER_NARROW) iter->get_scanline = conical_get_scanline_narrow; else iter->get_scanline = conical_get_scanline_wide; } PIXMAN_EXPORT pixman_image_t * pixman_image_create_conical_gradient (const pixman_point_fixed_t * center, pixman_fixed_t angle, const pixman_gradient_stop_t *stops, int n_stops) { pixman_image_t *image = _pixman_image_allocate (); conical_gradient_t *conical; if (!image) return NULL; conical = &image->conical; if (!_pixman_init_gradient (&conical->common, stops, n_stops)) { free (image); return NULL; } angle = MOD (angle, pixman_int_to_fixed (360)); image->type = CONICAL; conical->center = *center; conical->angle = (pixman_fixed_to_double (angle) / 180.0) * M_PI; return image; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-edge-accessors.c0000664000175000017500000000006714712446423020742 0ustar00mattst88mattst88 #define PIXMAN_FB_ACCESSORS #include "pixman-edge.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 
pixman-0.44.0/pixman/pixman-edge-imp.h0000664000175000017500000001062114712446423017544 0ustar00mattst88mattst88/* * Copyright Âİ 2004 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ #ifndef rasterize_span #endif static void RASTERIZE_EDGES (pixman_image_t *image, pixman_edge_t *l, pixman_edge_t *r, pixman_fixed_t t, pixman_fixed_t b) { pixman_fixed_t y = t; uint32_t *line; uint32_t *buf = (image)->bits.bits; int stride = (image)->bits.rowstride; int width = (image)->bits.width; line = buf + pixman_fixed_to_int (y) * stride; for (;;) { pixman_fixed_t lx; pixman_fixed_t rx; int lxi; int rxi; lx = l->x; rx = r->x; #if N_BITS == 1 /* For the non-antialiased case, round the coordinates up, in effect * sampling just slightly to the left of the pixel. This is so that * when the sample point lies exactly on the line, we round towards * north-west. 
* * (The AA case does a similar adjustment in RENDER_SAMPLES_X) */ lx += X_FRAC_FIRST(1) - pixman_fixed_e; rx += X_FRAC_FIRST(1) - pixman_fixed_e; #endif /* clip X */ if (lx < 0) lx = 0; if (pixman_fixed_to_int (rx) >= width) #if N_BITS == 1 rx = pixman_int_to_fixed (width); #else /* Use the last pixel of the scanline, covered 100%. * We can't use the first pixel following the scanline, * because accessing it could result in a buffer overrun. */ rx = pixman_int_to_fixed (width) - 1; #endif /* Skip empty (or backwards) sections */ if (rx > lx) { /* Find pixel bounds for span */ lxi = pixman_fixed_to_int (lx); rxi = pixman_fixed_to_int (rx); #if N_BITS == 1 { #define LEFT_MASK(x) \ (((x) & 0x1f) ? \ SCREEN_SHIFT_RIGHT (0xffffffff, (x) & 0x1f) : 0) #define RIGHT_MASK(x) \ (((32 - (x)) & 0x1f) ? \ SCREEN_SHIFT_LEFT (0xffffffff, (32 - (x)) & 0x1f) : 0) #define MASK_BITS(x,w,l,n,r) { \ n = (w); \ r = RIGHT_MASK ((x) + n); \ l = LEFT_MASK (x); \ if (l) { \ n -= 32 - ((x) & 0x1f); \ if (n < 0) { \ n = 0; \ l &= r; \ r = 0; \ } \ } \ n >>= 5; \ } uint32_t *a = line; uint32_t startmask; uint32_t endmask; int nmiddle; int width = rxi - lxi; int x = lxi; a += x >> 5; x &= 0x1f; MASK_BITS (x, width, startmask, nmiddle, endmask); if (startmask) { WRITE(image, a, READ(image, a) | startmask); a++; } while (nmiddle--) WRITE(image, a++, 0xffffffff); if (endmask) WRITE(image, a, READ(image, a) | endmask); } #else { DEFINE_ALPHA(line,lxi); int lxs; int rxs; /* Sample coverage for edge pixels */ lxs = RENDER_SAMPLES_X (lx, N_BITS); rxs = RENDER_SAMPLES_X (rx, N_BITS); /* Add coverage across row */ if (lxi == rxi) { ADD_ALPHA (rxs - lxs); } else { int xi; ADD_ALPHA (N_X_FRAC(N_BITS) - lxs); STEP_ALPHA; for (xi = lxi + 1; xi < rxi; xi++) { ADD_ALPHA (N_X_FRAC(N_BITS)); STEP_ALPHA; } ADD_ALPHA (rxs); } } #endif } if (y == b) break; #if N_BITS > 1 if (pixman_fixed_frac (y) != Y_FRAC_LAST(N_BITS)) { RENDER_EDGE_STEP_SMALL (l); RENDER_EDGE_STEP_SMALL (r); y += STEP_Y_SMALL(N_BITS); } else 
#endif { RENDER_EDGE_STEP_BIG (l); RENDER_EDGE_STEP_BIG (r); y += STEP_Y_BIG(N_BITS); line += stride; } } } #undef rasterize_span ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-edge.c0000664000175000017500000002514414712446423016762 0ustar00mattst88mattst88/* * Copyright Âİ 2004 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include "pixman-private.h" #include "pixman-accessor.h" /* * Step across a small sample grid gap */ #define RENDER_EDGE_STEP_SMALL(edge) \ { \ edge->x += edge->stepx_small; \ edge->e += edge->dx_small; \ if (edge->e > 0) \ { \ edge->e -= edge->dy; \ edge->x += edge->signdx; \ } \ } /* * Step across a large sample grid gap */ #define RENDER_EDGE_STEP_BIG(edge) \ { \ edge->x += edge->stepx_big; \ edge->e += edge->dx_big; \ if (edge->e > 0) \ { \ edge->e -= edge->dy; \ edge->x += edge->signdx; \ } \ } #ifdef PIXMAN_FB_ACCESSORS #define PIXMAN_RASTERIZE_EDGES pixman_rasterize_edges_accessors #else #define PIXMAN_RASTERIZE_EDGES pixman_rasterize_edges_no_accessors #endif /* * 4 bit alpha */ #define N_BITS 4 #define RASTERIZE_EDGES rasterize_edges_4 #ifndef WORDS_BIGENDIAN #define SHIFT_4(o) ((o) << 2) #else #define SHIFT_4(o) ((1 - (o)) << 2) #endif #define GET_4(x, o) (((x) >> SHIFT_4 (o)) & 0xf) #define PUT_4(x, o, v) \ (((x) & ~(0xf << SHIFT_4 (o))) | (((v) & 0xf) << SHIFT_4 (o))) #define DEFINE_ALPHA(line, x) \ uint8_t *__ap = (uint8_t *) line + ((x) >> 1); \ int __ao = (x) & 1 #define STEP_ALPHA ((__ap += __ao), (__ao ^= 1)) #define ADD_ALPHA(a) \ { \ uint8_t __o = READ (image, __ap); \ uint8_t __a = (a) + GET_4 (__o, __ao); \ WRITE (image, __ap, PUT_4 (__o, __ao, __a | (0 - ((__a) >> 4)))); \ } #include "pixman-edge-imp.h" #undef ADD_ALPHA #undef STEP_ALPHA #undef DEFINE_ALPHA #undef RASTERIZE_EDGES #undef N_BITS /* * 1 bit alpha */ #define N_BITS 1 #define RASTERIZE_EDGES rasterize_edges_1 #include "pixman-edge-imp.h" #undef RASTERIZE_EDGES #undef N_BITS /* * 8 bit alpha */ static force_inline uint8_t clip255 (int x) { if (x > 255) return 255; return x; } #define ADD_SATURATE_8(buf, val, length) \ do \ { \ int i__ = (length); \ uint8_t *buf__ = (buf); \ int val__ = (val); \ \ while (i__--) \ { \ WRITE (image, (buf__), clip255 (READ (image, (buf__)) + (val__))); \ (buf__)++; \ } \ } while (0) /* * We want to detect 
the case where we add the same value to a long * span of pixels. The triangles on the end are filled in while we * count how many sub-pixel scanlines contribute to the middle section. * * +--------------------------+ * fill_height =| \ / * +------------------+ * |================| * fill_start fill_end */ static void rasterize_edges_8 (pixman_image_t *image, pixman_edge_t * l, pixman_edge_t * r, pixman_fixed_t t, pixman_fixed_t b) { pixman_fixed_t y = t; uint32_t *line; int fill_start = -1, fill_end = -1; int fill_size = 0; uint32_t *buf = (image)->bits.bits; int stride = (image)->bits.rowstride; int width = (image)->bits.width; line = buf + pixman_fixed_to_int (y) * stride; for (;;) { uint8_t *ap = (uint8_t *) line; pixman_fixed_t lx, rx; int lxi, rxi; /* clip X */ lx = l->x; if (lx < 0) lx = 0; rx = r->x; if (pixman_fixed_to_int (rx) >= width) { /* Use the last pixel of the scanline, covered 100%. * We can't use the first pixel following the scanline, * because accessing it could result in a buffer overrun. */ rx = pixman_int_to_fixed (width) - 1; } /* Skip empty (or backwards) sections */ if (rx > lx) { int lxs, rxs; /* Find pixel bounds for span. */ lxi = pixman_fixed_to_int (lx); rxi = pixman_fixed_to_int (rx); /* Sample coverage for edge pixels */ lxs = RENDER_SAMPLES_X (lx, 8); rxs = RENDER_SAMPLES_X (rx, 8); /* Add coverage across row */ if (lxi == rxi) { WRITE (image, ap + lxi, clip255 (READ (image, ap + lxi) + rxs - lxs)); } else { WRITE (image, ap + lxi, clip255 (READ (image, ap + lxi) + N_X_FRAC (8) - lxs)); /* Move forward so that lxi/rxi is the pixel span */ lxi++; /* Don't bother trying to optimize the fill unless * the span is longer than 4 pixels. 
*/ if (rxi - lxi > 4) { if (fill_start < 0) { fill_start = lxi; fill_end = rxi; fill_size++; } else { if (lxi >= fill_end || rxi < fill_start) { /* We're beyond what we saved, just fill it */ ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8), fill_end - fill_start); fill_start = lxi; fill_end = rxi; fill_size = 1; } else { /* Update fill_start */ if (lxi > fill_start) { ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8), lxi - fill_start); fill_start = lxi; } else if (lxi < fill_start) { ADD_SATURATE_8 (ap + lxi, N_X_FRAC (8), fill_start - lxi); } /* Update fill_end */ if (rxi < fill_end) { ADD_SATURATE_8 (ap + rxi, fill_size * N_X_FRAC (8), fill_end - rxi); fill_end = rxi; } else if (fill_end < rxi) { ADD_SATURATE_8 (ap + fill_end, N_X_FRAC (8), rxi - fill_end); } fill_size++; } } } else { ADD_SATURATE_8 (ap + lxi, N_X_FRAC (8), rxi - lxi); } WRITE (image, ap + rxi, clip255 (READ (image, ap + rxi) + rxs)); } } if (y == b) { /* We're done, make sure we clean up any remaining fill. 
*/ if (fill_start != fill_end) { if (fill_size == N_Y_FRAC (8)) { MEMSET_WRAPPED (image, ap + fill_start, 0xff, fill_end - fill_start); } else { ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8), fill_end - fill_start); } } break; } if (pixman_fixed_frac (y) != Y_FRAC_LAST (8)) { RENDER_EDGE_STEP_SMALL (l); RENDER_EDGE_STEP_SMALL (r); y += STEP_Y_SMALL (8); } else { RENDER_EDGE_STEP_BIG (l); RENDER_EDGE_STEP_BIG (r); y += STEP_Y_BIG (8); if (fill_start != fill_end) { if (fill_size == N_Y_FRAC (8)) { MEMSET_WRAPPED (image, ap + fill_start, 0xff, fill_end - fill_start); } else { ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8), fill_end - fill_start); } fill_start = fill_end = -1; fill_size = 0; } line += stride; } } } #ifndef PIXMAN_FB_ACCESSORS static #endif void PIXMAN_RASTERIZE_EDGES (pixman_image_t *image, pixman_edge_t * l, pixman_edge_t * r, pixman_fixed_t t, pixman_fixed_t b) { switch (PIXMAN_FORMAT_BPP (image->bits.format)) { case 1: rasterize_edges_1 (image, l, r, t, b); break; case 4: rasterize_edges_4 (image, l, r, t, b); break; case 8: rasterize_edges_8 (image, l, r, t, b); break; default: break; } } #ifndef PIXMAN_FB_ACCESSORS PIXMAN_EXPORT void pixman_rasterize_edges (pixman_image_t *image, pixman_edge_t * l, pixman_edge_t * r, pixman_fixed_t t, pixman_fixed_t b) { return_if_fail (image->type == BITS); return_if_fail (PIXMAN_FORMAT_TYPE (image->bits.format) == PIXMAN_TYPE_A); if (image->bits.read_func || image->bits.write_func) pixman_rasterize_edges_accessors (image, l, r, t, b); else pixman_rasterize_edges_no_accessors (image, l, r, t, b); } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-fast-path.c0000664000175000017500000027232414712446423017751 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. 
* * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Author: Keith Packard, SuSE, Inc. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" #include "pixman-combine32.h" #include "pixman-inlines.h" static force_inline uint32_t fetch_24 (uint8_t *a) { if (((uintptr_t)a) & 1) { #ifdef WORDS_BIGENDIAN return (*a << 16) | (*(uint16_t *)(a + 1)); #else return *a | (*(uint16_t *)(a + 1) << 8); #endif } else { #ifdef WORDS_BIGENDIAN return (*(uint16_t *)a << 8) | *(a + 2); #else return *(uint16_t *)a | (*(a + 2) << 16); #endif } } static force_inline void store_24 (uint8_t *a, uint32_t v) { if (((uintptr_t)a) & 1) { #ifdef WORDS_BIGENDIAN *a = (uint8_t) (v >> 16); *(uint16_t *)(a + 1) = (uint16_t) (v); #else *a = (uint8_t) (v); *(uint16_t *)(a + 1) = (uint16_t) (v >> 8); #endif } else { #ifdef WORDS_BIGENDIAN *(uint16_t *)a = (uint16_t)(v >> 8); *(a + 2) = (uint8_t)v; #else *(uint16_t *)a = (uint16_t)v; *(a + 2) = (uint8_t)(v >> 16); #endif } } static force_inline uint32_t over (uint32_t src, uint32_t dest) { uint32_t a = ~src >> 24; UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src); return dest; } static force_inline uint32_t in (uint32_t x, uint8_t y) { uint16_t a = y; UN8x4_MUL_UN8 (x, a); return x; } /* * Naming convention: * * op_src_mask_dest */ static void fast_composite_over_x888_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *src, *src_line; uint32_t *dst, *dst_line; uint8_t *mask, *mask_line; int src_stride, mask_stride, dst_stride; uint8_t m; uint32_t s, d; int32_t w; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { src = src_line; src_line += src_stride; dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { m = *mask++; if (m) { s = *src | 0xff000000; if (m == 0xff) { *dst = s; } 
else { d = in (s, m); *dst = over (d, *dst); } } src++; dst++; } } } static void fast_composite_in_n_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint8_t *dst_line, *dst; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; int32_t w; uint16_t t; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); if (srca == 0xff) { while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { m = *mask++; if (m == 0) *dst = 0; else if (m != 0xff) *dst = MUL_UN8 (m, *dst, t); dst++; } } } else { while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { m = *mask++; m = MUL_UN8 (m, srca, t); if (m == 0) *dst = 0; else if (m != 0xff) *dst = MUL_UN8 (m, *dst, t); dst++; } } } } static void fast_composite_in_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; int32_t w; uint8_t s; uint16_t t; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; if (s == 0) *dst = 0; else if (s != 0xff) *dst = MUL_UN8 (s, *dst, t); dst++; } } } static void fast_composite_over_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst_line, *dst, d; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; int32_t w; src = 
_pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { m = *mask++; if (m == 0xff) { if (srca == 0xff) *dst = src; else *dst = over (src, *dst); } else if (m) { d = in (src, m); *dst = over (d, *dst); } dst++; } } } static void fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, s; uint32_t *dst_line, *dst, d; uint32_t *mask_line, *mask, ma; int dst_stride, mask_stride; int32_t w; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { ma = *mask++; if (ma) { d = *dst; s = src; UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d); *dst = s; } dst++; } } } static void fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca, s; uint32_t *dst_line, *dst, d; uint32_t *mask_line, *mask, ma; int dst_stride, mask_stride; int32_t w; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = 
width; while (w--) { ma = *mask++; if (ma == 0xffffffff) { if (srca == 0xff) *dst = src; else *dst = over (src, *dst); } else if (ma) { d = *dst; s = src; UN8x4_MUL_UN8x4 (s, ma); UN8x4_MUL_UN8 (ma, srca); ma = ~ma; UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s); *dst = d; } dst++; } } } static void fast_composite_over_n_8_0888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint8_t *dst_line, *dst; uint32_t d; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; int32_t w; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { m = *mask++; if (m == 0xff) { if (srca == 0xff) { d = src; } else { d = fetch_24 (dst); d = over (src, d); } store_24 (dst, d); } else if (m) { d = over (in (src, m), fetch_24 (dst)); store_24 (dst, d); } dst += 3; } } } static void fast_composite_over_n_8_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint16_t *dst_line, *dst; uint32_t d; uint8_t *mask_line, *mask, m; int dst_stride, mask_stride; int32_t w; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { m = *mask++; if (m == 0xff) { if (srca == 0xff) { d = src; } else { d = *dst; d = over (src, convert_0565_to_0888 (d)); } *dst = 
convert_8888_to_0565 (d); } else if (m) { d = *dst; d = over (in (src, m), convert_0565_to_0888 (d)); *dst = convert_8888_to_0565 (d); } dst++; } } } static void fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca, s; uint16_t src16; uint16_t *dst_line, *dst; uint32_t d; uint32_t *mask_line, *mask, ma; int dst_stride, mask_stride; int32_t w; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; src16 = convert_8888_to_0565 (src); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { ma = *mask++; if (ma == 0xffffffff) { if (srca == 0xff) { *dst = src16; } else { d = *dst; d = over (src, convert_0565_to_0888 (d)); *dst = convert_8888_to_0565 (d); } } else if (ma) { d = *dst; d = convert_0565_to_0888 (d); s = src; UN8x4_MUL_UN8x4 (s, ma); UN8x4_MUL_UN8 (ma, srca); ma = ~ma; UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s); *dst = convert_8888_to_0565 (d); } dst++; } } } static void fast_composite_over_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src, s; int dst_stride, src_stride; uint8_t a; int32_t w; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; a = s >> 24; if (a == 0xff) *dst = s; else if (s) *dst = over (s, *dst); dst++; } } } static void fast_composite_src_x888_8888 (pixman_implementation_t *imp, 
pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) *dst++ = (*src++) | 0xff000000; } } #if 0 static void fast_composite_over_8888_0888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint32_t d; uint32_t *src_line, *src, s; uint8_t a; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; a = s >> 24; if (a) { if (a == 0xff) d = s; else d = over (s, fetch_24 (dst)); store_24 (dst, d); } dst += 3; } } } #endif static void fast_composite_over_8888_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t d; uint32_t *src_line, *src, s; uint8_t a; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; a = s >> 24; if (s) { if (a == 0xff) { d = s; } else { d = *dst; d = over (s, convert_0565_to_0888 (d)); } *dst = convert_8888_to_0565 (d); } dst++; } } } static void fast_composite_add_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { 
PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; int32_t w; uint8_t s, d; uint16_t t; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; if (s) { if (s != 0xff) { d = *dst; t = d + s; s = t | (0 - (t >> 8)); } *dst = s; } dst++; } } } static void fast_composite_add_0565_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t d; uint16_t *src_line, *src; uint32_t s; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; if (s) { d = *dst; s = convert_0565_to_8888 (s); if (d) { d = convert_0565_to_8888 (d); UN8x4_ADD_UN8x4 (s, d); } *dst = convert_8888_to_0565 (s); } dst++; } } } static void fast_composite_add_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; uint32_t s, d; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; if (s) { if (s != 0xffffffff) { d = *dst; if (d) UN8x4_ADD_UN8x4 (s, d); } *dst = s; } dst++; } } } static void fast_composite_add_n_8_8 (pixman_implementation_t *imp, 
pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; uint32_t src; uint8_t sa; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); sa = (src >> 24); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { uint16_t tmp; uint16_t a; uint32_t m, d; uint32_t r; a = *mask++; d = *dst; m = MUL_UN8 (sa, a, tmp); r = ADD_UN8 (m, d, tmp); *dst++ = r; } } } #ifdef WORDS_BIGENDIAN #define CREATE_BITMASK(n) (0x80000000 >> (n)) #define UPDATE_BITMASK(n) ((n) >> 1) #else #define CREATE_BITMASK(n) (1U << (n)) #define UPDATE_BITMASK(n) ((n) << 1) #endif #define TEST_BIT(p, n) \ (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31)) #define SET_BIT(p, n) \ do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0); static void fast_composite_add_1_1 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, 0, dest_y, uint32_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { /* * TODO: improve performance by processing uint32_t data instead * of individual bits */ if (TEST_BIT (src, src_x + w)) SET_BIT (dst, dest_x + w); } } } static void fast_composite_over_n_1_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst, *dst_line; uint32_t *mask, *mask_line; int mask_stride, dst_stride; uint32_t bitcache, 
bitmask;
    int32_t w;

    /* Nothing to do for an empty request. */
    if (width <= 0)
	return;

    /* Resolve the solid source to destination format; fully transparent
     * source means OVER is a no-op.
     */
    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t,
			   dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
			   mask_stride, mask_line, 1);
    /* The a1 mask is addressed in 32-bit words; skip to the word
     * containing mask_x.
     */
    mask_line += mask_x >> 5;

    if (srca == 0xff)
    {
	/* Opaque source: set destination pixels directly where the
	 * 1-bit mask is set.
	 */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		/* Refill the 32-bit cache when the walking bit runs out. */
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = src;
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
    else
    {
	/* Translucent source: blend with OVER where the mask is set. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = over (src, *dst);
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
}

/* Solid source OVER a 1-bit (a1) mask into an r5g6b5/b5g6r5 destination.
 * Same bit-cache walk as the 8888 variant above, but each written pixel
 * is converted to/from 16 bpp.
 */
static void
fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
			      pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, srca;
    uint16_t *dst, *dst_line;
    uint32_t *mask, *mask_line;
    int mask_stride, dst_stride;
    uint32_t bitcache, bitmask;
    int32_t w;
    uint32_t d;
    uint16_t src565;

    if (width <= 0)
	return;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t,
			   dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
			   mask_stride, mask_line, 1);
    mask_line += mask_x >> 5;

    if (srca == 0xff)
    {
	/* Opaque source: convert once and store directly. */
	src565 = convert_8888_to_0565 (src);
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = src565;
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
    else
    {
	/* Translucent source: widen dest to 8888, blend, narrow back. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		{
		    d = over (src, convert_0565_to_0888 (*dst));
		    *dst = convert_8888_to_0565 (d);
		}
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
}

/*
 * Simple bitblt
 */

/* SRC with a solid source: reduce the solid color to the destination's
 * bit depth and delegate the fill to pixman_fill().
 */
static void
fast_composite_solid_fill (pixman_implementation_t *imp,
			   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    if (dest_image->bits.format == PIXMAN_a1)
    {
	/* a1: keep only the top alpha bit. */
	src = src >> 31;
    }
    else if (dest_image->bits.format == PIXMAN_a8)
    {
	/* a8: keep the 8-bit alpha channel. */
	src = src >> 24;
    }
    else if (dest_image->bits.format == PIXMAN_r5g6b5 ||
	     dest_image->bits.format == PIXMAN_b5g6r5)
    {
	src = convert_8888_to_0565 (src);
    }

    pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride,
		 PIXMAN_FORMAT_BPP (dest_image->bits.format),
		 dest_x, dest_y,
		 width, height,
		 src);
}

/* SRC between images of identical layout: a plain row-by-row memcpy.
 * Strides are in 32-bit words in the image struct, hence the * 4.
 */
static void
fast_composite_src_memcpy (pixman_implementation_t *imp,
			   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format) / 8;
    uint32_t n_bytes = width * bpp;
    int dst_stride, src_stride;
    uint8_t    *dst;
    uint8_t    *src;

    src_stride = src_image->bits.rowstride * 4;
    dst_stride = dest_image->bits.rowstride * 4;

    src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
    dst = (uint8_t *)dest_image->bits.bits + dest_y * dst_stride + dest_x * bpp;

    while (height--)
    {
	memcpy (dst, src, n_bytes);

	dst += dst_stride;
	src += src_stride;
    }
}

/* Instantiate nearest-neighbour scaling fast paths for the common
 * format/operator/repeat combinations (macro generates the functions).
 */
FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER)
FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE)
FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD)
FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL)
FAST_NEAREST (x888_8888_cover, x888, 8888, uint32_t, uint32_t, SRC, COVER)
FAST_NEAREST (x888_8888_pad, x888, 8888, uint32_t, uint32_t, SRC, PAD)
FAST_NEAREST (x888_8888_normal, x888, 8888, uint32_t, uint32_t, SRC, NORMAL)
FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER)
FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE)
FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD)
FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL)
FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER)
FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE)
FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD)
FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL)
FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL)
FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER)
FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE)
FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD)
FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL)

/* Sources narrower than this get replicated into a small on-stack buffer
 * so the per-scanline composite calls operate on longer runs.
 */
#define REPEAT_MIN_WIDTH    32

/* NORMAL (tiled) repeat implemented on top of any non-repeating composite
 * fast path: looks up the underlying function, then composites the image
 * one scanline segment at a time, wrapping src_x/src_y with MOD.
 */
static void
fast_composite_tiled_repeat (pixman_implementation_t *imp,
			     pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    pixman_composite_func_t func;
    pixman_format_code_t mask_format;
    uint32_t src_flags, mask_flags;
    int32_t sx, sy;
    int32_t width_remain;
    int32_t num_pixels;
    int32_t src_width;
    int32_t i, j;
    pixman_image_t extended_src_image;
    uint32_t extended_src[REPEAT_MIN_WIDTH * 2];
    pixman_bool_t need_src_extension;
    uint32_t *src_line;
    int32_t src_stride;
    int32_t src_bpp;
    pixman_composite_info_t info2 = *info;

    /* The inner composite never crosses the source edge, so it qualifies
     * for the samples-cover-clip flag and loses the repeat flag.
     */
    src_flags = (info->src_flags & ~FAST_PATH_NORMAL_REPEAT) |
		    FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;

    if (mask_image)
    {
	mask_format = mask_image->common.extended_format_code;
	mask_flags = info->mask_flags;
    }
    else
    {
	mask_format = PIXMAN_null;
	mask_flags = FAST_PATH_IS_OPAQUE;
    }

    _pixman_implementation_lookup_composite (
	imp->toplevel, info->op,
	src_image->common.extended_format_code, src_flags,
	mask_format, mask_flags,
	dest_image->common.extended_format_code, info->dest_flags,
	&imp, &func);

    src_bpp = PIXMAN_FORMAT_BPP (src_image->bits.format);

    if (src_image->bits.width < REPEAT_MIN_WIDTH &&
	(src_bpp == 32 || src_bpp == 16 || src_bpp == 8) &&
	!src_image->bits.indexed)
    {
	/* Narrow source: replicate it into extended_src[] so each call
	 * to func covers at least REPEAT_MIN_WIDTH pixels.
	 */
	sx = src_x;
	sx = MOD (sx, src_image->bits.width);
	sx += width;
	src_width = 0;

	while (src_width < REPEAT_MIN_WIDTH && src_width <= sx)
	    src_width += src_image->bits.width;

	src_stride = (src_width * (src_bpp >> 3) + 3) / (int) sizeof (uint32_t);

	/* Initialize/validate stack-allocated temporary image */
	_pixman_bits_image_init (&extended_src_image, src_image->bits.format,
				 src_width, 1, &extended_src[0], src_stride,
				 FALSE);
	_pixman_image_validate (&extended_src_image);

	info2.src_image = &extended_src_image;
	need_src_extension = TRUE;
    }
    else
    {
	src_width = src_image->bits.width;
	need_src_extension = FALSE;
    }

    sx = src_x;
    sy = src_y;

    while (--height >= 0)
    {
	/* Wrap the sample position into the (possibly extended) source. */
	sx = MOD (sx, src_width);
	sy = MOD (sy, src_image->bits.height);

	if (need_src_extension)
	{
	    /* Copy the current source row repeatedly into the extension
	     * buffer, element size chosen by bpp.
	     */
	    if (src_bpp == 32)
	    {
		PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint32_t,
				       src_stride, src_line, 1);

		for (i = 0; i < src_width; )
		{
		    for (j = 0; j < src_image->bits.width; j++, i++)
			extended_src[i] = src_line[j];
		}
	    }
	    else if (src_bpp == 16)
	    {
		uint16_t *src_line_16;

		PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint16_t,
				       src_stride, src_line_16, 1);
		src_line = (uint32_t*)src_line_16;

		for (i = 0; i < src_width; )
		{
		    for (j = 0; j < src_image->bits.width; j++, i++)
			((uint16_t*)extended_src)[i] =
			    ((uint16_t*)src_line)[j];
		}
	    }
	    else if (src_bpp == 8)
	    {
		uint8_t *src_line_8;

		PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint8_t,
				       src_stride, src_line_8, 1);
		src_line = (uint32_t*)src_line_8;

		for (i = 0; i < src_width; )
		{
		    for (j = 0; j < src_image->bits.width; j++, i++)
			((uint8_t*)extended_src)[i] =
			    ((uint8_t*)src_line)[j];
		}
	    }

	    info2.src_y = 0;
	}
	else
	{
	    info2.src_y = sy;
	}

	width_remain = width;

	/* Composite this destination scanline in segments, each segment
	 * ending at the source's right edge.
	 */
	while (width_remain > 0)
	{
	    num_pixels = src_width - sx;

	    if (num_pixels > width_remain)
		num_pixels = width_remain;

	    info2.src_x = sx;
	    info2.width = num_pixels;
	    info2.height = 1;

	    func (imp, &info2);

	    width_remain -= num_pixels;
	    info2.mask_x += num_pixels;
	    info2.dest_x += num_pixels;
	    sx = 0;
	}

	/* Reset x positions and advance all y coordinates one row. */
	sx = src_x;
	sy++;
	info2.mask_x = info->mask_x;
	info2.mask_y++;
	info2.dest_x = info->dest_x;
	info2.dest_y++;
    }

    if (need_src_extension)
	_pixman_image_fini (&extended_src_image);
}

/* Use more unrolling for src_0565_0565 because it is typically CPU bound */
static force_inline void
scaled_nearest_scanline_565_565_SRC (uint16_t *       dst,
				     const uint16_t * src,
				     int32_t          w,
				     pixman_fixed_t   vx,
				     pixman_fixed_t   unit_x,
				     pixman_fixed_t   max_vx,
				     pixman_bool_t    fully_transparent_src)
{
    uint16_t tmp1, tmp2, tmp3, tmp4;

    /* 4x unrolled main loop; after it exits, w is negative and the tail
     * is recovered from its low bits (two's-complement & 2 / & 1 idiom).
     */
    while ((w -= 4) >= 0)
    {
	tmp1 = *(src + pixman_fixed_to_int (vx));
	vx += unit_x;
	tmp2 = *(src + pixman_fixed_to_int (vx));
	vx += unit_x;
	tmp3 = *(src + pixman_fixed_to_int (vx));
	vx += unit_x;
	tmp4 = *(src + pixman_fixed_to_int (vx));
	vx += unit_x;
	*dst++ = tmp1;
	*dst++ = tmp2;
	*dst++ = tmp3;
	*dst++ = tmp4;
    }
    if (w & 2)
    {
	tmp1 = *(src + pixman_fixed_to_int (vx));
	vx += unit_x;
	tmp2 = *(src + pixman_fixed_to_int (vx));
	vx += unit_x;
	*dst++ = tmp1;
	*dst++ = tmp2;
    }
    if (w & 1)
	*dst = *(src + pixman_fixed_to_int (vx));
}

FAST_NEAREST_MAINLOOP (565_565_cover_SRC,
		       scaled_nearest_scanline_565_565_SRC,
		       uint16_t, uint16_t, COVER)
FAST_NEAREST_MAINLOOP (565_565_none_SRC,
		       scaled_nearest_scanline_565_565_SRC,
		       uint16_t, uint16_t, NONE)
FAST_NEAREST_MAINLOOP (565_565_pad_SRC,
		       scaled_nearest_scanline_565_565_SRC,
		       uint16_t, uint16_t, PAD)

/* Fetch one source pixel for nearest-neighbour sampling, applying the
 * repeat mode to x; returns 0 for out-of-bounds samples, and forces the
 * alpha byte to 0xff for x8r8g8b8/x8b8g8r8 sources.
 */
static force_inline uint32_t
fetch_nearest (pixman_repeat_t src_repeat,
	       pixman_format_code_t format,
	       uint32_t *src, int x, int src_width)
{
    if (repeat (src_repeat, &x, src_width))
    {
	if (format ==
PIXMAN_x8r8g8b8 || format == PIXMAN_x8b8g8r8)
	    return *(src + x) | 0xff000000;
	else
	    return *(src + x);
    }
    else
    {
	return 0;
    }
}

/* Porter-Duff OVER of one premultiplied 8888 pixel onto *dst.
 * Fully transparent sources are skipped; fully opaque ones are stored
 * directly without the multiply.
 */
static force_inline void
combine_over (uint32_t s, uint32_t *dst)
{
    if (s)
    {
	uint8_t ia = 0xff - (s >> 24);

	if (ia)
	    UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s);
	else
	    *dst = s;
    }
}

/* SRC: plain store. */
static force_inline void
combine_src (uint32_t s, uint32_t *dst)
{
    *dst = s;
}

/* General nearest-neighbour scaler for 8888 sources/destinations with an
 * axis-aligned scale transform (matrix[0][0]/[1][1]); handles SRC and
 * OVER and any repeat mode via fetch_nearest()/repeat().
 */
static void
fast_composite_scaled_nearest (pixman_implementation_t *imp,
			       pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t       *dst_line;
    uint32_t       *src_line;
    int             dst_stride, src_stride;
    int             src_width, src_height;
    pixman_repeat_t src_repeat;
    pixman_fixed_t  unit_x, unit_y;
    pixman_format_code_t src_format;
    pixman_vector_t v;
    pixman_fixed_t  vy;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t,
			   dst_stride, dst_line, 1);
    /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
     * transformed from destination space to source space
     */
    PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t,
			   src_stride, src_line, 1);

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (!pixman_transform_point_3d (src_image->common.transform, &v))
	return;

    unit_x = src_image->common.transform->matrix[0][0];
    unit_y = src_image->common.transform->matrix[1][1];

    /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
    v.vector[0] -= pixman_fixed_e;
    v.vector[1] -= pixman_fixed_e;

    src_height = src_image->bits.height;
    src_width = src_image->bits.width;
    src_repeat = src_image->common.repeat;
    src_format = src_image->bits.format;

    vy = v.vector[1];
    while (height--)
    {
	pixman_fixed_t vx = v.vector[0];
	int y = pixman_fixed_to_int (vy);
	uint32_t *dst = dst_line;

	dst_line += dst_stride;

	/* adjust the y location by a unit vector in the y direction
	 * this is equivalent to transforming y+1 of the destination
	 * point to source space
	 */
	vy += unit_y;

	if (!repeat (src_repeat, &y, src_height))
	{
	    /* Row samples outside the source: SRC clears, OVER leaves
	     * the destination untouched.
	     */
	    if (op == PIXMAN_OP_SRC)
		memset (dst, 0, sizeof (*dst) * width);
	}
	else
	{
	    int w = width;

	    uint32_t *src = src_line + y * src_stride;

	    /* 2x unrolled horizontal loop. */
	    while (w >= 2)
	    {
		uint32_t s1, s2;
		int x1, x2;

		x1 = pixman_fixed_to_int (vx);
		vx += unit_x;
		x2 = pixman_fixed_to_int (vx);
		vx += unit_x;

		w -= 2;

		s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
		s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);

		if (op == PIXMAN_OP_OVER)
		{
		    combine_over (s1, dst++);
		    combine_over (s2, dst++);
		}
		else
		{
		    combine_src (s1, dst++);
		    combine_src (s2, dst++);
		}
	    }

	    while (w--)
	    {
		uint32_t s;
		int x;

		x = pixman_fixed_to_int (vx);
		vx += unit_x;

		s = fetch_nearest (src_repeat, src_format, src, x, src_width);

		if (op == PIXMAN_OP_OVER)
		    combine_over (s, dst++);
		else
		    combine_src (s, dst++);
	    }
	}
    }
}

/* Destination tile width used by the rotation blitters below; chosen to
 * match a cache line so writes to the destination stay cache friendly.
 */
#define CACHE_LINE_SIZE 64

/* Generates, for a given pixel type, the 90/270-degree rotation blitters
 * (trivial and cache-tiled variants) plus the two composite entry points
 * fast_composite_rotate_90_/270_<suffix> used by the fast path table.
 */
#define FAST_SIMPLE_ROTATE(suffix, pix_type)                                  \
                                                                              \
static void                                                                   \
blt_rotated_90_trivial_##suffix (pix_type       *dst,                         \
				 int             dst_stride,                  \
				 const pix_type *src,                         \
				 int             src_stride,                  \
				 int             w,                           \
				 int             h)                           \
{                                                                             \
    int x, y;                                                                 \
    for (y = 0; y < h; y++)                                                   \
    {                                                                         \
	const pix_type *s = src + (h - y - 1);                                \
	pix_type *d = dst + dst_stride * y;                                   \
	for (x = 0; x < w; x++)                                               \
	{                                                                     \
	    *d++ = *s;                                                        \
	    s += src_stride;                                                  \
	}                                                                     \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
blt_rotated_270_trivial_##suffix (pix_type       *dst,                        \
				  int             dst_stride,                 \
				  const pix_type *src,                        \
				  int             src_stride,                 \
				  int             w,                          \
				  int             h)                          \
{                                                                             \
    int x, y;                                                                 \
    for (y = 0; y < h; y++)                                                   \
    {                                                                         \
	const pix_type *s = src + src_stride * (w - 1) + y;                   \
	pix_type *d = dst + dst_stride * y;                                   \
	for (x = 0; x < w; x++)                                               \
	{                                                                     \
	    *d++ = *s;                                                        \
	    s -= src_stride;                                                  \
	}                                                                     \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
blt_rotated_90_##suffix (pix_type       *dst,                                 \
			 int             dst_stride,                          \
			 const pix_type *src,                                 \
			 int             src_stride,                          \
			 int             W,                                   \
			 int             H)                                   \
{                                                                             \
    int x;                                                                    \
    int leading_pixels = 0, trailing_pixels = 0;                              \
    const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type);                 \
                                                                              \
    /*                                                                        \
     * split processing into handling destination as TILE_SIZExH cache line  \
     * aligned vertical stripes (optimistically assuming that destination    \
     * stride is a multiple of cache line, if not - it will be just a bit    \
     * slower)                                                                \
     */                                                                       \
                                                                              \
    if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1))                               \
    {                                                                         \
	leading_pixels = TILE_SIZE - (((uintptr_t)dst &                       \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (leading_pixels > W)                                               \
	    leading_pixels = W;                                               \
                                                                              \
	/* unaligned leading part NxH (where N < TILE_SIZE) */                \
	blt_rotated_90_trivial_##suffix (                                     \
	    dst,                                                              \
	    dst_stride,                                                       \
	    src,                                                              \
	    src_stride,                                                       \
	    leading_pixels,                                                   \
	    H);                                                               \
                                                                              \
	dst += leading_pixels;                                                \
	src += leading_pixels * src_stride;                                   \
	W -= leading_pixels;                                                  \
    }                                                                         \
                                                                              \
    if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1))                         \
    {                                                                         \
	trailing_pixels = (((uintptr_t)(dst + W) &                            \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (trailing_pixels > W)                                              \
	    trailing_pixels = W;                                              \
	W -= trailing_pixels;                                                 \
    }                                                                         \
                                                                              \
    for (x = 0; x < W; x += TILE_SIZE)                                        \
    {                                                                         \
	/* aligned middle part TILE_SIZExH */                                 \
	blt_rotated_90_trivial_##suffix (                                     \
	    dst + x,                                                          \
	    dst_stride,                                                       \
	    src + src_stride * x,                                             \
	    src_stride,                                                       \
	    TILE_SIZE,                                                        \
	    H);                                                               \
    }                                                                         \
                                                                              \
    if (trailing_pixels)                                                      \
    {                                                                         \
	/* unaligned trailing part NxH (where N < TILE_SIZE) */               \
	blt_rotated_90_trivial_##suffix (                                     \
	    dst + W,                                                          \
	    dst_stride,                                                       \
	    src + W * src_stride,                                             \
	    src_stride,                                                       \
	    trailing_pixels,                                                  \
	    H);                                                               \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
blt_rotated_270_##suffix (pix_type       *dst,                                \
			  int             dst_stride,                         \
			  const pix_type *src,                                \
			  int             src_stride,                         \
			  int             W,                                  \
			  int             H)                                  \
{                                                                             \
    int x;                                                                    \
    int leading_pixels = 0, trailing_pixels = 0;                              \
    const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type);                 \
                                                                              \
    /*                                                                        \
     * split processing into handling destination as TILE_SIZExH cache line  \
     * aligned vertical stripes (optimistically assuming that destination    \
     * stride is a multiple of cache line, if not - it will be just a bit    \
     * slower)                                                                \
     */                                                                       \
                                                                              \
    if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1))                               \
    {                                                                         \
	leading_pixels = TILE_SIZE - (((uintptr_t)dst &                       \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (leading_pixels > W)                                               \
	    leading_pixels = W;                                               \
                                                                              \
	/* unaligned leading part NxH (where N < TILE_SIZE) */                \
	blt_rotated_270_trivial_##suffix (                                    \
	    dst,                                                              \
	    dst_stride,                                                       \
	    src + src_stride * (W - leading_pixels),                          \
	    src_stride,                                                       \
	    leading_pixels,                                                   \
	    H);                                                               \
                                                                              \
	dst += leading_pixels;                                                \
	W -= leading_pixels;                                                  \
    }                                                                         \
                                                                              \
    if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1))                         \
    {                                                                         \
	trailing_pixels = (((uintptr_t)(dst + W) &                            \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (trailing_pixels > W)                                              \
	    trailing_pixels = W;                                              \
	W -= trailing_pixels;                                                 \
	src += trailing_pixels * src_stride;                                  \
    }                                                                         \
                                                                              \
    for (x = 0; x < W; x += TILE_SIZE)                                        \
    {                                                                         \
	/* aligned middle part TILE_SIZExH */                                 \
	blt_rotated_270_trivial_##suffix (                                    \
	    dst + x,                                                          \
	    dst_stride,                                                       \
	    src + src_stride * (W - x - TILE_SIZE),                           \
	    src_stride,                                                       \
	    TILE_SIZE,                                                        \
	    H);                                                               \
    }                                                                         \
                                                                              \
    if (trailing_pixels)                                                      \
    {                                                                         \
	/* unaligned trailing part NxH (where N < TILE_SIZE) */               \
	blt_rotated_270_trivial_##suffix (                                    \
	    dst + W,                                                          \
	    dst_stride,                                                       \
	    src - trailing_pixels * src_stride,                               \
	    src_stride,                                                       \
	    trailing_pixels,                                                  \
	    H);                                                               \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
fast_composite_rotate_90_##suffix (pixman_implementation_t *imp,              \
				   pixman_composite_info_t *info)             \
{                                                                             \
    PIXMAN_COMPOSITE_ARGS (info);                                             \
    pix_type       *dst_line;                                                 \
    pix_type       *src_line;                                                 \
    int             dst_stride, src_stride;                                   \
    int             src_x_t, src_y_t;                                         \
                                                                              \
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type,              \
			   dst_stride, dst_line, 1);                          \
    src_x_t = -src_y + pixman_fixed_to_int (                                  \
				src_image->common.transform->matrix[0][2] +   \
				pixman_fixed_1 / 2 - pixman_fixed_e) - height;\
    src_y_t = src_x + pixman_fixed_to_int (                                   \
				src_image->common.transform->matrix[1][2] +   \
				pixman_fixed_1 / 2 - pixman_fixed_e);         \
    PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type,             \
			   src_stride, src_line, 1);                          \
    blt_rotated_90_##suffix (dst_line, dst_stride, src_line, src_stride,      \
			     width, height);                                  \
}                                                                             \
                                                                              \
static void                                                                   \
fast_composite_rotate_270_##suffix (pixman_implementation_t *imp,             \
				    pixman_composite_info_t *info)            \
{                                                                             \
    PIXMAN_COMPOSITE_ARGS (info);                                             \
    pix_type       *dst_line;                                                 \
    pix_type       *src_line;                                                 \
    int             dst_stride, src_stride;                                   \
    int             src_x_t, src_y_t;                                         \
                                                                              \
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type,              \
			   dst_stride, dst_line, 1);                          \
    src_x_t = src_y + pixman_fixed_to_int (                                   \
				src_image->common.transform->matrix[0][2] +   \
				pixman_fixed_1 / 2 - pixman_fixed_e);         \
    src_y_t = -src_x + pixman_fixed_to_int (                                  \
				src_image->common.transform->matrix[1][2] +   \
				pixman_fixed_1 / 2 - pixman_fixed_e) - width; \
    PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type,             \
			   src_stride, src_line, 1);                          \
    blt_rotated_270_##suffix (dst_line, dst_stride, src_line, src_stride,     \
			      width, height);                                 \
}

/* Instantiate the rotation fast paths for 8, 16 and 32 bpp pixels. */
FAST_SIMPLE_ROTATE (8, uint8_t)
FAST_SIMPLE_ROTATE (565, uint16_t)
FAST_SIMPLE_ROTATE (8888, uint32_t)

/* Table of C fast paths, matched in order by the implementation's
 * composite lookup (operator, source, mask, destination formats/flags).
 */
static const pixman_fast_path_t c_fast_paths[] =
{
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5,
fast_composite_over_n_8888_0565_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
    PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
    PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, fast_composite_add_0565_0565),
    PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, fast_composite_add_0565_0565),
    PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
    PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
    PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
    PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1_1),
    PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a1, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8),
    PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8),

    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),

    SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),

    SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8b8g8r8, a8b8g8r8, x888_8888),

    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565),

/* Entry for the general nearest scaler above; accepts any repeat mode
 * (fast_composite_scaled_nearest handles repeat itself).
 */
#define NEAREST_FAST_PATH(op,s,d)		\
    {   PIXMAN_OP_ ## op,			\
	PIXMAN_ ## s, SCALED_NEAREST_FLAGS,	\
	PIXMAN_null, 0,				\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,	\
	fast_composite_scaled_nearest,		\
    }
    NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8),
    NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8),

    NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8),
    NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8),

    NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8),
    NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8),

    NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8),
    NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8),

/* Flags required for the cache-tiled 90/270-degree rotation blitters. */
#define SIMPLE_ROTATE_FLAGS(angle)					  \
    (FAST_PATH_ROTATE_ ## angle ## _TRANSFORM	|			  \
     FAST_PATH_NEAREST_FILTER			|			  \
     FAST_PATH_SAMPLES_COVER_CLIP_NEAREST	|			  \
     FAST_PATH_STANDARD_FLAGS)

#define SIMPLE_ROTATE_FAST_PATH(op,s,d,suffix)				  \
    {   PIXMAN_OP_ ## op,						  \
	PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (90),				  \
	PIXMAN_null, 0,							  \
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				  \
	fast_composite_rotate_90_##suffix,				  \
    },									  \
    {   PIXMAN_OP_ ## op,						  \
	PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (270),			  \
	PIXMAN_null, 0,							  \
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				  \
	fast_composite_rotate_270_##suffix,				  \
    }

    SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888),
    SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888),
    SIMPLE_ROTATE_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888),
    SIMPLE_ROTATE_FAST_PATH (SRC, r5g6b5, r5g6b5, 565),
    SIMPLE_ROTATE_FAST_PATH (SRC, a8, a8, 8),

    /* Simple repeat fast path entry. */
    {   PIXMAN_OP_any,
	PIXMAN_any,
	(FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM |
	 FAST_PATH_BITS_IMAGE | FAST_PATH_NORMAL_REPEAT),
	PIXMAN_any, 0,
	PIXMAN_any, FAST_PATH_STD_DEST_FLAGS,
	fast_composite_tiled_repeat
    },

    {   PIXMAN_OP_NONE	},
};

/* Build an n-bit run mask at bit offset offs within a 32-bit word of an
 * a1 image; bit order depends on endianness.
 */
#ifdef WORDS_BIGENDIAN
#define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (32 - (offs) - (n)))
#else
#define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs))
#endif

/* Fill width bits of one a1 scanline with value v (0 or 1), starting at
 * bit offset offs: unaligned head, whole words, then the tail.
 */
static force_inline void
pixman_fill1_line (uint32_t *dst, int offs, int width, int v)
{
    if (offs)
    {
	int leading_pixels = 32 - offs;
	if (leading_pixels >= width)
	{
	    /* The run begins and ends inside the same word. */
	    if (v)
		*dst |= A1_FILL_MASK (width, offs);
	    else
		*dst &= ~A1_FILL_MASK (width, offs);
	    return;
	}
	else
	{
	    if (v)
		*dst++ |= A1_FILL_MASK (leading_pixels, offs);
	    else
		*dst++ &= ~A1_FILL_MASK (leading_pixels, offs);
	    width -= leading_pixels;
	}
    }
    while (width >= 32)
    {
	if (v)
	    *dst++ = 0xFFFFFFFF;
	else
	    *dst++ = 0;
	width -= 32;
    }
    if (width > 0)
    {
	if (v)
	    *dst |= A1_FILL_MASK (width, 0);
	else
	    *dst &= ~A1_FILL_MASK (width, 0);
    }
}

/* Rectangle fill for 1 bpp images; only bit 0 of filler is used. */
static void
pixman_fill1 (uint32_t *bits,
	      int       stride,
	      int       x,
	      int       y,
	      int       width,
	      int       height,
	      uint32_t  filler)
{
    uint32_t *dst = bits + y * stride + (x >> 5);
    int offs = x & 31;

    if (filler & 1)
    {
	while (height--)
	{
	    pixman_fill1_line (dst, offs, width, 1);
	    dst += stride;
	}
    }
    else
    {
	while (height--)
	{
	    pixman_fill1_line (dst, offs, width, 0);
	    dst += stride;
	}
    }
}

/* Rectangle fill for 8 bpp images; stride is in 32-bit words. */
static void
pixman_fill8 (uint32_t *bits,
	      int       stride,
	      int       x,
	      int       y,
	      int       width,
	      int       height,
	      uint32_t  filler)
{
    int byte_stride = stride * (int) sizeof (uint32_t);
    uint8_t *dst = (uint8_t *) bits;
    uint8_t v = filler & 0xff;
    int i;

    dst = dst + y * byte_stride + x;

    while (height--)
    {
	for (i = 0; i < width; ++i)
	    dst[i] = v;

	dst += byte_stride;
    }
}

/* Rectangle fill for 16 bpp images. */
static void
pixman_fill16 (uint32_t *bits,
	       int       stride,
	       int       x,
	       int       y,
	       int       width,
	       int       height,
	       uint32_t  filler)
{
    int short_stride =
	(stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
    uint16_t *dst = (uint16_t *)bits;
    uint16_t v = filler & 0xffff;
    int i;

    dst = dst + y * short_stride + x;

    while (height--)
    {
	for (i = 0; i < width; ++i)
	    dst[i] = v;

	dst += short_stride;
    }
}

/* Rectangle fill for 32 bpp images. */
static void
pixman_fill32 (uint32_t *bits,
	       int       stride,
	       int       x,
	       int       y,
	       int       width,
	       int       height,
	       uint32_t  filler)
{
    int i;

    bits = bits + y * stride + x;

    while (height--)
    {
	for (i = 0; i < width; ++i)
	    bits[i] = filler;

	bits += stride;
    }
}

/* pixman_implementation fill entry point: dispatch on bpp; returns FALSE
 * for depths this implementation does not handle.
 */
static pixman_bool_t
fast_path_fill (pixman_implementation_t *imp,
		uint32_t *               bits,
		int                      stride,
		int                      bpp,
		int                      x,
		int                      y,
		int                      width,
		int                      height,
		uint32_t                 filler)
{
    switch (bpp)
    {
    case 1:
	pixman_fill1 (bits, stride, x, y, width, height, filler);
	break;

    case 8:
	pixman_fill8 (bits, stride, x, y, width, height, filler);
	break;

    case 16:
	pixman_fill16 (bits, stride, x, y, width, height, filler);
	break;

    case 32:
	pixman_fill32 (bits, stride, x, y, width, height, filler);
	break;

    default:
	return FALSE;
    }

    return TRUE;
}

/*****************************************************************************/

/* Iterator scanline fetch: expand one r5g6b5 row into the iterator's
 * 8888 buffer, two pixels at a time where alignment allows.
 */
static uint32_t *
fast_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask)
{
    int32_t w = iter->width;
    uint32_t *dst =
iter->buffer;
    const uint16_t *src = (const uint16_t *)iter->bits;

    /* Advance to the next scanline for the following call. */
    iter->bits += iter->stride;

    /* Align the source buffer at 4 bytes boundary */
    if (w > 0 && ((uintptr_t)src & 3))
    {
	*dst++ = convert_0565_to_8888 (*src++);
	w--;
    }
    /* Process two pixels per iteration */
    while ((w -= 2) >= 0)
    {
	uint32_t sr, sb, sg, t0, t1;
	uint32_t s = *(const uint32_t *)src;

	src += 2;

	/* Expand both 565 pixels' channels in parallel within one word;
	 * the |= replicates the high bits into the low bits so the full
	 * 0..255 range is reached.
	 */
	sr = (s >> 8) & 0x00F800F8;
	sb = (s << 3) & 0x00F800F8;
	sg = (s >> 3) & 0x00FC00FC;
	sr |= sr >> 5;
	sb |= sb >> 5;
	sg |= sg >> 6;
	t0 = ((sr << 16) & 0x00FF0000) | ((sg << 8) & 0x0000FF00) |
	     (sb & 0xFF) | 0xFF000000;
	t1 = (sr & 0x00FF0000) | ((sg >> 8) & 0x0000FF00) |
	     (sb >> 16) | 0xFF000000;
#ifdef WORDS_BIGENDIAN
	*dst++ = t1;
	*dst++ = t0;
#else
	*dst++ = t0;
	*dst++ = t1;
#endif
    }
    /* Odd trailing pixel (w is negative here; see & 1 idiom above). */
    if (w & 1)
    {
	*dst = convert_0565_to_8888 (*src);
    }

    return iter->buffer;
}

/* Destination fetch that reads nothing: used when the existing contents
 * of the destination do not matter; only advances the scanline pointer.
 */
static uint32_t *
fast_dest_fetch_noop (pixman_iter_t *iter, const uint32_t *mask)
{
    iter->bits += iter->stride;
    return iter->buffer;
}

/* Helper function for a workaround, which tries to ensure that 0x1F001F
 * constant is always allocated in a register on RISC architectures.
 */
static force_inline uint32_t
convert_8888_to_0565_workaround (uint32_t s, uint32_t x1F001F)
{
    uint32_t a, b;
    a = (s >> 3) & x1F001F;
    b = s & 0xFC00;
    a |= a >> 5;
    a |= b >> 5;
    return a;
}

/* Iterator write-back: convert the 8888 scanline buffer back to r5g6b5.
 * iter->bits was already advanced by the fetch, hence the - iter->stride.
 */
static void
fast_write_back_r5g6b5 (pixman_iter_t *iter)
{
    int32_t w = iter->width;
    uint16_t *dst = (uint16_t *)(iter->bits - iter->stride);
    const uint32_t *src = iter->buffer;

    /* Workaround to ensure that x1F001F variable is allocated in a register */
    static volatile uint32_t volatile_x1F001F = 0x1F001F;
    uint32_t x1F001F = volatile_x1F001F;

    /* 4x unrolled; tail handled via w's low bits. */
    while ((w -= 4) >= 0)
    {
	uint32_t s1 = *src++;
	uint32_t s2 = *src++;
	uint32_t s3 = *src++;
	uint32_t s4 = *src++;
	*dst++ = convert_8888_to_0565_workaround (s1, x1F001F);
	*dst++ = convert_8888_to_0565_workaround (s2, x1F001F);
	*dst++ = convert_8888_to_0565_workaround (s3, x1F001F);
	*dst++ = convert_8888_to_0565_workaround (s4, x1F001F);
    }
    if (w & 2)
    {
	*dst++ = convert_8888_to_0565_workaround (*src++, x1F001F);
	*dst++ = convert_8888_to_0565_workaround (*src++, x1F001F);
    }
    if (w & 1)
    {
	*dst = convert_8888_to_0565_workaround (*src, x1F001F);
    }
}

/* One horizontally-interpolated source line cached for the bilinear
 * cover iterator; y records which source row it holds.
 */
typedef struct
{
    int		y;
    uint64_t *	buffer;
} line_t;

/* Per-iterator state for the bilinear cover fetcher: two cached lines
 * plus the current fixed-point sample position; data[] holds both line
 * buffers (allocated with the trailing extra space in the init below).
 */
typedef struct
{
    line_t	lines[2];
    pixman_fixed_t	y;
    pixman_fixed_t	x;
    uint64_t	data[1];
} bilinear_info_t;

/* Horizontally interpolate source row y into line->buffer for n output
 * pixels starting at fixed-point x, stepping by ux.  Channels are kept
 * in a widened "agrb" layout so the vertical pass can blend them later.
 */
static void
fetch_horizontal (bits_image_t *image, line_t *line,
		  int y, pixman_fixed_t x, pixman_fixed_t ux, int n)
{
    uint32_t *bits = image->bits + y * image->rowstride;
    int i;

    for (i = 0; i < n; ++i)
    {
	int x0 = pixman_fixed_to_int (x);
	int x1 = x0 + 1;
	int32_t dist_x;

	uint32_t left = *(bits + x0);
	uint32_t right = *(bits + x1);

	dist_x = pixman_fixed_to_bilinear_weight (x);
	dist_x <<= (8 - BILINEAR_INTERPOLATION_BITS);

#if SIZEOF_LONG <= 4
	/* 32-bit targets: keep alpha/green and red/blue in two separate
	 * 32-bit halves of the 64-bit line buffer entry.
	 */
	{
	    uint32_t lag, rag, ag;
	    uint32_t lrb, rrb, rb;

	    lag = (left & 0xff00ff00) >> 8;
	    rag = (right & 0xff00ff00) >> 8;
	    ag = (lag << 8) + dist_x * (rag - lag);

	    lrb = (left & 0x00ff00ff);
	    rrb = (right & 0x00ff00ff);
	    rb = (lrb << 8) + dist_x * (rrb - lrb);

	    *((uint32_t *)(line->buffer + i)) = ag;
	    *((uint32_t *)(line->buffer + i) + 1) = rb;
	}
#else
	/* 64-bit targets: interleave all four channels in one 64-bit
	 * value and interpolate them in a single expression.
	 */
	{
	    uint64_t lagrb, ragrb;
	    uint32_t lag, rag;
	    uint32_t lrb, rrb;

	    lag = (left & 0xff00ff00);
	    lrb = (left & 0x00ff00ff);
	    rag = (right & 0xff00ff00);
	    rrb = (right & 0x00ff00ff);
	    lagrb = (((uint64_t)lag) << 24) | lrb;
	    ragrb = (((uint64_t)rag) << 24) | rrb;

	    line->buffer[i] = (lagrb << 8) + dist_x * (ragrb - lagrb);
	}
#endif

	x += ux;
    }

    line->y = y;
}

/* Bilinear scanline fetch for COVER sampling: re-uses the two cached,
 * horizontally-interpolated lines when possible and performs only the
 * vertical interpolation per pixel.
 */
static uint32_t *
fast_fetch_bilinear_cover (pixman_iter_t *iter, const uint32_t *mask)
{
    pixman_fixed_t fx, ux;
    bilinear_info_t *info = iter->data;
    line_t *line0, *line1;
    int y0, y1;
    int32_t dist_y;
    int i;

    COMPILE_TIME_ASSERT (BILINEAR_INTERPOLATION_BITS < 8);

    fx = info->x;
    ux = iter->image->common.transform->matrix[0][0];

    y0 = pixman_fixed_to_int (info->y);
    y1 = y0 + 1;
    dist_y = pixman_fixed_to_bilinear_weight (info->y);
    dist_y <<= (8 - BILINEAR_INTERPOLATION_BITS);

    /* The two cached lines alternate by row parity. */
    line0 = &info->lines[y0 & 0x01];
    line1 = &info->lines[y1 & 0x01];

    if (line0->y != y0)
    {
	fetch_horizontal (
	    &iter->image->bits, line0, y0, fx, ux, iter->width);
    }

    if (line1->y != y1)
    {
	fetch_horizontal (
	    &iter->image->bits, line1, y1, fx, ux, iter->width);
    }

    for (i = 0; i < iter->width; ++i)
    {
#if SIZEOF_LONG <= 4
	uint32_t ta, tr, tg, tb;
	uint32_t ba, br, bg, bb;
	uint32_t tag, trb;
	uint32_t bag, brb;
	uint32_t a, r, g, b;

	tag = *((uint32_t *)(line0->buffer + i));
	trb = *((uint32_t *)(line0->buffer + i) + 1);
	bag = *((uint32_t *)(line1->buffer + i));
	brb = *((uint32_t *)(line1->buffer + i) + 1);

	ta = tag >> 16;
	ba = bag >> 16;
	a = (ta << 8) + dist_y * (ba - ta);

	tr = trb >> 16;
	br = brb >> 16;
	r = (tr << 8) + dist_y * (br - tr);

	tg = tag & 0xffff;
	bg = bag & 0xffff;
	g = (tg << 8) + dist_y * (bg - tg);

	tb = trb & 0xffff;
	bb = brb & 0xffff;
	b = (tb << 8) + dist_y * (bb - tb);

	a = (a << 8) & 0xff000000;
	r = (r << 0) & 0x00ff0000;
	g = (g >> 8) & 0x0000ff00;
	b = (b >> 16) & 0x000000ff;
#else
	uint64_t top = line0->buffer[i];
	uint64_t bot = line1->buffer[i];
	uint64_t tar = (top & 0xffff0000ffff0000ULL) >> 16;
	uint64_t bar = (bot & 0xffff0000ffff0000ULL) >> 16;
	uint64_t tgb = (top & 0x0000ffff0000ffffULL);
	uint64_t bgb = (bot & 0x0000ffff0000ffffULL);
	uint64_t ar, gb;
	uint32_t a, r, g, b;

	ar = (tar << 8) + dist_y * (bar - tar);
	gb = (tgb << 8) + dist_y * (bgb - tgb);

	a = ((ar >> 24) & 0xff000000);
	r = ((ar >> 0) & 0x00ff0000);
	g = ((gb >> 40) & 0x0000ff00);
	b = ((gb >> 16) & 0x000000ff);
#endif

	iter->buffer[i] = a | r | g | b;
    }

    /* Step the sample position one destination row down in source space. */
    info->y += iter->image->common.transform->matrix[1][1];

    return iter->buffer;
}

/* Release the per-iterator bilinear state allocated by the init below. */
static void
bilinear_cover_iter_fini (pixman_iter_t *iter)
{
    free (iter->data);
}

/* Initialize a source iterator for bilinear COVER fetching: transform
 * the start coordinate to source space and allocate the two-line cache.
 * On failure the iterator degrades to a noop scanline fetcher.
 */
static void
fast_bilinear_cover_iter_init (pixman_iter_t *iter,
			       const pixman_iter_info_t *iter_info)
{
    int width = iter->width;
    bilinear_info_t *info;
    pixman_vector_t v;

    /* Reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (iter->x) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (iter->y) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (!pixman_transform_point_3d (iter->image->common.transform, &v))
	goto fail;

    /* data[1] already provides one element, hence 2 * width - 1 extras. */
    info = malloc (sizeof (*info) + (2 * width - 1) * sizeof (uint64_t));
    if (!info)
	goto fail;

    info->x = v.vector[0] - pixman_fixed_1 / 2;
    info->y = v.vector[1] - pixman_fixed_1 / 2;

    /* It is safe to set the y coordinates to -1 initially
     * because COVER_CLIP_BILINEAR ensures that we will only
     * be asked to fetch lines in the [0, height) interval
     */
    info->lines[0].y = -1;
    info->lines[0].buffer = &(info->data[0]);
    info->lines[1].y = -1;
    info->lines[1].buffer = &(info->data[width]);

    iter->get_scanline = fast_fetch_bilinear_cover;
    iter->fini = bilinear_cover_iter_fini;

    iter->data = info;
    return;

fail:
    /* Something went wrong, either a bad matrix or OOM; in such cases,
     * we don't guarantee any particular rendering.
     */
    _pixman_log_error (
	FUNC, "Allocation failure or bad matrix, skipping rendering\n");

    iter->get_scanline = _pixman_iter_get_scanline_noop;
    iter->fini = NULL;
}

/* Bilinear fetch for 8888 sources without repeat: out-of-bounds rows and
 * columns read from a zero-filled dummy buffer instead of the image.
 */
static uint32_t *
bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
					  const uint32_t *mask)
{

    pixman_image_t * ima = iter->image;
    int              offset = iter->x;
    int              line = iter->y++;
    int              width = iter->width;
    uint32_t *       buffer = iter->buffer;

    bits_image_t *bits = &ima->bits;
    pixman_fixed_t x_top, x_bottom, x;
    pixman_fixed_t ux_top, ux_bottom, ux;
    pixman_vector_t v;
    uint32_t top_mask, bottom_mask;
    uint32_t *top_row;
    uint32_t *bottom_row;
    uint32_t *end;
    uint32_t zero[2] = { 0, 0 };
    uint32_t one = 1;
    int y, y1, y2;
    int disty;
    int mask_inc;
    int w;

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (!pixman_transform_point_3d (bits->common.transform, &v))
	return iter->buffer;

    ux = ux_top = ux_bottom = bits->common.transform->matrix[0][0];
    x = x_top = x_bottom = v.vector[0] - pixman_fixed_1/2;

    y = v.vector[1] - pixman_fixed_1/2;
    disty = pixman_fixed_to_bilinear_weight (y);

    /* Load the pointers to the first and second lines from the source
     * image that bilinear code must read.
     *
     * The main trick in this code is about the check if any line are
     * outside of the image;
     *
     * When I realize that a line (any one) is outside, I change
     * the pointer to a dummy area with zeros. Once I change this, I
     * must be sure the pointer will not change, so I set the
     * variables to each pointer increments inside the loop.
*/ y1 = pixman_fixed_to_int (y); y2 = y1 + 1; if (y1 < 0 || y1 >= bits->height) { top_row = zero; x_top = 0; ux_top = 0; } else { top_row = bits->bits + y1 * bits->rowstride; x_top = x; ux_top = ux; } if (y2 < 0 || y2 >= bits->height) { bottom_row = zero; x_bottom = 0; ux_bottom = 0; } else { bottom_row = bits->bits + y2 * bits->rowstride; x_bottom = x; ux_bottom = ux; } /* Instead of checking whether the operation uses the mast in * each loop iteration, verify this only once and prepare the * variables to make the code smaller inside the loop. */ if (!mask) { mask_inc = 0; mask = &one; } else { /* If have a mask, prepare the variables to check it */ mask_inc = 1; } /* If both are zero, then the whole thing is zero */ if (top_row == zero && bottom_row == zero) { memset (buffer, 0, width * sizeof (uint32_t)); return iter->buffer; } else if (bits->format == PIXMAN_x8r8g8b8) { if (top_row == zero) { top_mask = 0; bottom_mask = 0xff000000; } else if (bottom_row == zero) { top_mask = 0xff000000; bottom_mask = 0; } else { top_mask = 0xff000000; bottom_mask = 0xff000000; } } else { top_mask = 0; bottom_mask = 0; } end = buffer + width; /* Zero fill to the left of the image */ while (buffer < end && x < pixman_fixed_minus_1) { *buffer++ = 0; x += ux; x_top += ux_top; x_bottom += ux_bottom; mask += mask_inc; } /* Left edge */ while (buffer < end && x < 0) { uint32_t tr, br; int32_t distx; tr = top_row[pixman_fixed_to_int (x_top) + 1] | top_mask; br = bottom_row[pixman_fixed_to_int (x_bottom) + 1] | bottom_mask; distx = pixman_fixed_to_bilinear_weight (x); *buffer++ = bilinear_interpolation (0, tr, 0, br, distx, disty); x += ux; x_top += ux_top; x_bottom += ux_bottom; mask += mask_inc; } /* Main part */ w = pixman_int_to_fixed (bits->width - 1); while (buffer < end && x < w) { if (*mask) { uint32_t tl, tr, bl, br; int32_t distx; tl = top_row [pixman_fixed_to_int (x_top)] | top_mask; tr = top_row [pixman_fixed_to_int (x_top) + 1] | top_mask; bl = bottom_row 
[pixman_fixed_to_int (x_bottom)] | bottom_mask; br = bottom_row [pixman_fixed_to_int (x_bottom) + 1] | bottom_mask; distx = pixman_fixed_to_bilinear_weight (x); *buffer = bilinear_interpolation (tl, tr, bl, br, distx, disty); } buffer++; x += ux; x_top += ux_top; x_bottom += ux_bottom; mask += mask_inc; } /* Right Edge */ w = pixman_int_to_fixed (bits->width); while (buffer < end && x < w) { if (*mask) { uint32_t tl, bl; int32_t distx; tl = top_row [pixman_fixed_to_int (x_top)] | top_mask; bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask; distx = pixman_fixed_to_bilinear_weight (x); *buffer = bilinear_interpolation (tl, 0, bl, 0, distx, disty); } buffer++; x += ux; x_top += ux_top; x_bottom += ux_bottom; mask += mask_inc; } /* Zero fill to the left of the image */ while (buffer < end) *buffer++ = 0; return iter->buffer; } typedef uint32_t (* convert_pixel_t) (const uint8_t *row, int x); static force_inline void bits_image_fetch_separable_convolution_affine (pixman_image_t * image, int offset, int line, int width, uint32_t * buffer, const uint32_t * mask, convert_pixel_t convert_pixel, pixman_format_code_t format, pixman_repeat_t repeat_mode) { bits_image_t *bits = &image->bits; pixman_fixed_t *params = image->common.filter_params; int cwidth = pixman_fixed_to_int (params[0]); int cheight = pixman_fixed_to_int (params[1]); int x_off = ((cwidth << 16) - pixman_fixed_1) >> 1; int y_off = ((cheight << 16) - pixman_fixed_1) >> 1; int x_phase_bits = pixman_fixed_to_int (params[2]); int y_phase_bits = pixman_fixed_to_int (params[3]); int x_phase_shift = 16 - x_phase_bits; int y_phase_shift = 16 - y_phase_bits; pixman_fixed_t vx, vy; pixman_fixed_t ux, uy; pixman_vector_t v; int k; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (!pixman_transform_point_3d (image->common.transform, &v)) return; ux = 
image->common.transform->matrix[0][0]; uy = image->common.transform->matrix[1][0]; vx = v.vector[0]; vy = v.vector[1]; for (k = 0; k < width; ++k) { pixman_fixed_t *y_params; int satot, srtot, sgtot, sbtot; pixman_fixed_t x, y; int32_t x1, x2, y1, y2; int32_t px, py; int i, j; if (mask && !mask[k]) goto next; /* Round x and y to the middle of the closest phase before continuing. This * ensures that the convolution matrix is aligned right, since it was * positioned relative to a particular phase (and not relative to whatever * exact fraction we happen to get here). */ x = ((vx >> x_phase_shift) << x_phase_shift) + ((1 << x_phase_shift) >> 1); y = ((vy >> y_phase_shift) << y_phase_shift) + ((1 << y_phase_shift) >> 1); px = (x & 0xffff) >> x_phase_shift; py = (y & 0xffff) >> y_phase_shift; x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off); y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off); x2 = x1 + cwidth; y2 = y1 + cheight; satot = srtot = sgtot = sbtot = 0; y_params = params + 4 + (1 << x_phase_bits) * cwidth + py * cheight; for (i = y1; i < y2; ++i) { pixman_fixed_t fy = *y_params++; if (fy) { pixman_fixed_t *x_params = params + 4 + px * cwidth; for (j = x1; j < x2; ++j) { pixman_fixed_t fx = *x_params++; int rx = j; int ry = i; if (fx) { pixman_fixed_t f; uint32_t pixel, mask; uint8_t *row; mask = PIXMAN_FORMAT_A (format)? 
0 : 0xff000000; if (repeat_mode != PIXMAN_REPEAT_NONE) { repeat (repeat_mode, &rx, bits->width); repeat (repeat_mode, &ry, bits->height); row = (uint8_t *)(bits->bits + bits->rowstride * ry); pixel = convert_pixel (row, rx) | mask; } else { if (rx < 0 || ry < 0 || rx >= bits->width || ry >= bits->height) { pixel = 0; } else { row = (uint8_t *)(bits->bits + bits->rowstride * ry); pixel = convert_pixel (row, rx) | mask; } } f = ((pixman_fixed_32_32_t)fx * fy + 0x8000) >> 16; srtot += (int)RED_8 (pixel) * f; sgtot += (int)GREEN_8 (pixel) * f; sbtot += (int)BLUE_8 (pixel) * f; satot += (int)ALPHA_8 (pixel) * f; } } } } satot = (satot + 0x8000) >> 16; srtot = (srtot + 0x8000) >> 16; sgtot = (sgtot + 0x8000) >> 16; sbtot = (sbtot + 0x8000) >> 16; satot = CLIP (satot, 0, 0xff); srtot = CLIP (srtot, 0, 0xff); sgtot = CLIP (sgtot, 0, 0xff); sbtot = CLIP (sbtot, 0, 0xff); buffer[k] = (satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot << 0); next: vx += ux; vy += uy; } } static const uint32_t zero[2] = { 0, 0 }; static force_inline void bits_image_fetch_bilinear_affine (pixman_image_t * image, int offset, int line, int width, uint32_t * buffer, const uint32_t * mask, convert_pixel_t convert_pixel, pixman_format_code_t format, pixman_repeat_t repeat_mode) { pixman_fixed_t x, y; pixman_fixed_t ux, uy; pixman_vector_t v; bits_image_t *bits = &image->bits; int i; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (!pixman_transform_point_3d (image->common.transform, &v)) return; ux = image->common.transform->matrix[0][0]; uy = image->common.transform->matrix[1][0]; x = v.vector[0]; y = v.vector[1]; for (i = 0; i < width; ++i) { int x1, y1, x2, y2; uint32_t tl, tr, bl, br; int32_t distx, disty; int width = image->bits.width; int height = image->bits.height; const uint8_t *row1; const uint8_t *row2; if (mask && !mask[i]) 
goto next; x1 = x - pixman_fixed_1 / 2; y1 = y - pixman_fixed_1 / 2; distx = pixman_fixed_to_bilinear_weight (x1); disty = pixman_fixed_to_bilinear_weight (y1); y1 = pixman_fixed_to_int (y1); y2 = y1 + 1; x1 = pixman_fixed_to_int (x1); x2 = x1 + 1; if (repeat_mode != PIXMAN_REPEAT_NONE) { uint32_t mask; mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; repeat (repeat_mode, &x1, width); repeat (repeat_mode, &y1, height); repeat (repeat_mode, &x2, width); repeat (repeat_mode, &y2, height); row1 = (uint8_t *)(bits->bits + bits->rowstride * y1); row2 = (uint8_t *)(bits->bits + bits->rowstride * y2); tl = convert_pixel (row1, x1) | mask; tr = convert_pixel (row1, x2) | mask; bl = convert_pixel (row2, x1) | mask; br = convert_pixel (row2, x2) | mask; } else { uint32_t mask1, mask2; int bpp; /* Note: PIXMAN_FORMAT_BPP() returns an unsigned value, * which means if you use it in expressions, those * expressions become unsigned themselves. Since * the variables below can be negative in some cases, * that will lead to crashes on 64 bit architectures. * * So this line makes sure bpp is signed */ bpp = PIXMAN_FORMAT_BPP (format); if (x1 >= width || x2 < 0 || y1 >= height || y2 < 0) { buffer[i] = 0; goto next; } if (y2 == 0) { row1 = (const uint8_t *)zero; mask1 = 0; } else { row1 = (uint8_t *)(bits->bits + bits->rowstride * y1); row1 += bpp / 8 * x1; mask1 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; } if (y1 == height - 1) { row2 = (const uint8_t *)zero; mask2 = 0; } else { row2 = (uint8_t *)(bits->bits + bits->rowstride * y2); row2 += bpp / 8 * x1; mask2 = PIXMAN_FORMAT_A (format)? 
0 : 0xff000000; } if (x2 == 0) { tl = 0; bl = 0; } else { tl = convert_pixel (row1, 0) | mask1; bl = convert_pixel (row2, 0) | mask2; } if (x1 == width - 1) { tr = 0; br = 0; } else { tr = convert_pixel (row1, 1) | mask1; br = convert_pixel (row2, 1) | mask2; } } buffer[i] = bilinear_interpolation ( tl, tr, bl, br, distx, disty); next: x += ux; y += uy; } } static force_inline void bits_image_fetch_nearest_affine (pixman_image_t * image, int offset, int line, int width, uint32_t * buffer, const uint32_t * mask, convert_pixel_t convert_pixel, pixman_format_code_t format, pixman_repeat_t repeat_mode) { pixman_fixed_t x, y; pixman_fixed_t ux, uy; pixman_vector_t v; bits_image_t *bits = &image->bits; int i; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (!pixman_transform_point_3d (image->common.transform, &v)) return; ux = image->common.transform->matrix[0][0]; uy = image->common.transform->matrix[1][0]; x = v.vector[0]; y = v.vector[1]; for (i = 0; i < width; ++i) { int width, height, x0, y0; const uint8_t *row; if (mask && !mask[i]) goto next; width = image->bits.width; height = image->bits.height; x0 = pixman_fixed_to_int (x - pixman_fixed_e); y0 = pixman_fixed_to_int (y - pixman_fixed_e); if (repeat_mode == PIXMAN_REPEAT_NONE && (y0 < 0 || y0 >= height || x0 < 0 || x0 >= width)) { buffer[i] = 0; } else { uint32_t mask = PIXMAN_FORMAT_A (format)? 
0 : 0xff000000; if (repeat_mode != PIXMAN_REPEAT_NONE) { repeat (repeat_mode, &x0, width); repeat (repeat_mode, &y0, height); } row = (uint8_t *)(bits->bits + bits->rowstride * y0); buffer[i] = convert_pixel (row, x0) | mask; } next: x += ux; y += uy; } } static force_inline uint32_t convert_a8r8g8b8 (const uint8_t *row, int x) { return *(((uint32_t *)row) + x); } static force_inline uint32_t convert_x8r8g8b8 (const uint8_t *row, int x) { return *(((uint32_t *)row) + x); } static force_inline uint32_t convert_a8 (const uint8_t *row, int x) { return (uint32_t) *(row + x) << 24; } static force_inline uint32_t convert_r5g6b5 (const uint8_t *row, int x) { return convert_0565_to_0888 (*((uint16_t *)row + x)); } #define MAKE_SEPARABLE_CONVOLUTION_FETCHER(name, format, repeat_mode) \ static uint32_t * \ bits_image_fetch_separable_convolution_affine_ ## name (pixman_iter_t *iter, \ const uint32_t * mask) \ { \ bits_image_fetch_separable_convolution_affine ( \ iter->image, \ iter->x, iter->y++, \ iter->width, \ iter->buffer, mask, \ convert_ ## format, \ PIXMAN_ ## format, \ repeat_mode); \ \ return iter->buffer; \ } #define MAKE_BILINEAR_FETCHER(name, format, repeat_mode) \ static uint32_t * \ bits_image_fetch_bilinear_affine_ ## name (pixman_iter_t *iter, \ const uint32_t * mask) \ { \ bits_image_fetch_bilinear_affine (iter->image, \ iter->x, iter->y++, \ iter->width, \ iter->buffer, mask, \ convert_ ## format, \ PIXMAN_ ## format, \ repeat_mode); \ return iter->buffer; \ } #define MAKE_NEAREST_FETCHER(name, format, repeat_mode) \ static uint32_t * \ bits_image_fetch_nearest_affine_ ## name (pixman_iter_t *iter, \ const uint32_t * mask) \ { \ bits_image_fetch_nearest_affine (iter->image, \ iter->x, iter->y++, \ iter->width, \ iter->buffer, mask, \ convert_ ## format, \ PIXMAN_ ## format, \ repeat_mode); \ return iter->buffer; \ } #define MAKE_FETCHERS(name, format, repeat_mode) \ MAKE_NEAREST_FETCHER (name, format, repeat_mode) \ MAKE_BILINEAR_FETCHER (name, format, 
repeat_mode) \ MAKE_SEPARABLE_CONVOLUTION_FETCHER (name, format, repeat_mode) MAKE_FETCHERS (pad_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_PAD) MAKE_FETCHERS (none_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NONE) MAKE_FETCHERS (reflect_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_REFLECT) MAKE_FETCHERS (normal_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NORMAL) MAKE_FETCHERS (pad_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_PAD) MAKE_FETCHERS (none_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NONE) MAKE_FETCHERS (reflect_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_REFLECT) MAKE_FETCHERS (normal_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NORMAL) MAKE_FETCHERS (pad_a8, a8, PIXMAN_REPEAT_PAD) MAKE_FETCHERS (none_a8, a8, PIXMAN_REPEAT_NONE) MAKE_FETCHERS (reflect_a8, a8, PIXMAN_REPEAT_REFLECT) MAKE_FETCHERS (normal_a8, a8, PIXMAN_REPEAT_NORMAL) MAKE_FETCHERS (pad_r5g6b5, r5g6b5, PIXMAN_REPEAT_PAD) MAKE_FETCHERS (none_r5g6b5, r5g6b5, PIXMAN_REPEAT_NONE) MAKE_FETCHERS (reflect_r5g6b5, r5g6b5, PIXMAN_REPEAT_REFLECT) MAKE_FETCHERS (normal_r5g6b5, r5g6b5, PIXMAN_REPEAT_NORMAL) #define IMAGE_FLAGS \ (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \ FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST) static const pixman_iter_info_t fast_iters[] = { { PIXMAN_r5g6b5, IMAGE_FLAGS, ITER_NARROW | ITER_SRC, _pixman_iter_init_bits_stride, fast_fetch_r5g6b5, NULL }, { PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS, ITER_NARROW | ITER_DEST, _pixman_iter_init_bits_stride, fast_fetch_r5g6b5, fast_write_back_r5g6b5 }, { PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS, ITER_NARROW | ITER_DEST | ITER_IGNORE_RGB | ITER_IGNORE_ALPHA, _pixman_iter_init_bits_stride, fast_dest_fetch_noop, fast_write_back_r5g6b5 }, { PIXMAN_a8r8g8b8, (FAST_PATH_STANDARD_FLAGS | FAST_PATH_SCALE_TRANSFORM | FAST_PATH_BILINEAR_FILTER | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR), ITER_NARROW | ITER_SRC, fast_bilinear_cover_iter_init, NULL, NULL }, #define FAST_BILINEAR_FLAGS \ (FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_HAS_TRANSFORM | \ FAST_PATH_AFFINE_TRANSFORM | \ 
FAST_PATH_X_UNIT_POSITIVE | \ FAST_PATH_Y_UNIT_ZERO | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_BILINEAR_FILTER) { PIXMAN_a8r8g8b8, FAST_BILINEAR_FLAGS, ITER_NARROW | ITER_SRC, NULL, bits_image_fetch_bilinear_no_repeat_8888, NULL }, { PIXMAN_x8r8g8b8, FAST_BILINEAR_FLAGS, ITER_NARROW | ITER_SRC, NULL, bits_image_fetch_bilinear_no_repeat_8888, NULL }, #define GENERAL_BILINEAR_FLAGS \ (FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_HAS_TRANSFORM | \ FAST_PATH_AFFINE_TRANSFORM | \ FAST_PATH_BILINEAR_FILTER) #define GENERAL_NEAREST_FLAGS \ (FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_HAS_TRANSFORM | \ FAST_PATH_AFFINE_TRANSFORM | \ FAST_PATH_NEAREST_FILTER) #define GENERAL_SEPARABLE_CONVOLUTION_FLAGS \ (FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_HAS_TRANSFORM | \ FAST_PATH_AFFINE_TRANSFORM | \ FAST_PATH_SEPARABLE_CONVOLUTION_FILTER) #define SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat) \ { PIXMAN_ ## format, \ GENERAL_SEPARABLE_CONVOLUTION_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \ ITER_NARROW | ITER_SRC, \ NULL, bits_image_fetch_separable_convolution_affine_ ## name, NULL \ }, #define BILINEAR_AFFINE_FAST_PATH(name, format, repeat) \ { PIXMAN_ ## format, \ GENERAL_BILINEAR_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \ ITER_NARROW | ITER_SRC, \ NULL, bits_image_fetch_bilinear_affine_ ## name, NULL, \ }, #define NEAREST_AFFINE_FAST_PATH(name, format, repeat) \ { PIXMAN_ ## format, \ GENERAL_NEAREST_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \ ITER_NARROW | ITER_SRC, \ NULL, bits_image_fetch_nearest_affine_ ## name, NULL \ }, #define AFFINE_FAST_PATHS(name, format, repeat) \ NEAREST_AFFINE_FAST_PATH(name, format, repeat) \ BILINEAR_AFFINE_FAST_PATH(name, format, repeat) \ SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat) AFFINE_FAST_PATHS (pad_a8r8g8b8, a8r8g8b8, PAD) AFFINE_FAST_PATHS (none_a8r8g8b8, a8r8g8b8, NONE) AFFINE_FAST_PATHS (reflect_a8r8g8b8, a8r8g8b8, REFLECT) AFFINE_FAST_PATHS 
(normal_a8r8g8b8, a8r8g8b8, NORMAL) AFFINE_FAST_PATHS (pad_x8r8g8b8, x8r8g8b8, PAD) AFFINE_FAST_PATHS (none_x8r8g8b8, x8r8g8b8, NONE) AFFINE_FAST_PATHS (reflect_x8r8g8b8, x8r8g8b8, REFLECT) AFFINE_FAST_PATHS (normal_x8r8g8b8, x8r8g8b8, NORMAL) AFFINE_FAST_PATHS (pad_a8, a8, PAD) AFFINE_FAST_PATHS (none_a8, a8, NONE) AFFINE_FAST_PATHS (reflect_a8, a8, REFLECT) AFFINE_FAST_PATHS (normal_a8, a8, NORMAL) AFFINE_FAST_PATHS (pad_r5g6b5, r5g6b5, PAD) AFFINE_FAST_PATHS (none_r5g6b5, r5g6b5, NONE) AFFINE_FAST_PATHS (reflect_r5g6b5, r5g6b5, REFLECT) AFFINE_FAST_PATHS (normal_r5g6b5, r5g6b5, NORMAL) { PIXMAN_null }, }; pixman_implementation_t * _pixman_implementation_create_fast_path (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, c_fast_paths); imp->fill = fast_path_fill; imp->iter_info = fast_iters; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-filter.c0000664000175000017500000003177214712446423017347 0ustar00mattst88mattst88/* * Copyright 2012, Red Hat, Inc. * Copyright 2012, Soren Sandmann * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Soren Sandmann */ #include #include #include #include #include #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" typedef double (* kernel_func_t) (double x); typedef struct { pixman_kernel_t kernel; kernel_func_t func; double width; } filter_info_t; static double impulse_kernel (double x) { return (x == 0.0)? 1.0 : 0.0; } static double box_kernel (double x) { return 1; } static double linear_kernel (double x) { return 1 - fabs (x); } static double gaussian_kernel (double x) { #define SQRT2 (1.4142135623730950488016887242096980785696718753769480) #define SIGMA (SQRT2 / 2.0) return exp (- x * x / (2 * SIGMA * SIGMA)) / (SIGMA * sqrt (2.0 * M_PI)); } static double sinc (double x) { if (x == 0.0) return 1.0; else return sin (M_PI * x) / (M_PI * x); } static double lanczos (double x, int n) { return sinc (x) * sinc (x * (1.0 / n)); } static double lanczos2_kernel (double x) { return lanczos (x, 2); } static double lanczos3_kernel (double x) { return lanczos (x, 3); } static double nice_kernel (double x) { return lanczos3_kernel (x * 0.75); } static double general_cubic (double x, double B, double C) { double ax = fabs(x); if (ax < 1) { return (((12 - 9 * B - 6 * C) * ax + (-18 + 12 * B + 6 * C)) * ax * ax + (6 - 2 * B)) / 6; } else if (ax < 2) { return ((((-B - 6 * C) * ax + (6 * B + 30 * C)) * ax + (-12 * B - 48 * C)) * ax + (8 * B + 24 * C)) / 6; } else { return 0; } } static double cubic_kernel (double x) { /* This is the Mitchell-Netravali filter. * * (0.0, 0.5) would give us the Catmull-Rom spline, * but that one seems to be indistinguishable from Lanczos2. 
*/ return general_cubic (x, 1/3.0, 1/3.0); } static const filter_info_t filters[] = { { PIXMAN_KERNEL_IMPULSE, impulse_kernel, 0.0 }, { PIXMAN_KERNEL_BOX, box_kernel, 1.0 }, { PIXMAN_KERNEL_LINEAR, linear_kernel, 2.0 }, { PIXMAN_KERNEL_CUBIC, cubic_kernel, 4.0 }, { PIXMAN_KERNEL_GAUSSIAN, gaussian_kernel, 5.0 }, { PIXMAN_KERNEL_LANCZOS2, lanczos2_kernel, 4.0 }, { PIXMAN_KERNEL_LANCZOS3, lanczos3_kernel, 6.0 }, { PIXMAN_KERNEL_LANCZOS3_STRETCHED, nice_kernel, 8.0 }, }; /* This function scales @kernel2 by @scale, then * aligns @x1 in @kernel1 with @x2 in @kernel2 and * and integrates the product of the kernels across @width. * * This function assumes that the intervals are within * the kernels in question. E.g., the caller must not * try to integrate a linear kernel ouside of [-1:1] */ static double integral (pixman_kernel_t kernel1, double x1, pixman_kernel_t kernel2, double scale, double x2, double width) { if (kernel1 == PIXMAN_KERNEL_BOX && kernel2 == PIXMAN_KERNEL_BOX) { return width; } /* The LINEAR filter is not differentiable at 0, so if the * integration interval crosses zero, break it into two * separate integrals. */ else if (kernel1 == PIXMAN_KERNEL_LINEAR && x1 < 0 && x1 + width > 0) { return integral (kernel1, x1, kernel2, scale, x2, - x1) + integral (kernel1, 0, kernel2, scale, x2 - x1, width + x1); } else if (kernel2 == PIXMAN_KERNEL_LINEAR && x2 < 0 && x2 + width > 0) { return integral (kernel1, x1, kernel2, scale, x2, - x2) + integral (kernel1, x1 - x2, kernel2, scale, 0, width + x2); } else if (kernel1 == PIXMAN_KERNEL_IMPULSE) { assert (width == 0.0); return filters[kernel2].func (x2 * scale); } else if (kernel2 == PIXMAN_KERNEL_IMPULSE) { assert (width == 0.0); return filters[kernel1].func (x1); } else { /* Integration via Simpson's rule * See http://www.intmath.com/integration/6-simpsons-rule.php * 12 segments (6 cubic approximations) seems to produce best * result for lanczos3.linear, which was the combination that * showed the most errors. 
This makes sense as the lanczos3 * filter is 6 wide. */ #define N_SEGMENTS 12 #define SAMPLE(a1, a2) \ (filters[kernel1].func ((a1)) * filters[kernel2].func ((a2) * scale)) double s = 0.0; double h = width / N_SEGMENTS; int i; s = SAMPLE (x1, x2); for (i = 1; i < N_SEGMENTS; i += 2) { double a1 = x1 + h * i; double a2 = x2 + h * i; s += 4 * SAMPLE (a1, a2); } for (i = 2; i < N_SEGMENTS; i += 2) { double a1 = x1 + h * i; double a2 = x2 + h * i; s += 2 * SAMPLE (a1, a2); } s += SAMPLE (x1 + width, x2 + width); return h * s * (1.0 / 3.0); } } static void create_1d_filter (int width, pixman_kernel_t reconstruct, pixman_kernel_t sample, double scale, int n_phases, pixman_fixed_t *pstart, pixman_fixed_t *pend ) { pixman_fixed_t *p = pstart; double step; int i; if(width <= 0) return; step = 1.0 / n_phases; for (i = 0; i < n_phases; ++i) { double frac = step / 2.0 + i * step; pixman_fixed_t new_total; int x, x1, x2; double total, e; /* Sample convolution of reconstruction and sampling * filter. See rounding.txt regarding the rounding * and sample positions. 
*/ x1 = ceil (frac - width / 2.0 - 0.5); x2 = x1 + width; assert( p >= pstart && p + (x2 - x1) <= pend ); /* assert validity of the following loop */ total = 0; for (x = x1; x < x2; ++x) { double pos = x + 0.5 - frac; double rlow = - filters[reconstruct].width / 2.0; double rhigh = rlow + filters[reconstruct].width; double slow = pos - scale * filters[sample].width / 2.0; double shigh = slow + scale * filters[sample].width; double c = 0.0; double ilow, ihigh; if (rhigh >= slow && rlow <= shigh) { ilow = MAX (slow, rlow); ihigh = MIN (shigh, rhigh); c = integral (reconstruct, ilow, sample, 1.0 / scale, ilow - pos, ihigh - ilow); } *p = (pixman_fixed_t)floor (c * 65536.0 + 0.5); total += *p; p++; } /* Normalize, with error diffusion */ p -= width; assert(p >= pstart && p + (x2 - x1) <= pend); /* assert validity of the following loop */ total = 65536.0 / total; new_total = 0; e = 0.0; for (x = x1; x < x2; ++x) { double v = (*p) * total + e; pixman_fixed_t t = floor (v + 0.5); e = v - t; new_total += t; *p++ = t; } /* pixman_fixed_e's worth of error may remain; put it * at the first sample, since that is the only one that * hasn't had any error diffused into it. */ assert(p - width >= pstart && p - width < pend); /* assert... */ *(p - width) += pixman_fixed_1 - new_total; } } static int filter_width (pixman_kernel_t reconstruct, pixman_kernel_t sample, double size) { return ceil (filters[reconstruct].width + size * filters[sample].width); } #ifdef PIXMAN_GNUPLOT /* If enable-gnuplot is configured, then you can pipe the output of a * pixman-using program to gnuplot and get a continuously-updated plot * of the horizontal filter. This works well with demos/scale to test * the filter generation. * * The plot is all the different subposition filters shuffled * together. 
This is misleading in a few cases: * * IMPULSE.BOX - goes up and down as the subfilters have different * numbers of non-zero samples * IMPULSE.TRIANGLE - somewhat crooked for the same reason * 1-wide filters - looks triangular, but a 1-wide box would be more * accurate */ static void gnuplot_filter (int width, int n_phases, const pixman_fixed_t* p) { double step; int i, j; int first; step = 1.0 / n_phases; printf ("set style line 1 lc rgb '#0060ad' lt 1 lw 0.5 pt 7 pi 1 ps 0.5\n"); printf ("plot [x=%g:%g] '-' with linespoints ls 1\n", -width*0.5, width*0.5); /* Print a point at the origin so that y==0 line is included: */ printf ("0 0\n\n"); /* The position of the first sample of the phase corresponding to * frac is given by: * * ceil (frac - width / 2.0 - 0.5) + 0.5 - frac * * We have to find the frac that minimizes this expression. * * For odd widths, we have * * ceil (frac - width / 2.0 - 0.5) + 0.5 - frac * = ceil (frac) + K - frac * = 1 + K - frac * * for some K, so this is minimized when frac is maximized and * strictly growing with frac. So for odd widths, we can simply * start at the last phase and go backwards. * * For even widths, we have * * ceil (frac - width / 2.0 - 0.5) + 0.5 - frac * = ceil (frac - 0.5) + K - frac * * The graph for this function (ignoring K) looks like this: * * 0.5 * | |\ * | | \ * | | \ * 0 | | \ * |\ | * | \ | * | \ | * -0.5 | \| * --------------------------------- * 0 0.5 1 * * So in this case we need to start with the phase whose frac is * less than, but as close as possible to 0.5, then go backwards * until we hit the first phase, then wrap around to the last * phase and continue backwards. * * Which phase is as close as possible 0.5? The locations of the * sampling point corresponding to the kth phase is given by * 1/(2 * n_phases) + k / n_phases: * * 1/(2 * n_phases) + k / n_phases = 0.5 * * from which it follows that * * k = (n_phases - 1) / 2 * * rounded down is the phase in question. 
*/ if (width & 1) first = n_phases - 1; else first = (n_phases - 1) / 2; for (j = 0; j < width; ++j) { for (i = 0; i < n_phases; ++i) { int phase = first - i; double frac, pos; if (phase < 0) phase = n_phases + phase; frac = step / 2.0 + phase * step; pos = ceil (frac - width / 2.0 - 0.5) + 0.5 - frac + j; printf ("%g %g\n", pos, pixman_fixed_to_double (*(p + phase * width + j))); } } printf ("e\n"); fflush (stdout); } #endif /* Create the parameter list for a SEPARABLE_CONVOLUTION filter * with the given kernels and scale parameters */ PIXMAN_EXPORT pixman_fixed_t * pixman_filter_create_separable_convolution (int *n_values, pixman_fixed_t scale_x, pixman_fixed_t scale_y, pixman_kernel_t reconstruct_x, pixman_kernel_t reconstruct_y, pixman_kernel_t sample_x, pixman_kernel_t sample_y, int subsample_bits_x, int subsample_bits_y) { double sx = fabs (pixman_fixed_to_double (scale_x)); double sy = fabs (pixman_fixed_to_double (scale_y)); pixman_fixed_t *params; int subsample_x, subsample_y; int width, height; width = filter_width (reconstruct_x, sample_x, sx); subsample_x = (1 << subsample_bits_x); height = filter_width (reconstruct_y, sample_y, sy); subsample_y = (1 << subsample_bits_y); *n_values = 4 + width * subsample_x + height * subsample_y; params = malloc (*n_values * sizeof (pixman_fixed_t)); if (!params) return NULL; params[0] = pixman_int_to_fixed (width); params[1] = pixman_int_to_fixed (height); params[2] = pixman_int_to_fixed (subsample_bits_x); params[3] = pixman_int_to_fixed (subsample_bits_y); { pixman_fixed_t *xparams = params+4, *yparams = xparams + width*subsample_x, *endparams = params + *n_values; create_1d_filter(width, reconstruct_x, sample_x, sx, subsample_x, xparams, yparams); create_1d_filter(height, reconstruct_y, sample_y, sy, subsample_y, yparams, endparams); } #ifdef PIXMAN_GNUPLOT gnuplot_filter(width, subsample_x, params + 4); #endif return params; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 
mtime=1730825491.0 pixman-0.44.0/pixman/pixman-general.c0000664000175000017500000002027114712446423017467 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Red Hat, Inc. * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. * 2005 Lars Knoll & Zack Rusin, Trolltech * 2008 Aaron Plattner, NVIDIA Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Red Hat not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. Red Hat makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include #include "pixman-private.h" static void general_iter_init (pixman_iter_t *iter, const pixman_iter_info_t *info) { pixman_image_t *image = iter->image; switch (image->type) { case BITS: if ((iter->iter_flags & ITER_SRC) == ITER_SRC) _pixman_bits_image_src_iter_init (image, iter); else _pixman_bits_image_dest_iter_init (image, iter); break; case LINEAR: _pixman_linear_gradient_iter_init (image, iter); break; case RADIAL: _pixman_radial_gradient_iter_init (image, iter); break; case CONICAL: _pixman_conical_gradient_iter_init (image, iter); break; case SOLID: _pixman_log_error (FUNC, "Solid image not handled by noop"); break; default: _pixman_log_error (FUNC, "Pixman bug: unknown image type\n"); break; } } static const pixman_iter_info_t general_iters[] = { { PIXMAN_any, 0, 0, general_iter_init, NULL, NULL }, { PIXMAN_null }, }; typedef struct op_info_t op_info_t; struct op_info_t { uint8_t src, dst; }; #define ITER_IGNORE_BOTH \ (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB | ITER_LOCALIZED_ALPHA) static const op_info_t op_flags[PIXMAN_N_OPERATORS] = { /* Src Dst */ { ITER_IGNORE_BOTH, ITER_IGNORE_BOTH }, /* CLEAR */ { ITER_LOCALIZED_ALPHA, ITER_IGNORE_BOTH }, /* SRC */ { ITER_IGNORE_BOTH, ITER_LOCALIZED_ALPHA }, /* DST */ { 0, ITER_LOCALIZED_ALPHA }, /* OVER */ { ITER_LOCALIZED_ALPHA, 0 }, /* OVER_REVERSE */ { ITER_LOCALIZED_ALPHA, ITER_IGNORE_RGB }, /* IN */ { ITER_IGNORE_RGB, ITER_LOCALIZED_ALPHA }, /* IN_REVERSE */ { ITER_LOCALIZED_ALPHA, ITER_IGNORE_RGB }, /* OUT */ { ITER_IGNORE_RGB, ITER_LOCALIZED_ALPHA }, /* OUT_REVERSE */ { 0, 0 }, /* ATOP */ { 0, 0 }, /* ATOP_REVERSE */ { 0, 0 }, /* XOR */ { ITER_LOCALIZED_ALPHA, ITER_LOCALIZED_ALPHA }, /* ADD */ { 0, 0 }, /* SATURATE */ }; #define SCANLINE_BUFFER_LENGTH 8192 static pixman_bool_t operator_needs_division (pixman_op_t op) { static const uint8_t needs_division[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, /* 
SATURATE */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, /* DISJOINT */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, /* CONJOINT */ 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, /* blend ops */ }; return needs_division[op]; } static void general_composite_rect (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t stack_scanline_buffer[3 * SCANLINE_BUFFER_LENGTH]; uint8_t *scanline_buffer = (uint8_t *) stack_scanline_buffer; uint8_t *src_buffer, *mask_buffer, *dest_buffer; pixman_iter_t src_iter, mask_iter, dest_iter; pixman_combine_32_func_t compose; pixman_bool_t component_alpha; iter_flags_t width_flag, src_iter_flags; int Bpp; int i; if ((src_image->common.flags & FAST_PATH_NARROW_FORMAT) && (!mask_image || mask_image->common.flags & FAST_PATH_NARROW_FORMAT) && (dest_image->common.flags & FAST_PATH_NARROW_FORMAT) && !(operator_needs_division (op)) && (dest_image->bits.dither == PIXMAN_DITHER_NONE)) { width_flag = ITER_NARROW; Bpp = 4; } else { width_flag = ITER_WIDE; Bpp = 16; } #define ALIGN(addr) \ ((uint8_t *)((((uintptr_t)(addr)) + 15) & (~15))) if (width <= 0 || _pixman_multiply_overflows_int (width, Bpp * 3)) return; if (width * Bpp * 3 > sizeof (stack_scanline_buffer) - 15 * 3) { scanline_buffer = pixman_malloc_ab_plus_c (width, Bpp * 3, 15 * 3); if (!scanline_buffer) return; memset (scanline_buffer, 0, width * Bpp * 3 + 15 * 3); } else { memset (stack_scanline_buffer, 0, sizeof (stack_scanline_buffer)); } src_buffer = ALIGN (scanline_buffer); mask_buffer = ALIGN (src_buffer + width * Bpp); dest_buffer = ALIGN (mask_buffer + width * Bpp); if (width_flag == ITER_WIDE) { /* To make sure there aren't any NANs in the buffers */ memset (src_buffer, 0, width * Bpp); memset (mask_buffer, 0, width * Bpp); memset (dest_buffer, 0, width * Bpp); } /* src iter */ src_iter_flags = width_flag | op_flags[op].src | ITER_SRC; _pixman_implementation_iter_init (imp->toplevel, &src_iter, src_image, src_x, src_y, width, 
height, src_buffer, src_iter_flags, info->src_flags); /* mask iter */ if ((src_iter_flags & (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB)) == (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB)) { /* If it doesn't matter what the source is, then it doesn't matter * what the mask is */ mask_image = NULL; } component_alpha = mask_image && mask_image->common.component_alpha; _pixman_implementation_iter_init ( imp->toplevel, &mask_iter, mask_image, mask_x, mask_y, width, height, mask_buffer, ITER_SRC | width_flag | (component_alpha? 0 : ITER_IGNORE_RGB), info->mask_flags); /* dest iter */ _pixman_implementation_iter_init ( imp->toplevel, &dest_iter, dest_image, dest_x, dest_y, width, height, dest_buffer, ITER_DEST | width_flag | op_flags[op].dst, info->dest_flags); compose = _pixman_implementation_lookup_combiner ( imp->toplevel, op, component_alpha, width_flag != ITER_WIDE); for (i = 0; i < height; ++i) { uint32_t *s, *m, *d; m = mask_iter.get_scanline (&mask_iter, NULL); s = src_iter.get_scanline (&src_iter, m); d = dest_iter.get_scanline (&dest_iter, NULL); compose (imp->toplevel, op, d, s, m, width); dest_iter.write_back (&dest_iter); } if (src_iter.fini) src_iter.fini (&src_iter); if (mask_iter.fini) mask_iter.fini (&mask_iter); if (dest_iter.fini) dest_iter.fini (&dest_iter); if (scanline_buffer != (uint8_t *) stack_scanline_buffer) free (scanline_buffer); } static const pixman_fast_path_t general_fast_path[] = { { PIXMAN_OP_any, PIXMAN_any, 0, PIXMAN_any, 0, PIXMAN_any, 0, general_composite_rect }, { PIXMAN_OP_NONE } }; pixman_implementation_t * _pixman_implementation_create_general (void) { pixman_implementation_t *imp = _pixman_implementation_create (NULL, general_fast_path); _pixman_setup_combiner_functions_32 (imp); _pixman_setup_combiner_functions_float (imp); imp->iter_info = general_iters; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 
pixman-0.44.0/pixman/pixman-glyph.c0000664000175000017500000004224014712446423017175 0ustar00mattst88mattst88/* * Copyright 2010, 2012, Soren Sandmann * Copyright 2010, 2011, 2012, Red Hat, Inc * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Soren Sandmann */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #include typedef struct glyph_metrics_t glyph_metrics_t; typedef struct glyph_t glyph_t; #define TOMBSTONE ((glyph_t *)0x1) /* XXX: These numbers are arbitrary---we've never done any measurements. 
*/ #define N_GLYPHS_HIGH_WATER (16384) #define N_GLYPHS_LOW_WATER (8192) #define HASH_SIZE (2 * N_GLYPHS_HIGH_WATER) #define HASH_MASK (HASH_SIZE - 1) struct glyph_t { void * font_key; void * glyph_key; int origin_x; int origin_y; pixman_image_t * image; pixman_link_t mru_link; }; struct pixman_glyph_cache_t { int n_glyphs; int n_tombstones; int freeze_count; pixman_list_t mru; glyph_t * glyphs[HASH_SIZE]; }; static void free_glyph (glyph_t *glyph) { pixman_list_unlink (&glyph->mru_link); pixman_image_unref (glyph->image); free (glyph); } static unsigned int hash (const void *font_key, const void *glyph_key) { size_t key = (size_t)font_key + (size_t)glyph_key; /* This hash function is based on one found on Thomas Wang's * web page at * * http://www.concentric.net/~Ttwang/tech/inthash.htm * */ key = (key << 15) - key - 1; key = key ^ (key >> 12); key = key + (key << 2); key = key ^ (key >> 4); key = key + (key << 3) + (key << 11); key = key ^ (key >> 16); return key; } static glyph_t * lookup_glyph (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key) { unsigned idx; glyph_t *g; idx = hash (font_key, glyph_key); while ((g = cache->glyphs[idx++ & HASH_MASK])) { if (g != TOMBSTONE && g->font_key == font_key && g->glyph_key == glyph_key) { return g; } } return NULL; } static void insert_glyph (pixman_glyph_cache_t *cache, glyph_t *glyph) { unsigned idx; glyph_t **loc; idx = hash (glyph->font_key, glyph->glyph_key); /* Note: we assume that there is room in the table. If there isn't, * this will be an infinite loop. 
*/ do { loc = &cache->glyphs[idx++ & HASH_MASK]; } while (*loc && *loc != TOMBSTONE); if (*loc == TOMBSTONE) cache->n_tombstones--; cache->n_glyphs++; *loc = glyph; } static void remove_glyph (pixman_glyph_cache_t *cache, glyph_t *glyph) { unsigned idx; idx = hash (glyph->font_key, glyph->glyph_key); while (cache->glyphs[idx & HASH_MASK] != glyph) idx++; cache->glyphs[idx & HASH_MASK] = TOMBSTONE; cache->n_tombstones++; cache->n_glyphs--; /* Eliminate tombstones if possible */ if (cache->glyphs[(idx + 1) & HASH_MASK] == NULL) { while (cache->glyphs[idx & HASH_MASK] == TOMBSTONE) { cache->glyphs[idx & HASH_MASK] = NULL; cache->n_tombstones--; idx--; } } } static void clear_table (pixman_glyph_cache_t *cache) { int i; for (i = 0; i < HASH_SIZE; ++i) { glyph_t *glyph = cache->glyphs[i]; if (glyph && glyph != TOMBSTONE) free_glyph (glyph); cache->glyphs[i] = NULL; } cache->n_glyphs = 0; cache->n_tombstones = 0; } PIXMAN_EXPORT pixman_glyph_cache_t * pixman_glyph_cache_create (void) { pixman_glyph_cache_t *cache; if (!(cache = malloc (sizeof *cache))) return NULL; memset (cache->glyphs, 0, sizeof (cache->glyphs)); cache->n_glyphs = 0; cache->n_tombstones = 0; cache->freeze_count = 0; pixman_list_init (&cache->mru); return cache; } PIXMAN_EXPORT void pixman_glyph_cache_destroy (pixman_glyph_cache_t *cache) { return_if_fail (cache->freeze_count == 0); clear_table (cache); free (cache); } PIXMAN_EXPORT void pixman_glyph_cache_freeze (pixman_glyph_cache_t *cache) { cache->freeze_count++; } PIXMAN_EXPORT void pixman_glyph_cache_thaw (pixman_glyph_cache_t *cache) { if (--cache->freeze_count == 0 && cache->n_glyphs + cache->n_tombstones > N_GLYPHS_HIGH_WATER) { if (cache->n_tombstones > N_GLYPHS_HIGH_WATER) { /* More than half the entries are * tombstones. Just dump the whole table. 
*/ clear_table (cache); } while (cache->n_glyphs > N_GLYPHS_LOW_WATER) { glyph_t *glyph = CONTAINER_OF (glyph_t, mru_link, cache->mru.tail); remove_glyph (cache, glyph); free_glyph (glyph); } } } PIXMAN_EXPORT const void * pixman_glyph_cache_lookup (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key) { return lookup_glyph (cache, font_key, glyph_key); } PIXMAN_EXPORT const void * pixman_glyph_cache_insert (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key, int origin_x, int origin_y, pixman_image_t *image) { glyph_t *glyph; int32_t width, height; return_val_if_fail (cache->freeze_count > 0, NULL); return_val_if_fail (image->type == BITS, NULL); width = image->bits.width; height = image->bits.height; if (cache->n_glyphs >= HASH_SIZE) return NULL; if (!(glyph = malloc (sizeof *glyph))) return NULL; glyph->font_key = font_key; glyph->glyph_key = glyph_key; glyph->origin_x = origin_x; glyph->origin_y = origin_y; if (!(glyph->image = pixman_image_create_bits ( image->bits.format, width, height, NULL, -1))) { free (glyph); return NULL; } pixman_image_composite32 (PIXMAN_OP_SRC, image, NULL, glyph->image, 0, 0, 0, 0, 0, 0, width, height); if (PIXMAN_FORMAT_A (glyph->image->bits.format) != 0 && PIXMAN_FORMAT_RGB (glyph->image->bits.format) != 0) { pixman_image_set_component_alpha (glyph->image, TRUE); } pixman_list_prepend (&cache->mru, &glyph->mru_link); _pixman_image_validate (glyph->image); insert_glyph (cache, glyph); return glyph; } PIXMAN_EXPORT void pixman_glyph_cache_remove (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key) { glyph_t *glyph; if ((glyph = lookup_glyph (cache, font_key, glyph_key))) { remove_glyph (cache, glyph); free_glyph (glyph); } } PIXMAN_EXPORT void pixman_glyph_get_extents (pixman_glyph_cache_t *cache, int n_glyphs, pixman_glyph_t *glyphs, pixman_box32_t *extents) { int i; extents->x1 = extents->y1 = INT32_MAX; extents->x2 = extents->y2 = INT32_MIN; for (i = 0; i < n_glyphs; ++i) { glyph_t *glyph = (glyph_t 
*)glyphs[i].glyph; int x1, y1, x2, y2; x1 = glyphs[i].x - glyph->origin_x; y1 = glyphs[i].y - glyph->origin_y; x2 = glyphs[i].x - glyph->origin_x + glyph->image->bits.width; y2 = glyphs[i].y - glyph->origin_y + glyph->image->bits.height; if (x1 < extents->x1) extents->x1 = x1; if (y1 < extents->y1) extents->y1 = y1; if (x2 > extents->x2) extents->x2 = x2; if (y2 > extents->y2) extents->y2 = y2; } } /* This function returns a format that is suitable for use as a mask for the * set of glyphs in question. */ PIXMAN_EXPORT pixman_format_code_t pixman_glyph_get_mask_format (pixman_glyph_cache_t *cache, int n_glyphs, const pixman_glyph_t *glyphs) { pixman_format_code_t format = PIXMAN_a1; int i; for (i = 0; i < n_glyphs; ++i) { const glyph_t *glyph = glyphs[i].glyph; pixman_format_code_t glyph_format = glyph->image->bits.format; if (PIXMAN_FORMAT_TYPE (glyph_format) == PIXMAN_TYPE_A) { if (PIXMAN_FORMAT_A (glyph_format) > PIXMAN_FORMAT_A (format)) format = glyph_format; } else { return PIXMAN_a8r8g8b8; } } return format; } static pixman_bool_t box32_intersect (pixman_box32_t *dest, const pixman_box32_t *box1, const pixman_box32_t *box2) { dest->x1 = MAX (box1->x1, box2->x1); dest->y1 = MAX (box1->y1, box2->y1); dest->x2 = MIN (box1->x2, box2->x2); dest->y2 = MIN (box1->y2, box2->y2); return dest->x2 > dest->x1 && dest->y2 > dest->y1; } #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__) __attribute__((__force_align_arg_pointer__)) #endif PIXMAN_EXPORT void pixman_composite_glyphs_no_mask (pixman_op_t op, pixman_image_t *src, pixman_image_t *dest, int32_t src_x, int32_t src_y, int32_t dest_x, int32_t dest_y, pixman_glyph_cache_t *cache, int n_glyphs, const pixman_glyph_t *glyphs) { pixman_region32_t region; pixman_format_code_t glyph_format = PIXMAN_null; uint32_t glyph_flags = 0; pixman_format_code_t dest_format; uint32_t dest_flags; pixman_composite_func_t func = NULL; pixman_implementation_t *implementation = NULL; pixman_composite_info_t info; int i; 
_pixman_image_validate (src); _pixman_image_validate (dest); dest_format = dest->common.extended_format_code; dest_flags = dest->common.flags; pixman_region32_init (®ion); if (!_pixman_compute_composite_region32 ( ®ion, src, NULL, dest, src_x - dest_x, src_y - dest_y, 0, 0, 0, 0, dest->bits.width, dest->bits.height)) { goto out; } info.op = op; info.src_image = src; info.dest_image = dest; info.src_flags = src->common.flags; info.dest_flags = dest->common.flags; for (i = 0; i < n_glyphs; ++i) { glyph_t *glyph = (glyph_t *)glyphs[i].glyph; pixman_image_t *glyph_img = glyph->image; pixman_box32_t glyph_box; pixman_box32_t *pbox; uint32_t extra = FAST_PATH_SAMPLES_COVER_CLIP_NEAREST; pixman_box32_t composite_box; int n; glyph_box.x1 = dest_x + glyphs[i].x - glyph->origin_x; glyph_box.y1 = dest_y + glyphs[i].y - glyph->origin_y; glyph_box.x2 = glyph_box.x1 + glyph->image->bits.width; glyph_box.y2 = glyph_box.y1 + glyph->image->bits.height; pbox = pixman_region32_rectangles (®ion, &n); info.mask_image = glyph_img; while (n--) { if (box32_intersect (&composite_box, pbox, &glyph_box)) { if (glyph_img->common.extended_format_code != glyph_format || glyph_img->common.flags != glyph_flags) { glyph_format = glyph_img->common.extended_format_code; glyph_flags = glyph_img->common.flags; _pixman_implementation_lookup_composite ( get_implementation(), op, src->common.extended_format_code, src->common.flags, glyph_format, glyph_flags | extra, dest_format, dest_flags, &implementation, &func); } info.src_x = src_x + composite_box.x1 - dest_x; info.src_y = src_y + composite_box.y1 - dest_y; info.mask_x = composite_box.x1 - (dest_x + glyphs[i].x - glyph->origin_x); info.mask_y = composite_box.y1 - (dest_y + glyphs[i].y - glyph->origin_y); info.dest_x = composite_box.x1; info.dest_y = composite_box.y1; info.width = composite_box.x2 - composite_box.x1; info.height = composite_box.y2 - composite_box.y1; info.mask_flags = glyph_flags; func (implementation, &info); } pbox++; } 
pixman_list_move_to_front (&cache->mru, &glyph->mru_link); } out: pixman_region32_fini (®ion); } static void add_glyphs (pixman_glyph_cache_t *cache, pixman_image_t *dest, int off_x, int off_y, int n_glyphs, const pixman_glyph_t *glyphs) { pixman_format_code_t glyph_format = PIXMAN_null; uint32_t glyph_flags = 0; pixman_composite_func_t func = NULL; pixman_implementation_t *implementation = NULL; pixman_format_code_t dest_format; uint32_t dest_flags; pixman_box32_t dest_box; pixman_composite_info_t info; pixman_image_t *white_img = NULL; pixman_bool_t white_src = FALSE; int i; _pixman_image_validate (dest); dest_format = dest->common.extended_format_code; dest_flags = dest->common.flags; info.op = PIXMAN_OP_ADD; info.dest_image = dest; info.src_x = 0; info.src_y = 0; info.dest_flags = dest_flags; dest_box.x1 = 0; dest_box.y1 = 0; dest_box.x2 = dest->bits.width; dest_box.y2 = dest->bits.height; for (i = 0; i < n_glyphs; ++i) { glyph_t *glyph = (glyph_t *)glyphs[i].glyph; pixman_image_t *glyph_img = glyph->image; pixman_box32_t glyph_box; pixman_box32_t composite_box; if (glyph_img->common.extended_format_code != glyph_format || glyph_img->common.flags != glyph_flags) { pixman_format_code_t src_format, mask_format; glyph_format = glyph_img->common.extended_format_code; glyph_flags = glyph_img->common.flags; if (glyph_format == dest->bits.format) { src_format = glyph_format; mask_format = PIXMAN_null; info.src_flags = glyph_flags | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST; info.mask_flags = FAST_PATH_IS_OPAQUE; info.mask_image = NULL; white_src = FALSE; } else { if (!white_img) { static const pixman_color_t white = { 0xffff, 0xffff, 0xffff, 0xffff }; if (!(white_img = pixman_image_create_solid_fill (&white))) goto out; _pixman_image_validate (white_img); } src_format = PIXMAN_solid; mask_format = glyph_format; info.src_flags = white_img->common.flags; info.mask_flags = glyph_flags | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST; info.src_image = white_img; white_src = TRUE; } 
_pixman_implementation_lookup_composite ( get_implementation(), PIXMAN_OP_ADD, src_format, info.src_flags, mask_format, info.mask_flags, dest_format, dest_flags, &implementation, &func); } glyph_box.x1 = glyphs[i].x - glyph->origin_x + off_x; glyph_box.y1 = glyphs[i].y - glyph->origin_y + off_y; glyph_box.x2 = glyph_box.x1 + glyph->image->bits.width; glyph_box.y2 = glyph_box.y1 + glyph->image->bits.height; if (box32_intersect (&composite_box, &glyph_box, &dest_box)) { int src_x = composite_box.x1 - glyph_box.x1; int src_y = composite_box.y1 - glyph_box.y1; if (white_src) info.mask_image = glyph_img; else info.src_image = glyph_img; info.mask_x = info.src_x = src_x; info.mask_y = info.src_y = src_y; info.dest_x = composite_box.x1; info.dest_y = composite_box.y1; info.width = composite_box.x2 - composite_box.x1; info.height = composite_box.y2 - composite_box.y1; func (implementation, &info); pixman_list_move_to_front (&cache->mru, &glyph->mru_link); } } out: if (white_img) pixman_image_unref (white_img); } /* Conceptually, for each glyph, (white IN glyph) is PIXMAN_OP_ADDed to an * infinitely big mask image at the position such that the glyph origin point * is positioned at the (glyphs[i].x, glyphs[i].y) point. * * Then (mask_x, mask_y) in the infinite mask and (src_x, src_y) in the source * image are both aligned with (dest_x, dest_y) in the destination image. Then * these three images are composited within the * * (dest_x, dest_y, dst_x + width, dst_y + height) * * rectangle. * * TODO: * - Trim the mask to the destination clip/image? * - Trim composite region based on sources, when the op ignores 0s. 
*/ #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__) __attribute__((__force_align_arg_pointer__)) #endif PIXMAN_EXPORT void pixman_composite_glyphs (pixman_op_t op, pixman_image_t *src, pixman_image_t *dest, pixman_format_code_t mask_format, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y, int32_t dest_x, int32_t dest_y, int32_t width, int32_t height, pixman_glyph_cache_t *cache, int n_glyphs, const pixman_glyph_t *glyphs) { pixman_image_t *mask; if (!(mask = pixman_image_create_bits (mask_format, width, height, NULL, -1))) return; if (PIXMAN_FORMAT_A (mask_format) != 0 && PIXMAN_FORMAT_RGB (mask_format) != 0) { pixman_image_set_component_alpha (mask, TRUE); } add_glyphs (cache, mask, - mask_x, - mask_y, n_glyphs, glyphs); pixman_image_composite32 (op, src, mask, dest, src_x, src_y, 0, 0, dest_x, dest_y, width, height); pixman_image_unref (mask); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-gradient-walker.c0000664000175000017500000001710214712446423021131 0ustar00mattst88mattst88/* * * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. * 2005 Lars Knoll & Zack Rusin, Trolltech * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. 
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" void _pixman_gradient_walker_init (pixman_gradient_walker_t *walker, gradient_t * gradient, pixman_repeat_t repeat) { walker->num_stops = gradient->n_stops; walker->stops = gradient->stops; walker->left_x = 0; walker->right_x = 0x10000; walker->a_s = 0.0f; walker->a_b = 0.0f; walker->r_s = 0.0f; walker->r_b = 0.0f; walker->g_s = 0.0f; walker->g_b = 0.0f; walker->b_s = 0.0f; walker->b_b = 0.0f; walker->repeat = repeat; walker->need_reset = TRUE; } static void gradient_walker_reset (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t pos) { int64_t x, left_x, right_x; pixman_color_t *left_c, *right_c; int n, count = walker->num_stops; pixman_gradient_stop_t *stops = walker->stops; float la, lr, lg, lb; float ra, rr, rg, rb; float lx, rx; if (walker->repeat == PIXMAN_REPEAT_NORMAL) { x = (int32_t)pos & 0xffff; } else if (walker->repeat == PIXMAN_REPEAT_REFLECT) { x = (int32_t)pos & 0xffff; if ((int32_t)pos & 0x10000) x = 0x10000 - x; } else { x = pos; } for (n = 0; n < count; n++) { if (x < stops[n].x) break; } left_x = stops[n - 1].x; left_c = &stops[n - 1].color; right_x = stops[n].x; right_c = &stops[n].color; if (walker->repeat == PIXMAN_REPEAT_NORMAL) { left_x += (pos - x); right_x += (pos - x); } else if (walker->repeat == PIXMAN_REPEAT_REFLECT) { if ((int32_t)pos & 0x10000) { pixman_color_t *tmp_c; int32_t tmp_x; tmp_x = 0x10000 - right_x; right_x = 0x10000 - left_x; left_x = tmp_x; tmp_c = right_c; right_c = left_c; 
left_c = tmp_c; x = 0x10000 - x; } left_x += (pos - x); right_x += (pos - x); } else if (walker->repeat == PIXMAN_REPEAT_NONE) { if (n == 0) right_c = left_c; else if (n == count) left_c = right_c; } /* The alpha/red/green/blue channels are scaled to be in [0, 1]. * This ensures that after premultiplication all channels will * be in the [0, 1] interval. */ la = (left_c->alpha * (1.0f/257.0f)); lr = (left_c->red * (1.0f/257.0f)); lg = (left_c->green * (1.0f/257.0f)); lb = (left_c->blue * (1.0f/257.0f)); ra = (right_c->alpha * (1.0f/257.0f)); rr = (right_c->red * (1.0f/257.0f)); rg = (right_c->green * (1.0f/257.0f)); rb = (right_c->blue * (1.0f/257.0f)); lx = left_x * (1.0f/65536.0f); rx = right_x * (1.0f/65536.0f); if (FLOAT_IS_ZERO (rx - lx) || left_x == INT32_MIN || right_x == INT32_MAX) { walker->a_s = walker->r_s = walker->g_s = walker->b_s = 0.0f; walker->a_b = (la + ra) / 510.0f; walker->r_b = (lr + rr) / 510.0f; walker->g_b = (lg + rg) / 510.0f; walker->b_b = (lb + rb) / 510.0f; } else { float w_rec = 1.0f / (rx - lx); walker->a_b = (la * rx - ra * lx) * w_rec * (1.0f/255.0f); walker->r_b = (lr * rx - rr * lx) * w_rec * (1.0f/255.0f); walker->g_b = (lg * rx - rg * lx) * w_rec * (1.0f/255.0f); walker->b_b = (lb * rx - rb * lx) * w_rec * (1.0f/255.0f); walker->a_s = (ra - la) * w_rec * (1.0f/255.0f); walker->r_s = (rr - lr) * w_rec * (1.0f/255.0f); walker->g_s = (rg - lg) * w_rec * (1.0f/255.0f); walker->b_s = (rb - lb) * w_rec * (1.0f/255.0f); } walker->left_x = left_x; walker->right_x = right_x; walker->need_reset = FALSE; } static argb_t pixman_gradient_walker_pixel_float (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x) { argb_t f; float y; if (walker->need_reset || x < walker->left_x || x >= walker->right_x) gradient_walker_reset (walker, x); y = x * (1.0f / 65536.0f); f.a = walker->a_s * y + walker->a_b; f.r = f.a * (walker->r_s * y + walker->r_b); f.g = f.a * (walker->g_s * y + walker->g_b); f.b = f.a * (walker->b_s * y + walker->b_b); return f; 
} static uint32_t pixman_gradient_walker_pixel_32 (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x) { argb_t f; float y; if (walker->need_reset || x < walker->left_x || x >= walker->right_x) gradient_walker_reset (walker, x); y = x * (1.0f / 65536.0f); /* Instead of [0...1] for ARGB, we want [0...255], * multiply alpha with 255 and the color channels * also get multiplied by the alpha multiplier. * * We don't use pixman_contract_from_float because it causes a 2x * slowdown to do so, and the values are already normalized, * so we don't have to worry about values < 0.f or > 1.f */ f.a = 255.f * (walker->a_s * y + walker->a_b); f.r = f.a * (walker->r_s * y + walker->r_b); f.g = f.a * (walker->g_s * y + walker->g_b); f.b = f.a * (walker->b_s * y + walker->b_b); return (((uint32_t)(f.a + .5f) << 24) & 0xff000000) | (((uint32_t)(f.r + .5f) << 16) & 0x00ff0000) | (((uint32_t)(f.g + .5f) << 8) & 0x0000ff00) | (((uint32_t)(f.b + .5f) >> 0) & 0x000000ff); } void _pixman_gradient_walker_write_narrow (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer) { *buffer = pixman_gradient_walker_pixel_32 (walker, x); } void _pixman_gradient_walker_write_wide (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer) { *(argb_t *)buffer = pixman_gradient_walker_pixel_float (walker, x); } void _pixman_gradient_walker_fill_narrow (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer, uint32_t *end) { register uint32_t color; color = pixman_gradient_walker_pixel_32 (walker, x); while (buffer < end) *buffer++ = color; } void _pixman_gradient_walker_fill_wide (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer, uint32_t *end) { register argb_t color; argb_t *buffer_wide = (argb_t *)buffer; argb_t *end_wide = (argb_t *)end; color = pixman_gradient_walker_pixel_float (walker, x); while (buffer_wide < end_wide) *buffer_wide++ = color; } ././@PaxHeader0000000000000000000000000000002600000000000010213 
xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-image.c0000664000175000017500000006077114712446423017145 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include "pixman-private.h" static const pixman_color_t transparent_black = { 0, 0, 0, 0 }; static void gradient_property_changed (pixman_image_t *image) { gradient_t *gradient = &image->gradient; int n = gradient->n_stops; pixman_gradient_stop_t *stops = gradient->stops; pixman_gradient_stop_t *begin = &(gradient->stops[-1]); pixman_gradient_stop_t *end = &(gradient->stops[n]); switch (gradient->common.repeat) { default: case PIXMAN_REPEAT_NONE: begin->x = INT32_MIN; begin->color = transparent_black; end->x = INT32_MAX; end->color = transparent_black; break; case PIXMAN_REPEAT_NORMAL: begin->x = stops[n - 1].x - pixman_fixed_1; begin->color = stops[n - 1].color; end->x = stops[0].x + pixman_fixed_1; end->color = stops[0].color; break; case PIXMAN_REPEAT_REFLECT: begin->x = - stops[0].x; begin->color = stops[0].color; end->x = pixman_int_to_fixed (2) - stops[n - 1].x; end->color = stops[n - 1].color; break; case PIXMAN_REPEAT_PAD: begin->x = INT32_MIN; begin->color = stops[0].color; end->x = INT32_MAX; end->color = stops[n - 1].color; break; } } pixman_bool_t _pixman_init_gradient (gradient_t * gradient, const pixman_gradient_stop_t *stops, int n_stops) { return_val_if_fail (n_stops > 0, FALSE); /* We allocate two extra stops, one before the beginning of the stop list, * and one after the end. These stops are initialized to whatever color * would be used for positions outside the range of the stop list. * * This saves a bit of computation in the gradient walker. * * The pointer we store in the gradient_t struct still points to the * first user-supplied struct, so when freeing, we will have to * subtract one. 
*/ gradient->stops = pixman_malloc_ab (n_stops + 2, sizeof (pixman_gradient_stop_t)); if (!gradient->stops) return FALSE; gradient->stops += 1; memcpy (gradient->stops, stops, n_stops * sizeof (pixman_gradient_stop_t)); gradient->n_stops = n_stops; gradient->common.property_changed = gradient_property_changed; return TRUE; } void _pixman_image_init (pixman_image_t *image) { image_common_t *common = &image->common; pixman_region32_init (&common->clip_region); common->alpha_count = 0; common->have_clip_region = FALSE; common->clip_sources = FALSE; common->transform = NULL; common->repeat = PIXMAN_REPEAT_NONE; common->filter = PIXMAN_FILTER_NEAREST; common->filter_params = NULL; common->n_filter_params = 0; common->alpha_map = NULL; common->component_alpha = FALSE; common->ref_count = 1; common->property_changed = NULL; common->client_clip = FALSE; common->destroy_func = NULL; common->destroy_data = NULL; common->dirty = TRUE; } pixman_bool_t _pixman_image_fini (pixman_image_t *image) { image_common_t *common = (image_common_t *)image; common->ref_count--; if (common->ref_count == 0) { if (image->common.destroy_func) image->common.destroy_func (image, image->common.destroy_data); pixman_region32_fini (&common->clip_region); free (common->transform); free (common->filter_params); if (common->alpha_map) pixman_image_unref ((pixman_image_t *)common->alpha_map); if (image->type == LINEAR || image->type == RADIAL || image->type == CONICAL) { if (image->gradient.stops) { /* See _pixman_init_gradient() for an explanation of the - 1 */ free (image->gradient.stops - 1); } /* This will trigger if someone adds a property_changed * method to the linear/radial/conical gradient overwriting * the general one. 
*/ assert ( image->common.property_changed == gradient_property_changed); } if (image->type == BITS && image->bits.free_me) free (image->bits.free_me); return TRUE; } return FALSE; } pixman_image_t * _pixman_image_allocate (void) { pixman_image_t *image = malloc (sizeof (pixman_image_t)); if (image) _pixman_image_init (image); return image; } static void image_property_changed (pixman_image_t *image) { image->common.dirty = TRUE; } /* Ref Counting */ PIXMAN_EXPORT pixman_image_t * pixman_image_ref (pixman_image_t *image) { image->common.ref_count++; return image; } /* returns TRUE when the image is freed */ PIXMAN_EXPORT pixman_bool_t pixman_image_unref (pixman_image_t *image) { if (_pixman_image_fini (image)) { free (image); return TRUE; } return FALSE; } PIXMAN_EXPORT void pixman_image_set_destroy_function (pixman_image_t * image, pixman_image_destroy_func_t func, void * data) { image->common.destroy_func = func; image->common.destroy_data = data; } PIXMAN_EXPORT void * pixman_image_get_destroy_data (pixman_image_t *image) { return image->common.destroy_data; } void _pixman_image_reset_clip_region (pixman_image_t *image) { image->common.have_clip_region = FALSE; } /* Executive Summary: This function is a no-op that only exists * for historical reasons. * * There used to be a bug in the X server where it would rely on * out-of-bounds accesses when it was asked to composite with a * window as the source. It would create a pixman image pointing * to some bogus position in memory, but then set a clip region * to the position where the actual bits were. * * Due to a bug in old versions of pixman, where it would not clip * against the image bounds when a clip region was set, this would * actually work. So when the pixman bug was fixed, a workaround was * added to allow certain out-of-bound accesses. This function disabled * those workarounds. * * Since 0.21.2, pixman doesn't do these workarounds anymore, so now * this function is a no-op. 
*/ PIXMAN_EXPORT void pixman_disable_out_of_bounds_workaround (void) { } static void compute_image_info (pixman_image_t *image) { pixman_format_code_t code; uint32_t flags = 0; /* Transform */ if (!image->common.transform) { flags |= (FAST_PATH_ID_TRANSFORM | FAST_PATH_X_UNIT_POSITIVE | FAST_PATH_Y_UNIT_ZERO | FAST_PATH_AFFINE_TRANSFORM); } else { flags |= FAST_PATH_HAS_TRANSFORM; if (image->common.transform->matrix[2][0] == 0 && image->common.transform->matrix[2][1] == 0 && image->common.transform->matrix[2][2] == pixman_fixed_1) { flags |= FAST_PATH_AFFINE_TRANSFORM; if (image->common.transform->matrix[0][1] == 0 && image->common.transform->matrix[1][0] == 0) { if (image->common.transform->matrix[0][0] == -pixman_fixed_1 && image->common.transform->matrix[1][1] == -pixman_fixed_1) { flags |= FAST_PATH_ROTATE_180_TRANSFORM; } flags |= FAST_PATH_SCALE_TRANSFORM; } else if (image->common.transform->matrix[0][0] == 0 && image->common.transform->matrix[1][1] == 0) { pixman_fixed_t m01 = image->common.transform->matrix[0][1]; pixman_fixed_t m10 = image->common.transform->matrix[1][0]; if (m01 == -pixman_fixed_1 && m10 == pixman_fixed_1) flags |= FAST_PATH_ROTATE_90_TRANSFORM; else if (m01 == pixman_fixed_1 && m10 == -pixman_fixed_1) flags |= FAST_PATH_ROTATE_270_TRANSFORM; } } if (image->common.transform->matrix[0][0] > 0) flags |= FAST_PATH_X_UNIT_POSITIVE; if (image->common.transform->matrix[1][0] == 0) flags |= FAST_PATH_Y_UNIT_ZERO; } /* Filter */ switch (image->common.filter) { case PIXMAN_FILTER_NEAREST: case PIXMAN_FILTER_FAST: flags |= (FAST_PATH_NEAREST_FILTER | FAST_PATH_NO_CONVOLUTION_FILTER); break; case PIXMAN_FILTER_BILINEAR: case PIXMAN_FILTER_GOOD: case PIXMAN_FILTER_BEST: flags |= (FAST_PATH_BILINEAR_FILTER | FAST_PATH_NO_CONVOLUTION_FILTER); /* Here we have a chance to optimize BILINEAR filter to NEAREST if * they are equivalent for the currently used transformation matrix. 
*/ if (flags & FAST_PATH_ID_TRANSFORM) { flags |= FAST_PATH_NEAREST_FILTER; } else if (flags & FAST_PATH_AFFINE_TRANSFORM) { /* Suppose the transform is * * [ t00, t01, t02 ] * [ t10, t11, t12 ] * [ 0, 0, 1 ] * * and the destination coordinates are (n + 0.5, m + 0.5). Then * the transformed x coordinate is: * * tx = t00 * (n + 0.5) + t01 * (m + 0.5) + t02 * = t00 * n + t01 * m + t02 + (t00 + t01) * 0.5 * * which implies that if t00, t01 and t02 are all integers * and (t00 + t01) is odd, then tx will be an integer plus 0.5, * which means a BILINEAR filter will reduce to NEAREST. The same * applies in the y direction */ pixman_fixed_t (*t)[3] = image->common.transform->matrix; if ((pixman_fixed_frac ( t[0][0] | t[0][1] | t[0][2] | t[1][0] | t[1][1] | t[1][2]) == 0) && (pixman_fixed_to_int ( (t[0][0] + t[0][1]) & (t[1][0] + t[1][1])) % 2) == 1) { /* FIXME: there are some affine-test failures, showing that * handling of BILINEAR and NEAREST filter is not quite * equivalent when getting close to 32K for the translation * components of the matrix. That's likely some bug, but for * now just skip BILINEAR->NEAREST optimization in this case. 
*/ pixman_fixed_t magic_limit = pixman_int_to_fixed (30000); if (image->common.transform->matrix[0][2] <= magic_limit && image->common.transform->matrix[1][2] <= magic_limit && image->common.transform->matrix[0][2] >= -magic_limit && image->common.transform->matrix[1][2] >= -magic_limit) { flags |= FAST_PATH_NEAREST_FILTER; } } } break; case PIXMAN_FILTER_CONVOLUTION: break; case PIXMAN_FILTER_SEPARABLE_CONVOLUTION: flags |= FAST_PATH_SEPARABLE_CONVOLUTION_FILTER; break; default: flags |= FAST_PATH_NO_CONVOLUTION_FILTER; break; } /* Repeat mode */ switch (image->common.repeat) { case PIXMAN_REPEAT_NONE: flags |= FAST_PATH_NO_REFLECT_REPEAT | FAST_PATH_NO_PAD_REPEAT | FAST_PATH_NO_NORMAL_REPEAT; break; case PIXMAN_REPEAT_REFLECT: flags |= FAST_PATH_NO_PAD_REPEAT | FAST_PATH_NO_NONE_REPEAT | FAST_PATH_NO_NORMAL_REPEAT; break; case PIXMAN_REPEAT_PAD: flags |= FAST_PATH_NO_REFLECT_REPEAT | FAST_PATH_NO_NONE_REPEAT | FAST_PATH_NO_NORMAL_REPEAT; break; default: flags |= FAST_PATH_NO_REFLECT_REPEAT | FAST_PATH_NO_PAD_REPEAT | FAST_PATH_NO_NONE_REPEAT; break; } /* Component alpha */ if (image->common.component_alpha) flags |= FAST_PATH_COMPONENT_ALPHA; else flags |= FAST_PATH_UNIFIED_ALPHA; flags |= (FAST_PATH_NO_ACCESSORS | FAST_PATH_NARROW_FORMAT); /* Type specific checks */ switch (image->type) { case SOLID: code = PIXMAN_solid; if (image->solid.color.alpha == 0xffff) flags |= FAST_PATH_IS_OPAQUE; break; case BITS: if (image->bits.width == 1 && image->bits.height == 1 && image->common.repeat != PIXMAN_REPEAT_NONE) { code = PIXMAN_solid; } else { code = image->bits.format; flags |= FAST_PATH_BITS_IMAGE; } if (!PIXMAN_FORMAT_A (image->bits.format) && PIXMAN_FORMAT_TYPE (image->bits.format) != PIXMAN_TYPE_GRAY && PIXMAN_FORMAT_TYPE (image->bits.format) != PIXMAN_TYPE_COLOR) { flags |= FAST_PATH_SAMPLES_OPAQUE; if (image->common.repeat != PIXMAN_REPEAT_NONE) flags |= FAST_PATH_IS_OPAQUE; } if (image->bits.read_func || image->bits.write_func) flags &= 
~FAST_PATH_NO_ACCESSORS; if (PIXMAN_FORMAT_IS_WIDE (image->bits.format)) flags &= ~FAST_PATH_NARROW_FORMAT; break; case RADIAL: code = PIXMAN_unknown; /* * As explained in pixman-radial-gradient.c, every point of * the plane has a valid associated radius (and thus will be * colored) if and only if a is negative (i.e. one of the two * circles contains the other one). */ if (image->radial.a >= 0) break; /* Fall through */ case CONICAL: case LINEAR: code = PIXMAN_unknown; if (image->common.repeat != PIXMAN_REPEAT_NONE) { int i; flags |= FAST_PATH_IS_OPAQUE; for (i = 0; i < image->gradient.n_stops; ++i) { if (image->gradient.stops[i].color.alpha != 0xffff) { flags &= ~FAST_PATH_IS_OPAQUE; break; } } } break; default: code = PIXMAN_unknown; break; } /* Alpha maps are only supported for BITS images, so it's always * safe to ignore their presense for non-BITS images */ if (!image->common.alpha_map || image->type != BITS) { flags |= FAST_PATH_NO_ALPHA_MAP; } else { if (PIXMAN_FORMAT_IS_WIDE (image->common.alpha_map->format)) flags &= ~FAST_PATH_NARROW_FORMAT; } /* Both alpha maps and convolution filters can introduce * non-opaqueness in otherwise opaque images. Also * an image with component alpha turned on is only opaque * if all channels are opaque, so we simply turn it off * unconditionally for those images. */ if (image->common.alpha_map || image->common.filter == PIXMAN_FILTER_CONVOLUTION || image->common.filter == PIXMAN_FILTER_SEPARABLE_CONVOLUTION || image->common.component_alpha) { flags &= ~(FAST_PATH_IS_OPAQUE | FAST_PATH_SAMPLES_OPAQUE); } image->common.flags = flags; image->common.extended_format_code = code; } void _pixman_image_validate (pixman_image_t *image) { if (image->common.dirty) { compute_image_info (image); /* It is important that property_changed is * called *after* compute_image_info() because * property_changed() can make use of the flags * to set up accessors etc. 
*/ if (image->common.property_changed) image->common.property_changed (image); image->common.dirty = FALSE; } if (image->common.alpha_map) _pixman_image_validate ((pixman_image_t *)image->common.alpha_map); } PIXMAN_EXPORT pixman_bool_t pixman_image_set_clip_region32 (pixman_image_t * image, const pixman_region32_t *region) { image_common_t *common = (image_common_t *)image; pixman_bool_t result; if (region) { if ((result = pixman_region32_copy (&common->clip_region, region))) image->common.have_clip_region = TRUE; } else { _pixman_image_reset_clip_region (image); result = TRUE; } image_property_changed (image); return result; } PIXMAN_EXPORT pixman_bool_t pixman_image_set_clip_region (pixman_image_t * image, const pixman_region16_t *region) { image_common_t *common = (image_common_t *)image; pixman_bool_t result; if (region) { if ((result = pixman_region32_copy_from_region16 (&common->clip_region, region))) image->common.have_clip_region = TRUE; } else { _pixman_image_reset_clip_region (image); result = TRUE; } image_property_changed (image); return result; } PIXMAN_EXPORT void pixman_image_set_has_client_clip (pixman_image_t *image, pixman_bool_t client_clip) { image->common.client_clip = client_clip; } PIXMAN_EXPORT pixman_bool_t pixman_image_set_transform (pixman_image_t * image, const pixman_transform_t *transform) { static const pixman_transform_t id = { { { pixman_fixed_1, 0, 0 }, { 0, pixman_fixed_1, 0 }, { 0, 0, pixman_fixed_1 } } }; image_common_t *common = (image_common_t *)image; pixman_bool_t result; if (common->transform == transform) return TRUE; if (!transform || memcmp (&id, transform, sizeof (pixman_transform_t)) == 0) { free (common->transform); common->transform = NULL; result = TRUE; goto out; } if (common->transform && memcmp (common->transform, transform, sizeof (pixman_transform_t)) == 0) { return TRUE; } if (common->transform == NULL) common->transform = malloc (sizeof (pixman_transform_t)); if (common->transform == NULL) { result = FALSE; 
goto out; } memcpy (common->transform, transform, sizeof(pixman_transform_t)); result = TRUE; out: image_property_changed (image); return result; } PIXMAN_EXPORT void pixman_image_set_repeat (pixman_image_t *image, pixman_repeat_t repeat) { if (image->common.repeat == repeat) return; image->common.repeat = repeat; image_property_changed (image); } PIXMAN_EXPORT void pixman_image_set_dither (pixman_image_t *image, pixman_dither_t dither) { if (image->type == BITS) { if (image->bits.dither == dither) return; image->bits.dither = dither; image_property_changed (image); } } PIXMAN_EXPORT void pixman_image_set_dither_offset (pixman_image_t *image, int offset_x, int offset_y) { if (image->type == BITS) { if (image->bits.dither_offset_x == offset_x && image->bits.dither_offset_y == offset_y) { return; } image->bits.dither_offset_x = offset_x; image->bits.dither_offset_y = offset_y; image_property_changed (image); } } PIXMAN_EXPORT pixman_bool_t pixman_image_set_filter (pixman_image_t * image, pixman_filter_t filter, const pixman_fixed_t *params, int n_params) { image_common_t *common = (image_common_t *)image; pixman_fixed_t *new_params; if (params == common->filter_params && filter == common->filter) return TRUE; if (filter == PIXMAN_FILTER_SEPARABLE_CONVOLUTION) { int width = pixman_fixed_to_int (params[0]); int height = pixman_fixed_to_int (params[1]); int x_phase_bits = pixman_fixed_to_int (params[2]); int y_phase_bits = pixman_fixed_to_int (params[3]); int n_x_phases = (1 << x_phase_bits); int n_y_phases = (1 << y_phase_bits); return_val_if_fail ( n_params == 4 + n_x_phases * width + n_y_phases * height, FALSE); } new_params = NULL; if (params) { new_params = pixman_malloc_ab (n_params, sizeof (pixman_fixed_t)); if (!new_params) return FALSE; memcpy (new_params, params, n_params * sizeof (pixman_fixed_t)); } common->filter = filter; if (common->filter_params) free (common->filter_params); common->filter_params = new_params; common->n_filter_params = n_params; 
image_property_changed (image); return TRUE; } PIXMAN_EXPORT void pixman_image_set_source_clipping (pixman_image_t *image, pixman_bool_t clip_sources) { if (image->common.clip_sources == clip_sources) return; image->common.clip_sources = clip_sources; image_property_changed (image); } /* Unlike all the other property setters, this function does not * copy the content of indexed. Doing this copying is simply * way, way too expensive. */ PIXMAN_EXPORT void pixman_image_set_indexed (pixman_image_t * image, const pixman_indexed_t *indexed) { bits_image_t *bits = (bits_image_t *)image; if (bits->indexed == indexed) return; bits->indexed = indexed; image_property_changed (image); } PIXMAN_EXPORT void pixman_image_set_alpha_map (pixman_image_t *image, pixman_image_t *alpha_map, int16_t x, int16_t y) { image_common_t *common = (image_common_t *)image; return_if_fail (!alpha_map || alpha_map->type == BITS); if (alpha_map && common->alpha_count > 0) { /* If this image is being used as an alpha map itself, * then you can't give it an alpha map of its own. 
*/ return; } if (alpha_map && alpha_map->common.alpha_map) { /* If the image has an alpha map of its own, * then it can't be used as an alpha map itself */ return; } if (common->alpha_map != (bits_image_t *)alpha_map) { if (common->alpha_map) { common->alpha_map->common.alpha_count--; pixman_image_unref ((pixman_image_t *)common->alpha_map); } if (alpha_map) { common->alpha_map = (bits_image_t *)pixman_image_ref (alpha_map); common->alpha_map->common.alpha_count++; } else { common->alpha_map = NULL; } } common->alpha_origin_x = x; common->alpha_origin_y = y; image_property_changed (image); } PIXMAN_EXPORT void pixman_image_set_component_alpha (pixman_image_t *image, pixman_bool_t component_alpha) { if (image->common.component_alpha == component_alpha) return; image->common.component_alpha = component_alpha; image_property_changed (image); } PIXMAN_EXPORT pixman_bool_t pixman_image_get_component_alpha (pixman_image_t *image) { return image->common.component_alpha; } PIXMAN_EXPORT void pixman_image_set_accessors (pixman_image_t * image, pixman_read_memory_func_t read_func, pixman_write_memory_func_t write_func) { return_if_fail (image != NULL); if (image->type == BITS) { /* Accessors only work for <= 32 bpp. 
*/ if (PIXMAN_FORMAT_BPP(image->bits.format) > 32) return_if_fail (!read_func && !write_func); image->bits.read_func = read_func; image->bits.write_func = write_func; image_property_changed (image); } } PIXMAN_EXPORT uint32_t * pixman_image_get_data (pixman_image_t *image) { if (image->type == BITS) return image->bits.bits; return NULL; } PIXMAN_EXPORT int pixman_image_get_width (pixman_image_t *image) { if (image->type == BITS) return image->bits.width; return 0; } PIXMAN_EXPORT int pixman_image_get_height (pixman_image_t *image) { if (image->type == BITS) return image->bits.height; return 0; } PIXMAN_EXPORT int pixman_image_get_stride (pixman_image_t *image) { if (image->type == BITS) return image->bits.rowstride * (int) sizeof (uint32_t); return 0; } PIXMAN_EXPORT int pixman_image_get_depth (pixman_image_t *image) { if (image->type == BITS) return PIXMAN_FORMAT_DEPTH (image->bits.format); return 0; } PIXMAN_EXPORT pixman_format_code_t pixman_image_get_format (pixman_image_t *image) { if (image->type == BITS) return image->bits.format; return PIXMAN_null; } uint32_t _pixman_image_get_solid (pixman_implementation_t *imp, pixman_image_t * image, pixman_format_code_t format) { uint32_t result; if (image->type == SOLID) { result = image->solid.color_32; } else if (image->type == BITS) { if (image->bits.format == PIXMAN_a8r8g8b8) result = image->bits.bits[0]; else if (image->bits.format == PIXMAN_x8r8g8b8) result = image->bits.bits[0] | 0xff000000; else if (image->bits.format == PIXMAN_a8) result = (uint32_t)(*(uint8_t *)image->bits.bits) << 24; else goto otherwise; } else { pixman_iter_t iter; otherwise: _pixman_implementation_iter_init ( imp, &iter, image, 0, 0, 1, 1, (uint8_t *)&result, ITER_NARROW | ITER_SRC, image->common.flags); result = *iter.get_scanline (&iter, NULL); if (iter.fini) iter.fini (&iter); } /* If necessary, convert RGB <--> BGR. 
*/ if (PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB && PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB_SRGB) { result = (((result & 0xff000000) >> 0) | ((result & 0x00ff0000) >> 16) | ((result & 0x0000ff00) >> 0) | ((result & 0x000000ff) << 16)); } return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-implementation.c0000664000175000017500000002657014712446423021107 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Red Hat not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. Red Hat makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include "pixman-private.h" pixman_implementation_t * _pixman_implementation_create (pixman_implementation_t *fallback, const pixman_fast_path_t *fast_paths) { pixman_implementation_t *imp; assert (fast_paths); if ((imp = malloc (sizeof (pixman_implementation_t)))) { pixman_implementation_t *d; memset (imp, 0, sizeof *imp); imp->fallback = fallback; imp->fast_paths = fast_paths; /* Make sure the whole fallback chain has the right toplevel */ for (d = imp; d != NULL; d = d->fallback) d->toplevel = imp; } return imp; } #define N_CACHED_FAST_PATHS 8 typedef struct { struct { pixman_implementation_t * imp; pixman_fast_path_t fast_path; } cache [N_CACHED_FAST_PATHS]; } cache_t; PIXMAN_DEFINE_THREAD_LOCAL (cache_t, fast_path_cache) static void dummy_composite_rect (pixman_implementation_t *imp, pixman_composite_info_t *info) { } void _pixman_implementation_lookup_composite (pixman_implementation_t *toplevel, pixman_op_t op, pixman_format_code_t src_format, uint32_t src_flags, pixman_format_code_t mask_format, uint32_t mask_flags, pixman_format_code_t dest_format, uint32_t dest_flags, pixman_implementation_t **out_imp, pixman_composite_func_t *out_func) { pixman_implementation_t *imp; cache_t *cache; int i; /* Check cache for fast paths */ cache = PIXMAN_GET_THREAD_LOCAL (fast_path_cache); for (i = 0; i < N_CACHED_FAST_PATHS; ++i) { const pixman_fast_path_t *info = &(cache->cache[i].fast_path); /* Note that we check for equality here, not whether * the cached fast path matches. This is to prevent * us from selecting an overly general fast path * when a more specific one would work. 
*/ if (info->op == op && info->src_format == src_format && info->mask_format == mask_format && info->dest_format == dest_format && info->src_flags == src_flags && info->mask_flags == mask_flags && info->dest_flags == dest_flags && info->func) { *out_imp = cache->cache[i].imp; *out_func = cache->cache[i].fast_path.func; goto update_cache; } } for (imp = toplevel; imp != NULL; imp = imp->fallback) { const pixman_fast_path_t *info = imp->fast_paths; while (info->op != PIXMAN_OP_NONE) { if ((info->op == op || info->op == PIXMAN_OP_any) && /* Formats */ ((info->src_format == src_format) || (info->src_format == PIXMAN_any)) && ((info->mask_format == mask_format) || (info->mask_format == PIXMAN_any)) && ((info->dest_format == dest_format) || (info->dest_format == PIXMAN_any)) && /* Flags */ (info->src_flags & src_flags) == info->src_flags && (info->mask_flags & mask_flags) == info->mask_flags && (info->dest_flags & dest_flags) == info->dest_flags) { *out_imp = imp; *out_func = info->func; /* Set i to the last spot in the cache so that the * move-to-front code below will work */ i = N_CACHED_FAST_PATHS - 1; goto update_cache; } ++info; } } /* We should never reach this point */ _pixman_log_error ( FUNC, "No composite function found\n" "\n" "The most likely cause of this is that this system has issues with\n" "thread local storage\n"); *out_imp = NULL; *out_func = dummy_composite_rect; return; update_cache: if (i) { while (i--) cache->cache[i + 1] = cache->cache[i]; cache->cache[0].imp = *out_imp; cache->cache[0].fast_path.op = op; cache->cache[0].fast_path.src_format = src_format; cache->cache[0].fast_path.src_flags = src_flags; cache->cache[0].fast_path.mask_format = mask_format; cache->cache[0].fast_path.mask_flags = mask_flags; cache->cache[0].fast_path.dest_format = dest_format; cache->cache[0].fast_path.dest_flags = dest_flags; cache->cache[0].fast_path.func = *out_func; } } static void dummy_combine (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const 
uint32_t * ps, const uint32_t * pm, int w) { } pixman_combine_32_func_t _pixman_implementation_lookup_combiner (pixman_implementation_t *imp, pixman_op_t op, pixman_bool_t component_alpha, pixman_bool_t narrow) { while (imp) { pixman_combine_32_func_t f = NULL; switch ((narrow << 1) | component_alpha) { case 0: /* not narrow, not component alpha */ f = (pixman_combine_32_func_t)imp->combine_float[op]; break; case 1: /* not narrow, component_alpha */ f = (pixman_combine_32_func_t)imp->combine_float_ca[op]; break; case 2: /* narrow, not component alpha */ f = imp->combine_32[op]; break; case 3: /* narrow, component_alpha */ f = imp->combine_32_ca[op]; break; } if (f) return f; imp = imp->fallback; } /* We should never reach this point */ _pixman_log_error (FUNC, "No known combine function\n"); return dummy_combine; } pixman_bool_t _pixman_implementation_blt (pixman_implementation_t * imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height) { while (imp) { if (imp->blt && (*imp->blt) (imp, src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp, src_x, src_y, dest_x, dest_y, width, height)) { return TRUE; } imp = imp->fallback; } return FALSE; } pixman_bool_t _pixman_implementation_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t filler) { while (imp) { if (imp->fill && ((*imp->fill) (imp, bits, stride, bpp, x, y, width, height, filler))) { return TRUE; } imp = imp->fallback; } return FALSE; } static uint32_t * get_scanline_null (pixman_iter_t *iter, const uint32_t *mask) { return NULL; } void _pixman_implementation_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter, pixman_image_t *image, int x, int y, int width, int height, uint8_t *buffer, iter_flags_t iter_flags, uint32_t image_flags) { pixman_format_code_t format; iter->image = image; iter->buffer = (uint32_t 
*)buffer; iter->x = x; iter->y = y; iter->width = width; iter->height = height; iter->iter_flags = iter_flags; iter->image_flags = image_flags; iter->fini = NULL; if (!iter->image) { iter->get_scanline = get_scanline_null; return; } format = iter->image->common.extended_format_code; while (imp) { if (imp->iter_info) { const pixman_iter_info_t *info; for (info = imp->iter_info; info->format != PIXMAN_null; ++info) { if ((info->format == PIXMAN_any || info->format == format) && (info->image_flags & image_flags) == info->image_flags && (info->iter_flags & iter_flags) == info->iter_flags) { iter->get_scanline = info->get_scanline; iter->write_back = info->write_back; if (info->initializer) info->initializer (iter, info); return; } } } imp = imp->fallback; } } pixman_bool_t _pixman_disabled (const char *name) { const char *env; if ((env = getenv ("PIXMAN_DISABLE"))) { do { const char *end; int len; if ((end = strchr (env, ' '))) len = end - env; else len = strlen (env); if (strlen (name) == len && strncmp (name, env, len) == 0) { printf ("pixman: Disabled %s implementation\n", name); return TRUE; } env += len; } while (*env++); } return FALSE; } static const pixman_fast_path_t empty_fast_path[] = { { PIXMAN_OP_NONE } }; pixman_implementation_t * _pixman_choose_implementation (void) { pixman_implementation_t *imp; imp = _pixman_implementation_create_general(); if (!_pixman_disabled ("fast")) imp = _pixman_implementation_create_fast_path (imp); imp = _pixman_x86_get_implementations (imp); imp = _pixman_arm_get_implementations (imp); imp = _pixman_ppc_get_implementations (imp); imp = _pixman_mips_get_implementations (imp); imp = _pixman_riscv_get_implementations (imp); imp = _pixman_implementation_create_noop (imp); if (_pixman_disabled ("wholeops")) { pixman_implementation_t *cur; /* Disable all whole-operation paths except the general one, * so that optimized iterators are used as much as possible. 
*/ for (cur = imp; cur->fallback; cur = cur->fallback) cur->fast_paths = empty_fast_path; } return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-inlines.h0000664000175000017500000014124514712446423017525 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Author: Keith Packard, SuSE, Inc. */ #ifndef PIXMAN_FAST_PATH_H__ #define PIXMAN_FAST_PATH_H__ #include "pixman-private.h" #define PIXMAN_REPEAT_COVER -1 /* Flags describing input parameters to fast path macro template. * Turning on some flag values may indicate that * "some property X is available so template can use this" or * "some property X should be handled by template". * * FLAG_HAVE_SOLID_MASK * Input mask is solid so template should handle this. 
* * FLAG_HAVE_NON_SOLID_MASK * Input mask is bits mask so template should handle this. * * FLAG_HAVE_SOLID_MASK and FLAG_HAVE_NON_SOLID_MASK are mutually * exclusive. (It's not allowed to turn both flags on) */ #define FLAG_NONE (0) #define FLAG_HAVE_SOLID_MASK (1 << 1) #define FLAG_HAVE_NON_SOLID_MASK (1 << 2) /* To avoid too short repeated scanline function calls, extend source * scanlines having width less than below constant value. */ #define REPEAT_NORMAL_MIN_WIDTH 64 static force_inline pixman_bool_t repeat (pixman_repeat_t repeat, int *c, int size) { if (repeat == PIXMAN_REPEAT_NONE) { if (*c < 0 || *c >= size) return FALSE; } else if (repeat == PIXMAN_REPEAT_NORMAL) { while (*c >= size) *c -= size; while (*c < 0) *c += size; } else if (repeat == PIXMAN_REPEAT_PAD) { *c = CLIP (*c, 0, size - 1); } else /* REFLECT */ { *c = MOD (*c, size * 2); if (*c >= size) *c = size * 2 - *c - 1; } return TRUE; } static force_inline int pixman_fixed_to_bilinear_weight (pixman_fixed_t x) { return (x >> (16 - BILINEAR_INTERPOLATION_BITS)) & ((1 << BILINEAR_INTERPOLATION_BITS) - 1); } #if BILINEAR_INTERPOLATION_BITS <= 4 /* Inspired by Filter_32_opaque from Skia */ static force_inline uint32_t bilinear_interpolation (uint32_t tl, uint32_t tr, uint32_t bl, uint32_t br, int distx, int disty) { int distxy, distxiy, distixy, distixiy; uint32_t lo, hi; distx <<= (4 - BILINEAR_INTERPOLATION_BITS); disty <<= (4 - BILINEAR_INTERPOLATION_BITS); distxy = distx * disty; distxiy = (distx << 4) - distxy; /* distx * (16 - disty) */ distixy = (disty << 4) - distxy; /* disty * (16 - distx) */ distixiy = 16 * 16 - (disty << 4) - (distx << 4) + distxy; /* (16 - distx) * (16 - disty) */ lo = (tl & 0xff00ff) * distixiy; hi = ((tl >> 8) & 0xff00ff) * distixiy; lo += (tr & 0xff00ff) * distxiy; hi += ((tr >> 8) & 0xff00ff) * distxiy; lo += (bl & 0xff00ff) * distixy; hi += ((bl >> 8) & 0xff00ff) * distixy; lo += (br & 0xff00ff) * distxy; hi += ((br >> 8) & 0xff00ff) * distxy; return ((lo >> 8) & 
0xff00ff) | (hi & ~0xff00ff); } #else #if SIZEOF_LONG > 4 static force_inline uint32_t bilinear_interpolation (uint32_t tl, uint32_t tr, uint32_t bl, uint32_t br, int distx, int disty) { uint64_t distxy, distxiy, distixy, distixiy; uint64_t tl64, tr64, bl64, br64; uint64_t f, r; distx <<= (8 - BILINEAR_INTERPOLATION_BITS); disty <<= (8 - BILINEAR_INTERPOLATION_BITS); distxy = distx * disty; distxiy = distx * (256 - disty); distixy = (256 - distx) * disty; distixiy = (256 - distx) * (256 - disty); /* Alpha and Blue */ tl64 = tl & 0xff0000ff; tr64 = tr & 0xff0000ff; bl64 = bl & 0xff0000ff; br64 = br & 0xff0000ff; f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy; r = f & 0x0000ff0000ff0000ull; /* Red and Green */ tl64 = tl; tl64 = ((tl64 << 16) & 0x000000ff00000000ull) | (tl64 & 0x0000ff00ull); tr64 = tr; tr64 = ((tr64 << 16) & 0x000000ff00000000ull) | (tr64 & 0x0000ff00ull); bl64 = bl; bl64 = ((bl64 << 16) & 0x000000ff00000000ull) | (bl64 & 0x0000ff00ull); br64 = br; br64 = ((br64 << 16) & 0x000000ff00000000ull) | (br64 & 0x0000ff00ull); f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy; r |= ((f >> 16) & 0x000000ff00000000ull) | (f & 0xff000000ull); return (uint32_t)(r >> 16); } #else static force_inline uint32_t bilinear_interpolation (uint32_t tl, uint32_t tr, uint32_t bl, uint32_t br, int distx, int disty) { int distxy, distxiy, distixy, distixiy; uint32_t f, r; distx <<= (8 - BILINEAR_INTERPOLATION_BITS); disty <<= (8 - BILINEAR_INTERPOLATION_BITS); distxy = distx * disty; distxiy = (distx << 8) - distxy; /* distx * (256 - disty) */ distixy = (disty << 8) - distxy; /* disty * (256 - distx) */ distixiy = 256 * 256 - (disty << 8) - (distx << 8) + distxy; /* (256 - distx) * (256 - disty) */ /* Blue */ r = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy; /* Green */ f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy + (bl & 0x0000ff00) * distixy 
+ (br & 0x0000ff00) * distxy; r |= f & 0xff000000; tl >>= 16; tr >>= 16; bl >>= 16; br >>= 16; r >>= 16; /* Red */ f = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy; r |= f & 0x00ff0000; /* Alpha */ f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy; r |= f & 0xff000000; return r; } #endif #endif // BILINEAR_INTERPOLATION_BITS <= 4 static force_inline argb_t bilinear_interpolation_float (argb_t tl, argb_t tr, argb_t bl, argb_t br, float distx, float disty) { float distxy, distxiy, distixy, distixiy; argb_t r; distxy = distx * disty; distxiy = distx * (1.f - disty); distixy = (1.f - distx) * disty; distixiy = (1.f - distx) * (1.f - disty); r.a = tl.a * distixiy + tr.a * distxiy + bl.a * distixy + br.a * distxy; r.r = tl.r * distixiy + tr.r * distxiy + bl.r * distixy + br.r * distxy; r.g = tl.g * distixiy + tr.g * distxiy + bl.g * distixy + br.g * distxy; r.b = tl.b * distixiy + tr.b * distxiy + bl.b * distixy + br.b * distxy; return r; } /* * For each scanline fetched from source image with PAD repeat: * - calculate how many pixels need to be padded on the left side * - calculate how many pixels need to be padded on the right side * - update width to only count pixels which are fetched from the image * All this information is returned via 'width', 'left_pad', 'right_pad' * arguments. The code is assuming that 'unit_x' is positive. * * Note: 64-bit math is used in order to avoid potential overflows, which * is probably excessive in many cases. This particular function * may need its own correctness test and performance tuning. 
*/ static force_inline void pad_repeat_get_scanline_bounds (int32_t source_image_width, pixman_fixed_t vx, pixman_fixed_t unit_x, int32_t * width, int32_t * left_pad, int32_t * right_pad) { int64_t max_vx = (int64_t) source_image_width << 16; int64_t tmp; if (vx < 0) { tmp = ((int64_t) unit_x - 1 - vx) / unit_x; if (tmp > *width) { *left_pad = *width; *width = 0; } else { *left_pad = (int32_t) tmp; *width -= (int32_t) tmp; } } else { *left_pad = 0; } tmp = ((int64_t) unit_x - 1 - vx + max_vx) / unit_x - *left_pad; if (tmp < 0) { *right_pad = *width; *width = 0; } else if (tmp >= *width) { *right_pad = 0; } else { *right_pad = *width - (int32_t) tmp; *width = (int32_t) tmp; } } /* A macroified version of specialized nearest scalers for some * common 8888 and 565 formats. It supports SRC and OVER ops. * * There are two repeat versions, one that handles repeat normal, * and one without repeat handling that only works if the src region * used is completely covered by the pre-repeated source samples. * * The loops are unrolled to process two pixels per iteration for better * performance on most CPU architectures (superscalar processors * can issue several operations simultaneously, other processors can hide * instructions latencies by pipelining operations). Unrolling more * does not make much sense because the compiler will start running out * of spare registers soon. */ #define GET_8888_ALPHA(s) ((s) >> 24) /* This is not actually used since we don't have an OVER with 565 source, but it is needed to build. 
*/ #define GET_0565_ALPHA(s) 0xff #define GET_x888_ALPHA(s) 0xff #define FAST_NEAREST_SCANLINE(scanline_func_name, SRC_FORMAT, DST_FORMAT, \ src_type_t, dst_type_t, OP, repeat_mode) \ static force_inline void \ scanline_func_name (dst_type_t *dst, \ const src_type_t *src, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t src_width_fixed, \ pixman_bool_t fully_transparent_src) \ { \ uint32_t d; \ src_type_t s1, s2; \ uint8_t a1, a2; \ int x1, x2; \ \ if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER && fully_transparent_src) \ return; \ \ if (PIXMAN_OP_ ## OP != PIXMAN_OP_SRC && PIXMAN_OP_ ## OP != PIXMAN_OP_OVER) \ abort(); \ \ while ((w -= 2) >= 0) \ { \ x1 = pixman_fixed_to_int (vx); \ vx += unit_x; \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ { \ /* This works because we know that unit_x is positive */ \ while (vx >= 0) \ vx -= src_width_fixed; \ } \ s1 = *(src + x1); \ \ x2 = pixman_fixed_to_int (vx); \ vx += unit_x; \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ { \ /* This works because we know that unit_x is positive */ \ while (vx >= 0) \ vx -= src_width_fixed; \ } \ s2 = *(src + x2); \ \ if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \ { \ a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \ a2 = GET_ ## SRC_FORMAT ## _ALPHA(s2); \ \ if (a1 == 0xff) \ { \ *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ } \ else if (s1) \ { \ d = convert_ ## DST_FORMAT ## _to_8888 (*dst); \ s1 = convert_ ## SRC_FORMAT ## _to_8888 (s1); \ a1 ^= 0xff; \ UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \ *dst = convert_8888_to_ ## DST_FORMAT (d); \ } \ dst++; \ \ if (a2 == 0xff) \ { \ *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s2); \ } \ else if (s2) \ { \ d = convert_## DST_FORMAT ## _to_8888 (*dst); \ s2 = convert_## SRC_FORMAT ## _to_8888 (s2); \ a2 ^= 0xff; \ UN8x4_MUL_UN8_ADD_UN8x4 (d, a2, s2); \ *dst = convert_8888_to_ ## DST_FORMAT (d); \ } \ dst++; \ } \ else /* PIXMAN_OP_SRC */ \ { \ *dst++ = convert_ ## SRC_FORMAT ## _to_ ## 
DST_FORMAT (s1); \ *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s2); \ } \ } \ \ if (w & 1) \ { \ x1 = pixman_fixed_to_int (vx); \ s1 = *(src + x1); \ \ if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \ { \ a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \ \ if (a1 == 0xff) \ { \ *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ } \ else if (s1) \ { \ d = convert_## DST_FORMAT ## _to_8888 (*dst); \ s1 = convert_ ## SRC_FORMAT ## _to_8888 (s1); \ a1 ^= 0xff; \ UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \ *dst = convert_8888_to_ ## DST_FORMAT (d); \ } \ dst++; \ } \ else /* PIXMAN_OP_SRC */ \ { \ *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \ } \ } \ } #define FAST_NEAREST_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, mask_type_t, \ dst_type_t, repeat_mode, have_mask, mask_is_solid) \ static void \ fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type_t *dst_line; \ mask_type_t *mask_line; \ src_type_t *src_first_line; \ int y; \ pixman_fixed_t src_width_fixed = pixman_int_to_fixed (src_image->bits.width); \ pixman_fixed_t max_vy; \ pixman_vector_t v; \ pixman_fixed_t vx, vy; \ pixman_fixed_t unit_x, unit_y; \ int32_t left_pad, right_pad; \ \ src_type_t *src; \ dst_type_t *dst; \ mask_type_t solid_mask; \ const mask_type_t *mask = &solid_mask; \ int src_stride, mask_stride, dst_stride; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type_t, dst_stride, dst_line, 1); \ if (have_mask) \ { \ if (mask_is_solid) \ solid_mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); \ else \ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type_t, \ mask_stride, mask_line, 1); \ } \ /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \ * transformed from destination space to source space */ \ PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \ \ /* reference 
point is the center of the pixel */ \ v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \ v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \ v.vector[2] = pixman_fixed_1; \ \ if (!pixman_transform_point_3d (src_image->common.transform, &v)) \ return; \ \ unit_x = src_image->common.transform->matrix[0][0]; \ unit_y = src_image->common.transform->matrix[1][1]; \ \ /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ \ v.vector[0] -= pixman_fixed_e; \ v.vector[1] -= pixman_fixed_e; \ \ vx = v.vector[0]; \ vy = v.vector[1]; \ \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ { \ max_vy = pixman_int_to_fixed (src_image->bits.height); \ \ /* Clamp repeating positions inside the actual samples */ \ repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \ repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \ } \ \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD || \ PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ { \ pad_repeat_get_scanline_bounds (src_image->bits.width, vx, unit_x, \ &width, &left_pad, &right_pad); \ vx += left_pad * unit_x; \ } \ \ while (--height >= 0) \ { \ dst = dst_line; \ dst_line += dst_stride; \ if (have_mask && !mask_is_solid) \ { \ mask = mask_line; \ mask_line += mask_stride; \ } \ \ y = pixman_fixed_to_int (vy); \ vy += unit_y; \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \ { \ repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height); \ src = src_first_line + src_stride * y; \ if (left_pad > 0) \ { \ scanline_func (mask, dst, \ src + src_image->bits.width - src_image->bits.width + 1, \ left_pad, -pixman_fixed_e, 0, src_width_fixed, FALSE); \ } \ if (width > 0) \ { \ scanline_func (mask + (mask_is_solid ? 
0 : left_pad), \ dst + left_pad, src + src_image->bits.width, width, \ vx - src_width_fixed, unit_x, src_width_fixed, FALSE); \ } \ if (right_pad > 0) \ { \ scanline_func (mask + (mask_is_solid ? 0 : left_pad + width), \ dst + left_pad + width, src + src_image->bits.width, \ right_pad, -pixman_fixed_e, 0, src_width_fixed, FALSE); \ } \ } \ else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ { \ static const src_type_t zero[1] = { 0 }; \ if (y < 0 || y >= src_image->bits.height) \ { \ scanline_func (mask, dst, zero + 1, left_pad + width + right_pad, \ -pixman_fixed_e, 0, src_width_fixed, TRUE); \ continue; \ } \ src = src_first_line + src_stride * y; \ if (left_pad > 0) \ { \ scanline_func (mask, dst, zero + 1, left_pad, \ -pixman_fixed_e, 0, src_width_fixed, TRUE); \ } \ if (width > 0) \ { \ scanline_func (mask + (mask_is_solid ? 0 : left_pad), \ dst + left_pad, src + src_image->bits.width, width, \ vx - src_width_fixed, unit_x, src_width_fixed, FALSE); \ } \ if (right_pad > 0) \ { \ scanline_func (mask + (mask_is_solid ? 
0 : left_pad + width), \ dst + left_pad + width, zero + 1, right_pad, \ -pixman_fixed_e, 0, src_width_fixed, TRUE); \ } \ } \ else \ { \ src = src_first_line + src_stride * y; \ scanline_func (mask, dst, src + src_image->bits.width, width, vx - src_width_fixed, \ unit_x, src_width_fixed, FALSE); \ } \ } \ } /* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */ #define FAST_NEAREST_MAINLOOP_COMMON(scale_func_name, scanline_func, src_type_t, mask_type_t, \ dst_type_t, repeat_mode, have_mask, mask_is_solid) \ FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, mask_type_t, \ dst_type_t, repeat_mode, have_mask, mask_is_solid) #define FAST_NEAREST_MAINLOOP_NOMASK(scale_func_name, scanline_func, src_type_t, dst_type_t, \ repeat_mode) \ static force_inline void \ scanline_func##scale_func_name##_wrapper ( \ const uint8_t *mask, \ dst_type_t *dst, \ const src_type_t *src, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t fully_transparent_src) \ { \ scanline_func (dst, src, w, vx, unit_x, max_vx, fully_transparent_src); \ } \ FAST_NEAREST_MAINLOOP_INT (scale_func_name, scanline_func##scale_func_name##_wrapper, \ src_type_t, uint8_t, dst_type_t, repeat_mode, FALSE, FALSE) #define FAST_NEAREST_MAINLOOP(scale_func_name, scanline_func, src_type_t, dst_type_t, \ repeat_mode) \ FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name, scanline_func, src_type_t, \ dst_type_t, repeat_mode) #define FAST_NEAREST(scale_func_name, SRC_FORMAT, DST_FORMAT, \ src_type_t, dst_type_t, OP, repeat_mode) \ FAST_NEAREST_SCANLINE(scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \ SRC_FORMAT, DST_FORMAT, src_type_t, dst_type_t, \ OP, repeat_mode) \ FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name ## _ ## OP, \ scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \ src_type_t, dst_type_t, repeat_mode) #define SCALED_NEAREST_FLAGS \ (FAST_PATH_SCALE_TRANSFORM | \ 
FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NEAREST_FILTER | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_NARROW_FORMAT) #define SIMPLE_NEAREST_FAST_PATH_NORMAL(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_NORMAL_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \ } #define SIMPLE_NEAREST_FAST_PATH_PAD(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_PAD_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \ } #define SIMPLE_NEAREST_FAST_PATH_NONE(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \ } #define SIMPLE_NEAREST_FAST_PATH_COVER(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \ } #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NORMAL(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_NORMAL_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \ } #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_PAD_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \ } #define 
SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \ } #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \ } #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_NORMAL_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \ } #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_PAD_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \ } #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_NEAREST_FLAGS | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \ } #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ 
fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \ } /* Prefer the use of 'cover' variant, because it is faster */ #define SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \ SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_NEAREST_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func), \ SIMPLE_NEAREST_FAST_PATH_NORMAL (op,s,d,func) #define SIMPLE_NEAREST_A8_MASK_FAST_PATH(op,s,d,func) \ SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD (op,s,d,func) #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH(op,s,d,func) \ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD (op,s,d,func), \ SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NORMAL (op,s,d,func) /*****************************************************************************/ /* * Identify 5 zones in each scanline for bilinear scaling. Depending on * whether 2 pixels to be interpolated are fetched from the image itself, * from the padding area around it or from both image and padding area. */ static force_inline void bilinear_pad_repeat_get_scanline_bounds (int32_t source_image_width, pixman_fixed_t vx, pixman_fixed_t unit_x, int32_t * left_pad, int32_t * left_tz, int32_t * width, int32_t * right_tz, int32_t * right_pad) { int width1 = *width, left_pad1, right_pad1; int width2 = *width, left_pad2, right_pad2; pad_repeat_get_scanline_bounds (source_image_width, vx, unit_x, &width1, &left_pad1, &right_pad1); pad_repeat_get_scanline_bounds (source_image_width, vx + pixman_fixed_1, unit_x, &width2, &left_pad2, &right_pad2); *left_pad = left_pad2; *left_tz = left_pad1 - left_pad2; *right_tz = right_pad2 - right_pad1; *right_pad = right_pad1; *width -= *left_pad + *left_tz + *right_tz + *right_pad; } /* * Main loop template for single pass bilinear scaling. 
It needs to be * provided with 'scanline_func' which should do the compositing operation. * The needed function has the following prototype: * * scanline_func (dst_type_t * dst, * const mask_type_ * mask, * const src_type_t * src_top, * const src_type_t * src_bottom, * int32_t width, * int weight_top, * int weight_bottom, * pixman_fixed_t vx, * pixman_fixed_t unit_x, * pixman_fixed_t max_vx, * pixman_bool_t zero_src) * * Where: * dst - destination scanline buffer for storing results * mask - mask buffer (or single value for solid mask) * src_top, src_bottom - two source scanlines * width - number of pixels to process * weight_top - weight of the top row for interpolation * weight_bottom - weight of the bottom row for interpolation * vx - initial position for fetching the first pair of * pixels from the source buffer * unit_x - position increment needed to move to the next pair * of pixels * max_vx - image size as a fixed point value, can be used for * implementing NORMAL repeat (when it is supported) * zero_src - boolean hint variable, which is set to TRUE when * all source pixels are fetched from zero padding * zone for NONE repeat * * Note: normally the sum of 'weight_top' and 'weight_bottom' is equal to * BILINEAR_INTERPOLATION_RANGE, but sometimes it may be less than that * for NONE repeat when handling fuzzy antialiased top or bottom image * edges. Also both top and bottom weight variables are guaranteed to * have value, which is less than BILINEAR_INTERPOLATION_RANGE. * For example, the weights can fit into unsigned byte or be used * with 8-bit SIMD multiplication instructions for 8-bit interpolation * precision. 
*/ #define FAST_BILINEAR_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, mask_type_t, \ dst_type_t, repeat_mode, flags) \ static void \ fast_composite_scaled_bilinear ## scale_func_name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type_t *dst_line; \ mask_type_t *mask_line; \ src_type_t *src_first_line; \ int y1, y2; \ pixman_fixed_t max_vx = INT32_MAX; /* suppress uninitialized variable warning */ \ pixman_vector_t v; \ pixman_fixed_t vx, vy; \ pixman_fixed_t unit_x, unit_y; \ int32_t left_pad, left_tz, right_tz, right_pad; \ \ dst_type_t *dst; \ mask_type_t solid_mask; \ const mask_type_t *mask = &solid_mask; \ int src_stride, mask_stride, dst_stride; \ \ int src_width; \ pixman_fixed_t src_width_fixed; \ int max_x; \ pixman_bool_t need_src_extension; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type_t, dst_stride, dst_line, 1); \ if (flags & FLAG_HAVE_SOLID_MASK) \ { \ solid_mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); \ mask_stride = 0; \ } \ else if (flags & FLAG_HAVE_NON_SOLID_MASK) \ { \ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type_t, \ mask_stride, mask_line, 1); \ } \ \ /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \ * transformed from destination space to source space */ \ PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \ \ /* reference point is the center of the pixel */ \ v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \ v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \ v.vector[2] = pixman_fixed_1; \ \ if (!pixman_transform_point_3d (src_image->common.transform, &v)) \ return; \ \ unit_x = src_image->common.transform->matrix[0][0]; \ unit_y = src_image->common.transform->matrix[1][1]; \ \ v.vector[0] -= pixman_fixed_1 / 2; \ v.vector[1] -= pixman_fixed_1 / 2; \ \ vy = v.vector[1]; \ \ if (PIXMAN_REPEAT_ ## repeat_mode == 
PIXMAN_REPEAT_PAD || \ PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ { \ bilinear_pad_repeat_get_scanline_bounds (src_image->bits.width, v.vector[0], unit_x, \ &left_pad, &left_tz, &width, &right_tz, &right_pad); \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \ { \ /* PAD repeat does not need special handling for 'transition zones' and */ \ /* they can be combined with 'padding zones' safely */ \ left_pad += left_tz; \ right_pad += right_tz; \ left_tz = right_tz = 0; \ } \ v.vector[0] += left_pad * unit_x; \ } \ \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ { \ vx = v.vector[0]; \ repeat (PIXMAN_REPEAT_NORMAL, &vx, pixman_int_to_fixed(src_image->bits.width)); \ max_x = pixman_fixed_to_int (vx + (width - 1) * (int64_t)unit_x) + 1; \ \ if (src_image->bits.width < REPEAT_NORMAL_MIN_WIDTH) \ { \ src_width = 0; \ \ while (src_width < REPEAT_NORMAL_MIN_WIDTH && src_width <= max_x) \ src_width += src_image->bits.width; \ \ need_src_extension = TRUE; \ } \ else \ { \ src_width = src_image->bits.width; \ need_src_extension = FALSE; \ } \ \ src_width_fixed = pixman_int_to_fixed (src_width); \ } \ \ while (--height >= 0) \ { \ int weight1, weight2; \ dst = dst_line; \ dst_line += dst_stride; \ vx = v.vector[0]; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ { \ mask = mask_line; \ mask_line += mask_stride; \ } \ \ y1 = pixman_fixed_to_int (vy); \ weight2 = pixman_fixed_to_bilinear_weight (vy); \ if (weight2) \ { \ /* both weight1 and weight2 are smaller than BILINEAR_INTERPOLATION_RANGE */ \ y2 = y1 + 1; \ weight1 = BILINEAR_INTERPOLATION_RANGE - weight2; \ } \ else \ { \ /* set both top and bottom row to the same scanline and tweak weights */ \ y2 = y1; \ weight1 = weight2 = BILINEAR_INTERPOLATION_RANGE / 2; \ } \ vy += unit_y; \ if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \ { \ src_type_t *src1, *src2; \ src_type_t buf1[2]; \ src_type_t buf2[2]; \ repeat (PIXMAN_REPEAT_PAD, &y1, src_image->bits.height); \ repeat 
(PIXMAN_REPEAT_PAD, &y2, src_image->bits.height); \ src1 = src_first_line + src_stride * y1; \ src2 = src_first_line + src_stride * y2; \ \ if (left_pad > 0) \ { \ buf1[0] = buf1[1] = src1[0]; \ buf2[0] = buf2[1] = src2[0]; \ scanline_func (dst, mask, \ buf1, buf2, left_pad, weight1, weight2, 0, 0, 0, FALSE); \ dst += left_pad; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += left_pad; \ } \ if (width > 0) \ { \ scanline_func (dst, mask, \ src1, src2, width, weight1, weight2, vx, unit_x, 0, FALSE); \ dst += width; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += width; \ } \ if (right_pad > 0) \ { \ buf1[0] = buf1[1] = src1[src_image->bits.width - 1]; \ buf2[0] = buf2[1] = src2[src_image->bits.width - 1]; \ scanline_func (dst, mask, \ buf1, buf2, right_pad, weight1, weight2, 0, 0, 0, FALSE); \ } \ } \ else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \ { \ src_type_t *src1, *src2; \ src_type_t buf1[2]; \ src_type_t buf2[2]; \ /* handle top/bottom zero padding by just setting weights to 0 if needed */ \ if (y1 < 0) \ { \ weight1 = 0; \ y1 = 0; \ } \ if (y1 >= src_image->bits.height) \ { \ weight1 = 0; \ y1 = src_image->bits.height - 1; \ } \ if (y2 < 0) \ { \ weight2 = 0; \ y2 = 0; \ } \ if (y2 >= src_image->bits.height) \ { \ weight2 = 0; \ y2 = src_image->bits.height - 1; \ } \ src1 = src_first_line + src_stride * y1; \ src2 = src_first_line + src_stride * y2; \ \ if (left_pad > 0) \ { \ buf1[0] = buf1[1] = 0; \ buf2[0] = buf2[1] = 0; \ scanline_func (dst, mask, \ buf1, buf2, left_pad, weight1, weight2, 0, 0, 0, TRUE); \ dst += left_pad; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += left_pad; \ } \ if (left_tz > 0) \ { \ buf1[0] = 0; \ buf1[1] = src1[0]; \ buf2[0] = 0; \ buf2[1] = src2[0]; \ scanline_func (dst, mask, \ buf1, buf2, left_tz, weight1, weight2, \ pixman_fixed_frac (vx), unit_x, 0, FALSE); \ dst += left_tz; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += left_tz; \ vx += left_tz * unit_x; \ } \ if (width > 0) \ { \ scanline_func 
(dst, mask, \ src1, src2, width, weight1, weight2, vx, unit_x, 0, FALSE); \ dst += width; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += width; \ vx += width * unit_x; \ } \ if (right_tz > 0) \ { \ buf1[0] = src1[src_image->bits.width - 1]; \ buf1[1] = 0; \ buf2[0] = src2[src_image->bits.width - 1]; \ buf2[1] = 0; \ scanline_func (dst, mask, \ buf1, buf2, right_tz, weight1, weight2, \ pixman_fixed_frac (vx), unit_x, 0, FALSE); \ dst += right_tz; \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += right_tz; \ } \ if (right_pad > 0) \ { \ buf1[0] = buf1[1] = 0; \ buf2[0] = buf2[1] = 0; \ scanline_func (dst, mask, \ buf1, buf2, right_pad, weight1, weight2, 0, 0, 0, TRUE); \ } \ } \ else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \ { \ int32_t num_pixels; \ int32_t width_remain; \ src_type_t * src_line_top; \ src_type_t * src_line_bottom; \ src_type_t buf1[2]; \ src_type_t buf2[2]; \ src_type_t extended_src_line0[REPEAT_NORMAL_MIN_WIDTH*2]; \ src_type_t extended_src_line1[REPEAT_NORMAL_MIN_WIDTH*2]; \ int i, j; \ \ repeat (PIXMAN_REPEAT_NORMAL, &y1, src_image->bits.height); \ repeat (PIXMAN_REPEAT_NORMAL, &y2, src_image->bits.height); \ src_line_top = src_first_line + src_stride * y1; \ src_line_bottom = src_first_line + src_stride * y2; \ \ if (need_src_extension) \ { \ for (i=0; ibits.width; j++, i++) \ { \ extended_src_line0[i] = src_line_top[j]; \ extended_src_line1[i] = src_line_bottom[j]; \ } \ } \ \ src_line_top = &extended_src_line0[0]; \ src_line_bottom = &extended_src_line1[0]; \ } \ \ /* Top & Bottom wrap around buffer */ \ buf1[0] = src_line_top[src_width - 1]; \ buf1[1] = src_line_top[0]; \ buf2[0] = src_line_bottom[src_width - 1]; \ buf2[1] = src_line_bottom[0]; \ \ width_remain = width; \ \ while (width_remain > 0) \ { \ /* We use src_width_fixed because it can make vx in original source range */ \ repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \ \ /* Wrap around part */ \ if (pixman_fixed_to_int (vx) == src_width - 1) \ { \ /* for 
positive unit_x \ * num_pixels = max(n) + 1, where vx + n*unit_x < src_width_fixed \ * \ * vx is in range [0, src_width_fixed - pixman_fixed_e] \ * So we are safe from overflow. \ */ \ num_pixels = ((src_width_fixed - vx - pixman_fixed_e) / unit_x) + 1; \ \ if (num_pixels > width_remain) \ num_pixels = width_remain; \ \ scanline_func (dst, mask, buf1, buf2, num_pixels, \ weight1, weight2, pixman_fixed_frac(vx), \ unit_x, src_width_fixed, FALSE); \ \ width_remain -= num_pixels; \ vx += num_pixels * unit_x; \ dst += num_pixels; \ \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += num_pixels; \ \ repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \ } \ \ /* Normal scanline composite */ \ if (pixman_fixed_to_int (vx) != src_width - 1 && width_remain > 0) \ { \ /* for positive unit_x \ * num_pixels = max(n) + 1, where vx + n*unit_x < (src_width_fixed - 1) \ * \ * vx is in range [0, src_width_fixed - pixman_fixed_e] \ * So we are safe from overflow here. \ */ \ num_pixels = ((src_width_fixed - pixman_fixed_1 - vx - pixman_fixed_e) \ / unit_x) + 1; \ \ if (num_pixels > width_remain) \ num_pixels = width_remain; \ \ scanline_func (dst, mask, src_line_top, src_line_bottom, num_pixels, \ weight1, weight2, vx, unit_x, src_width_fixed, FALSE); \ \ width_remain -= num_pixels; \ vx += num_pixels * unit_x; \ dst += num_pixels; \ \ if (flags & FLAG_HAVE_NON_SOLID_MASK) \ mask += num_pixels; \ } \ } \ } \ else \ { \ scanline_func (dst, mask, src_first_line + src_stride * y1, \ src_first_line + src_stride * y2, width, \ weight1, weight2, vx, unit_x, max_vx, FALSE); \ } \ } \ } /* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */ #define FAST_BILINEAR_MAINLOOP_COMMON(scale_func_name, scanline_func, src_type_t, mask_type_t, \ dst_type_t, repeat_mode, flags) \ FAST_BILINEAR_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, mask_type_t,\ dst_type_t, repeat_mode, flags) #define SCALED_BILINEAR_FLAGS \ (FAST_PATH_SCALE_TRANSFORM | \ 
FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_BILINEAR_FILTER | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_NARROW_FORMAT) #define SIMPLE_BILINEAR_FAST_PATH_PAD(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_PAD_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op, \ } #define SIMPLE_BILINEAR_FAST_PATH_NONE(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op, \ } #define SIMPLE_BILINEAR_FAST_PATH_COVER(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op, \ } #define SIMPLE_BILINEAR_FAST_PATH_NORMAL(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_NORMAL_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_null, 0, \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op, \ } #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_PAD(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_PAD_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op, \ } #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NONE(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op, \ } #define 
SIMPLE_BILINEAR_A8_MASK_FAST_PATH_COVER(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op, \ } #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NORMAL(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_NORMAL_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op, \ } #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_PAD(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_PAD_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op, \ } #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NONE(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_NONE_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op, \ } #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_COVER(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op, \ } #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func) \ { PIXMAN_OP_ ## op, \ PIXMAN_ ## s, \ (SCALED_BILINEAR_FLAGS | \ FAST_PATH_NORMAL_REPEAT | \ FAST_PATH_X_UNIT_POSITIVE), \ PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \ PIXMAN_ ## d, 
FAST_PATH_STD_DEST_FLAGS, \ fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op, \ } /* Prefer the use of 'cover' variant, because it is faster */ #define SIMPLE_BILINEAR_FAST_PATH(op,s,d,func) \ SIMPLE_BILINEAR_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_BILINEAR_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_BILINEAR_FAST_PATH_PAD (op,s,d,func), \ SIMPLE_BILINEAR_FAST_PATH_NORMAL (op,s,d,func) #define SIMPLE_BILINEAR_A8_MASK_FAST_PATH(op,s,d,func) \ SIMPLE_BILINEAR_A8_MASK_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_BILINEAR_A8_MASK_FAST_PATH_PAD (op,s,d,func), \ SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NORMAL (op,s,d,func) #define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH(op,s,d,func) \ SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_PAD (op,s,d,func), \ SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NORMAL (op,s,d,func) #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-linear-gradient.c0000664000175000017500000001707514712446423021127 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. * 2005 Lars Knoll & Zack Rusin, Trolltech * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. 
Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include #include "pixman-private.h" static pixman_bool_t linear_gradient_is_horizontal (pixman_image_t *image, int x, int y, int width, int height) { linear_gradient_t *linear = (linear_gradient_t *)image; pixman_vector_t v; pixman_fixed_32_32_t l; pixman_fixed_48_16_t dx, dy; double inc; if (image->common.transform) { /* projective transformation */ if (image->common.transform->matrix[2][0] != 0 || image->common.transform->matrix[2][1] != 0 || image->common.transform->matrix[2][2] == 0) { return FALSE; } v.vector[0] = image->common.transform->matrix[0][1]; v.vector[1] = image->common.transform->matrix[1][1]; v.vector[2] = image->common.transform->matrix[2][2]; } else { v.vector[0] = 0; v.vector[1] = pixman_fixed_1; v.vector[2] = pixman_fixed_1; } dx = linear->p2.x - linear->p1.x; dy = linear->p2.y - linear->p1.y; l = dx * dx + dy * dy; if (l == 0) return FALSE; /* * compute how much the input of the gradient walked changes * when moving vertically through the whole image */ inc = height * (double) pixman_fixed_1 * pixman_fixed_1 * (dx * v.vector[0] + dy * v.vector[1]) / (v.vector[2] * (double) l); /* check that casting to integer would result in 0 */ if (-1 < inc && inc < 1) return TRUE; return FALSE; } static uint32_t * linear_get_scanline (pixman_iter_t *iter, const uint32_t *mask, int Bpp, 
pixman_gradient_walker_write_t write_pixel, pixman_gradient_walker_fill_t fill_pixel) { pixman_image_t *image = iter->image; int x = iter->x; int y = iter->y; int width = iter->width; uint32_t * buffer = iter->buffer; pixman_vector_t v, unit; pixman_fixed_32_32_t l; pixman_fixed_48_16_t dx, dy; gradient_t *gradient = (gradient_t *)image; linear_gradient_t *linear = (linear_gradient_t *)image; uint32_t *end = buffer + width * (Bpp / 4); pixman_gradient_walker_t walker; _pixman_gradient_walker_init (&walker, gradient, image->common.repeat); /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (image->common.transform) { if (!pixman_transform_point_3d (image->common.transform, &v)) return iter->buffer; unit.vector[0] = image->common.transform->matrix[0][0]; unit.vector[1] = image->common.transform->matrix[1][0]; unit.vector[2] = image->common.transform->matrix[2][0]; } else { unit.vector[0] = pixman_fixed_1; unit.vector[1] = 0; unit.vector[2] = 0; } dx = linear->p2.x - linear->p1.x; dy = linear->p2.y - linear->p1.y; l = dx * dx + dy * dy; if (l == 0 || unit.vector[2] == 0) { /* affine transformation only */ pixman_fixed_32_32_t t, next_inc; double inc; if (l == 0 || v.vector[2] == 0) { t = 0; inc = 0; } else { double invden, v2; invden = pixman_fixed_1 * (double) pixman_fixed_1 / (l * (double) v.vector[2]); v2 = v.vector[2] * (1. 
/ pixman_fixed_1); t = ((dx * v.vector[0] + dy * v.vector[1]) - (dx * linear->p1.x + dy * linear->p1.y) * v2) * invden; inc = (dx * unit.vector[0] + dy * unit.vector[1]) * invden; } next_inc = 0; if (((pixman_fixed_32_32_t )(inc * width)) == 0) { fill_pixel (&walker, t, buffer, end); } else { int i; i = 0; while (buffer < end) { if (!mask || *mask++) { write_pixel (&walker, t + next_inc, buffer); } i++; next_inc = inc * i; buffer += (Bpp / 4); } } } else { /* projective transformation */ double t; t = 0; while (buffer < end) { if (!mask || *mask++) { if (v.vector[2] != 0) { double invden, v2; invden = pixman_fixed_1 * (double) pixman_fixed_1 / (l * (double) v.vector[2]); v2 = v.vector[2] * (1. / pixman_fixed_1); t = ((dx * v.vector[0] + dy * v.vector[1]) - (dx * linear->p1.x + dy * linear->p1.y) * v2) * invden; } write_pixel (&walker, t, buffer); } buffer += (Bpp / 4); v.vector[0] += unit.vector[0]; v.vector[1] += unit.vector[1]; v.vector[2] += unit.vector[2]; } } iter->y++; return iter->buffer; } static uint32_t * linear_get_scanline_narrow (pixman_iter_t *iter, const uint32_t *mask) { return linear_get_scanline (iter, mask, 4, _pixman_gradient_walker_write_narrow, _pixman_gradient_walker_fill_narrow); } static uint32_t * linear_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask) { return linear_get_scanline (iter, NULL, 16, _pixman_gradient_walker_write_wide, _pixman_gradient_walker_fill_wide); } void _pixman_linear_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter) { if (linear_gradient_is_horizontal ( iter->image, iter->x, iter->y, iter->width, iter->height)) { if (iter->iter_flags & ITER_NARROW) linear_get_scanline_narrow (iter, NULL); else linear_get_scanline_wide (iter, NULL); iter->get_scanline = _pixman_iter_get_scanline_noop; } else { if (iter->iter_flags & ITER_NARROW) iter->get_scanline = linear_get_scanline_narrow; else iter->get_scanline = linear_get_scanline_wide; } } PIXMAN_EXPORT pixman_image_t * 
pixman_image_create_linear_gradient (const pixman_point_fixed_t * p1, const pixman_point_fixed_t * p2, const pixman_gradient_stop_t *stops, int n_stops) { pixman_image_t *image; linear_gradient_t *linear; image = _pixman_image_allocate (); if (!image) return NULL; linear = &image->linear; if (!_pixman_init_gradient (&linear->common, stops, n_stops)) { free (image); return NULL; } linear->p1 = *p1; linear->p2 = *p2; image->type = LINEAR; return image; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-matrix.c0000664000175000017500000007027614712446423017370 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ /* * Matrix interfaces */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" #define F(x) pixman_int_to_fixed (x) static force_inline int count_leading_zeros (uint32_t x) { #ifdef HAVE_BUILTIN_CLZ return __builtin_clz (x); #else int n = 0; while (x) { n++; x >>= 1; } return 32 - n; #endif } /* * Large signed/unsigned integer division with rounding for the platforms with * only 64-bit integer data type supported (no 128-bit data type). * * Arguments: * hi, lo - high and low 64-bit parts of the dividend * div - 48-bit divisor * * Returns: lowest 64 bits of the result as a return value and highest 64 * bits of the result to "result_hi" pointer */ /* grade-school unsigned division (128-bit by 48-bit) with rounding to nearest */ static force_inline uint64_t rounded_udiv_128_by_48 (uint64_t hi, uint64_t lo, uint64_t div, uint64_t *result_hi) { uint64_t tmp, remainder, result_lo; assert(div < ((uint64_t)1 << 48)); remainder = hi % div; *result_hi = hi / div; tmp = (remainder << 16) + (lo >> 48); result_lo = tmp / div; remainder = tmp % div; tmp = (remainder << 16) + ((lo >> 32) & 0xFFFF); result_lo = (result_lo << 16) + (tmp / div); remainder = tmp % div; tmp = (remainder << 16) + ((lo >> 16) & 0xFFFF); result_lo = (result_lo << 16) + (tmp / div); remainder = tmp % div; tmp = (remainder << 16) + (lo & 0xFFFF); result_lo = (result_lo << 16) + (tmp / div); remainder = tmp % div; /* round to nearest */ if (remainder * 2 >= div && ++result_lo == 0) *result_hi += 1; return result_lo; } /* signed division (128-bit by 49-bit) with rounding to nearest */ static inline int64_t rounded_sdiv_128_by_49 (int64_t hi, uint64_t lo, int64_t div, int64_t *signed_result_hi) { uint64_t result_lo, result_hi; int sign = 0; if (div < 0) { div = -div; sign ^= 1; } if (hi < 0) { if (lo != 0) hi++; hi = -hi; lo = -lo; sign ^= 1; } result_lo = rounded_udiv_128_by_48 (hi, lo, div, &result_hi); if (sign) { if (result_lo != 0) result_hi++; result_hi = -result_hi; 
result_lo = -result_lo; } if (signed_result_hi) { *signed_result_hi = result_hi; } return result_lo; } /* * Multiply 64.16 fixed point value by (2^scalebits) and convert * to 128-bit integer. */ static force_inline void fixed_64_16_to_int128 (int64_t hi, int64_t lo, int64_t *rhi, int64_t *rlo, int scalebits) { /* separate integer and fractional parts */ hi += lo >> 16; lo &= 0xFFFF; if (scalebits <= 0) { *rlo = hi >> (-scalebits); *rhi = *rlo >> 63; } else { *rhi = hi >> (64 - scalebits); *rlo = (uint64_t)hi << scalebits; if (scalebits < 16) *rlo += lo >> (16 - scalebits); else *rlo += lo << (scalebits - 16); } } /* * Convert 112.16 fixed point value to 48.16 with clamping for the out * of range values. */ static force_inline pixman_fixed_48_16_t fixed_112_16_to_fixed_48_16 (int64_t hi, int64_t lo, pixman_bool_t *clampflag) { if ((lo >> 63) != hi) { *clampflag = TRUE; return hi >= 0 ? INT64_MAX : INT64_MIN; } else { return lo; } } /* * Transform a point with 31.16 fixed point coordinates from the destination * space to a point with 48.16 fixed point coordinates in the source space. * No overflows are possible for affine transformations and the results are * accurate including the least significant bit. Projective transformations * may overflow, in this case the results are just clamped to return maximum * or minimum 48.16 values (so that the caller can at least handle the NONE * and PAD repeats correctly) and the return value is FALSE to indicate that * such clamping has happened. 
*/ PIXMAN_EXPORT pixman_bool_t pixman_transform_point_31_16 (const pixman_transform_t *t, const pixman_vector_48_16_t *v, pixman_vector_48_16_t *result) { pixman_bool_t clampflag = FALSE; int i; int64_t tmp[3][2], divint; uint16_t divfrac; /* input vector values must have no more than 31 bits (including sign) * in the integer part */ assert (v->v[0] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[0] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[1] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[1] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[2] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[2] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); for (i = 0; i < 3; i++) { tmp[i][0] = (int64_t)t->matrix[i][0] * (v->v[0] >> 16); tmp[i][1] = (int64_t)t->matrix[i][0] * (v->v[0] & 0xFFFF); tmp[i][0] += (int64_t)t->matrix[i][1] * (v->v[1] >> 16); tmp[i][1] += (int64_t)t->matrix[i][1] * (v->v[1] & 0xFFFF); tmp[i][0] += (int64_t)t->matrix[i][2] * (v->v[2] >> 16); tmp[i][1] += (int64_t)t->matrix[i][2] * (v->v[2] & 0xFFFF); } /* * separate 64-bit integer and 16-bit fractional parts for the divisor, * which is also scaled by 65536 after fixed point multiplication. 
*/ divint = tmp[2][0] + (tmp[2][1] >> 16); divfrac = tmp[2][1] & 0xFFFF; if (divint == pixman_fixed_1 && divfrac == 0) { /* * this is a simple affine transformation */ result->v[0] = tmp[0][0] + ((tmp[0][1] + 0x8000) >> 16); result->v[1] = tmp[1][0] + ((tmp[1][1] + 0x8000) >> 16); result->v[2] = pixman_fixed_1; } else if (divint == 0 && divfrac == 0) { /* * handle zero divisor (if the values are non-zero, set the * results to maximum positive or minimum negative) */ clampflag = TRUE; result->v[0] = tmp[0][0] + ((tmp[0][1] + 0x8000) >> 16); result->v[1] = tmp[1][0] + ((tmp[1][1] + 0x8000) >> 16); if (result->v[0] > 0) result->v[0] = INT64_MAX; else if (result->v[0] < 0) result->v[0] = INT64_MIN; if (result->v[1] > 0) result->v[1] = INT64_MAX; else if (result->v[1] < 0) result->v[1] = INT64_MIN; } else { /* * projective transformation, analyze the top 32 bits of the divisor */ int32_t hi32divbits = divint >> 32; if (hi32divbits < 0) hi32divbits = ~hi32divbits; if (hi32divbits == 0) { /* the divisor is small, we can actually keep all the bits */ int64_t hi, rhi, lo, rlo; int64_t div = ((uint64_t)divint << 16) + divfrac; fixed_64_16_to_int128 (tmp[0][0], tmp[0][1], &hi, &lo, 32); rlo = rounded_sdiv_128_by_49 (hi, lo, div, &rhi); result->v[0] = fixed_112_16_to_fixed_48_16 (rhi, rlo, &clampflag); fixed_64_16_to_int128 (tmp[1][0], tmp[1][1], &hi, &lo, 32); rlo = rounded_sdiv_128_by_49 (hi, lo, div, &rhi); result->v[1] = fixed_112_16_to_fixed_48_16 (rhi, rlo, &clampflag); } else { /* the divisor needs to be reduced to 48 bits */ int64_t hi, rhi, lo, rlo, div; int shift = 32 - count_leading_zeros (hi32divbits); fixed_64_16_to_int128 (divint, divfrac, &hi, &div, 16 - shift); fixed_64_16_to_int128 (tmp[0][0], tmp[0][1], &hi, &lo, 32 - shift); rlo = rounded_sdiv_128_by_49 (hi, lo, div, &rhi); result->v[0] = fixed_112_16_to_fixed_48_16 (rhi, rlo, &clampflag); fixed_64_16_to_int128 (tmp[1][0], tmp[1][1], &hi, &lo, 32 - shift); rlo = rounded_sdiv_128_by_49 (hi, lo, div, &rhi); 
result->v[1] = fixed_112_16_to_fixed_48_16 (rhi, rlo, &clampflag); } } result->v[2] = pixman_fixed_1; return !clampflag; } PIXMAN_EXPORT void pixman_transform_point_31_16_affine (const pixman_transform_t *t, const pixman_vector_48_16_t *v, pixman_vector_48_16_t *result) { int64_t hi0, lo0, hi1, lo1; /* input vector values must have no more than 31 bits (including sign) * in the integer part */ assert (v->v[0] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[0] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[1] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[1] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); hi0 = (int64_t)t->matrix[0][0] * (v->v[0] >> 16); lo0 = (int64_t)t->matrix[0][0] * (v->v[0] & 0xFFFF); hi0 += (int64_t)t->matrix[0][1] * (v->v[1] >> 16); lo0 += (int64_t)t->matrix[0][1] * (v->v[1] & 0xFFFF); hi0 += (int64_t)t->matrix[0][2]; hi1 = (int64_t)t->matrix[1][0] * (v->v[0] >> 16); lo1 = (int64_t)t->matrix[1][0] * (v->v[0] & 0xFFFF); hi1 += (int64_t)t->matrix[1][1] * (v->v[1] >> 16); lo1 += (int64_t)t->matrix[1][1] * (v->v[1] & 0xFFFF); hi1 += (int64_t)t->matrix[1][2]; result->v[0] = hi0 + ((lo0 + 0x8000) >> 16); result->v[1] = hi1 + ((lo1 + 0x8000) >> 16); result->v[2] = pixman_fixed_1; } PIXMAN_EXPORT void pixman_transform_point_31_16_3d (const pixman_transform_t *t, const pixman_vector_48_16_t *v, pixman_vector_48_16_t *result) { int i; int64_t tmp[3][2]; /* input vector values must have no more than 31 bits (including sign) * in the integer part */ assert (v->v[0] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[0] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[1] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[1] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[2] < ((pixman_fixed_48_16_t)1 << (30 + 16))); assert (v->v[2] >= -((pixman_fixed_48_16_t)1 << (30 + 16))); for (i = 0; i < 3; i++) { tmp[i][0] = (int64_t)t->matrix[i][0] * (v->v[0] >> 16); tmp[i][1] = (int64_t)t->matrix[i][0] * (v->v[0] 
& 0xFFFF); tmp[i][0] += (int64_t)t->matrix[i][1] * (v->v[1] >> 16); tmp[i][1] += (int64_t)t->matrix[i][1] * (v->v[1] & 0xFFFF); tmp[i][0] += (int64_t)t->matrix[i][2] * (v->v[2] >> 16); tmp[i][1] += (int64_t)t->matrix[i][2] * (v->v[2] & 0xFFFF); } result->v[0] = tmp[0][0] + ((tmp[0][1] + 0x8000) >> 16); result->v[1] = tmp[1][0] + ((tmp[1][1] + 0x8000) >> 16); result->v[2] = tmp[2][0] + ((tmp[2][1] + 0x8000) >> 16); } PIXMAN_EXPORT void pixman_transform_init_identity (struct pixman_transform *matrix) { int i; memset (matrix, '\0', sizeof (struct pixman_transform)); for (i = 0; i < 3; i++) matrix->matrix[i][i] = F (1); } typedef pixman_fixed_32_32_t pixman_fixed_34_30_t; PIXMAN_EXPORT pixman_bool_t pixman_transform_point_3d (const struct pixman_transform *transform, struct pixman_vector * vector) { pixman_vector_48_16_t tmp; tmp.v[0] = vector->vector[0]; tmp.v[1] = vector->vector[1]; tmp.v[2] = vector->vector[2]; pixman_transform_point_31_16_3d (transform, &tmp, &tmp); vector->vector[0] = tmp.v[0]; vector->vector[1] = tmp.v[1]; vector->vector[2] = tmp.v[2]; return vector->vector[0] == tmp.v[0] && vector->vector[1] == tmp.v[1] && vector->vector[2] == tmp.v[2]; } PIXMAN_EXPORT pixman_bool_t pixman_transform_point (const struct pixman_transform *transform, struct pixman_vector * vector) { pixman_vector_48_16_t tmp; tmp.v[0] = vector->vector[0]; tmp.v[1] = vector->vector[1]; tmp.v[2] = vector->vector[2]; if (!pixman_transform_point_31_16 (transform, &tmp, &tmp)) return FALSE; vector->vector[0] = tmp.v[0]; vector->vector[1] = tmp.v[1]; vector->vector[2] = tmp.v[2]; return vector->vector[0] == tmp.v[0] && vector->vector[1] == tmp.v[1] && vector->vector[2] == tmp.v[2]; } PIXMAN_EXPORT pixman_bool_t pixman_transform_multiply (struct pixman_transform * dst, const struct pixman_transform *l, const struct pixman_transform *r) { struct pixman_transform d; int dx, dy; int o; for (dy = 0; dy < 3; dy++) { for (dx = 0; dx < 3; dx++) { pixman_fixed_48_16_t v; pixman_fixed_32_32_t 
partial; v = 0; for (o = 0; o < 3; o++) { partial = (pixman_fixed_32_32_t) l->matrix[dy][o] * (pixman_fixed_32_32_t) r->matrix[o][dx]; v += (partial + 0x8000) >> 16; } if (v > pixman_max_fixed_48_16 || v < pixman_min_fixed_48_16) return FALSE; d.matrix[dy][dx] = (pixman_fixed_t) v; } } *dst = d; return TRUE; } PIXMAN_EXPORT void pixman_transform_init_scale (struct pixman_transform *t, pixman_fixed_t sx, pixman_fixed_t sy) { memset (t, '\0', sizeof (struct pixman_transform)); t->matrix[0][0] = sx; t->matrix[1][1] = sy; t->matrix[2][2] = F (1); } static pixman_fixed_t fixed_inverse (pixman_fixed_t x) { return (pixman_fixed_t) ((((pixman_fixed_48_16_t) F (1)) * F (1)) / x); } PIXMAN_EXPORT pixman_bool_t pixman_transform_scale (struct pixman_transform *forward, struct pixman_transform *reverse, pixman_fixed_t sx, pixman_fixed_t sy) { struct pixman_transform t; if (sx == 0 || sy == 0) return FALSE; if (forward) { pixman_transform_init_scale (&t, sx, sy); if (!pixman_transform_multiply (forward, &t, forward)) return FALSE; } if (reverse) { pixman_transform_init_scale (&t, fixed_inverse (sx), fixed_inverse (sy)); if (!pixman_transform_multiply (reverse, reverse, &t)) return FALSE; } return TRUE; } PIXMAN_EXPORT void pixman_transform_init_rotate (struct pixman_transform *t, pixman_fixed_t c, pixman_fixed_t s) { memset (t, '\0', sizeof (struct pixman_transform)); t->matrix[0][0] = c; t->matrix[0][1] = -s; t->matrix[1][0] = s; t->matrix[1][1] = c; t->matrix[2][2] = F (1); } PIXMAN_EXPORT pixman_bool_t pixman_transform_rotate (struct pixman_transform *forward, struct pixman_transform *reverse, pixman_fixed_t c, pixman_fixed_t s) { struct pixman_transform t; if (forward) { pixman_transform_init_rotate (&t, c, s); if (!pixman_transform_multiply (forward, &t, forward)) return FALSE; } if (reverse) { pixman_transform_init_rotate (&t, c, -s); if (!pixman_transform_multiply (reverse, reverse, &t)) return FALSE; } return TRUE; } PIXMAN_EXPORT void pixman_transform_init_translate 
(struct pixman_transform *t, pixman_fixed_t tx, pixman_fixed_t ty) { memset (t, '\0', sizeof (struct pixman_transform)); t->matrix[0][0] = F (1); t->matrix[0][2] = tx; t->matrix[1][1] = F (1); t->matrix[1][2] = ty; t->matrix[2][2] = F (1); } PIXMAN_EXPORT pixman_bool_t pixman_transform_translate (struct pixman_transform *forward, struct pixman_transform *reverse, pixman_fixed_t tx, pixman_fixed_t ty) { struct pixman_transform t; if (forward) { pixman_transform_init_translate (&t, tx, ty); if (!pixman_transform_multiply (forward, &t, forward)) return FALSE; } if (reverse) { pixman_transform_init_translate (&t, -tx, -ty); if (!pixman_transform_multiply (reverse, reverse, &t)) return FALSE; } return TRUE; } PIXMAN_EXPORT pixman_bool_t pixman_transform_bounds (const struct pixman_transform *matrix, struct pixman_box16 * b) { struct pixman_vector v[4]; int i; int x1, y1, x2, y2; v[0].vector[0] = F (b->x1); v[0].vector[1] = F (b->y1); v[0].vector[2] = F (1); v[1].vector[0] = F (b->x2); v[1].vector[1] = F (b->y1); v[1].vector[2] = F (1); v[2].vector[0] = F (b->x2); v[2].vector[1] = F (b->y2); v[2].vector[2] = F (1); v[3].vector[0] = F (b->x1); v[3].vector[1] = F (b->y2); v[3].vector[2] = F (1); for (i = 0; i < 4; i++) { if (!pixman_transform_point (matrix, &v[i])) return FALSE; x1 = pixman_fixed_to_int (v[i].vector[0]); y1 = pixman_fixed_to_int (v[i].vector[1]); x2 = pixman_fixed_to_int (pixman_fixed_ceil (v[i].vector[0])); y2 = pixman_fixed_to_int (pixman_fixed_ceil (v[i].vector[1])); if (i == 0) { b->x1 = x1; b->y1 = y1; b->x2 = x2; b->y2 = y2; } else { if (x1 < b->x1) b->x1 = x1; if (y1 < b->y1) b->y1 = y1; if (x2 > b->x2) b->x2 = x2; if (y2 > b->y2) b->y2 = y2; } } return TRUE; } PIXMAN_EXPORT pixman_bool_t pixman_transform_invert (struct pixman_transform * dst, const struct pixman_transform *src) { struct pixman_f_transform m; pixman_f_transform_from_pixman_transform (&m, src); if (!pixman_f_transform_invert (&m, &m)) return FALSE; if 
(!pixman_transform_from_pixman_f_transform (dst, &m)) return FALSE; return TRUE; } static pixman_bool_t within_epsilon (pixman_fixed_t a, pixman_fixed_t b, pixman_fixed_t epsilon) { pixman_fixed_t t = a - b; if (t < 0) t = -t; return t <= epsilon; } #define EPSILON (pixman_fixed_t) (2) #define IS_SAME(a, b) (within_epsilon (a, b, EPSILON)) #define IS_ZERO(a) (within_epsilon (a, 0, EPSILON)) #define IS_ONE(a) (within_epsilon (a, F (1), EPSILON)) #define IS_UNIT(a) \ (within_epsilon (a, F (1), EPSILON) || \ within_epsilon (a, F (-1), EPSILON) || \ IS_ZERO (a)) #define IS_INT(a) (IS_ZERO (pixman_fixed_frac (a))) PIXMAN_EXPORT pixman_bool_t pixman_transform_is_identity (const struct pixman_transform *t) { return (IS_SAME (t->matrix[0][0], t->matrix[1][1]) && IS_SAME (t->matrix[0][0], t->matrix[2][2]) && !IS_ZERO (t->matrix[0][0]) && IS_ZERO (t->matrix[0][1]) && IS_ZERO (t->matrix[0][2]) && IS_ZERO (t->matrix[1][0]) && IS_ZERO (t->matrix[1][2]) && IS_ZERO (t->matrix[2][0]) && IS_ZERO (t->matrix[2][1])); } PIXMAN_EXPORT pixman_bool_t pixman_transform_is_scale (const struct pixman_transform *t) { return (!IS_ZERO (t->matrix[0][0]) && IS_ZERO (t->matrix[0][1]) && IS_ZERO (t->matrix[0][2]) && IS_ZERO (t->matrix[1][0]) && !IS_ZERO (t->matrix[1][1]) && IS_ZERO (t->matrix[1][2]) && IS_ZERO (t->matrix[2][0]) && IS_ZERO (t->matrix[2][1]) && !IS_ZERO (t->matrix[2][2])); } PIXMAN_EXPORT pixman_bool_t pixman_transform_is_int_translate (const struct pixman_transform *t) { return (IS_ONE (t->matrix[0][0]) && IS_ZERO (t->matrix[0][1]) && IS_INT (t->matrix[0][2]) && IS_ZERO (t->matrix[1][0]) && IS_ONE (t->matrix[1][1]) && IS_INT (t->matrix[1][2]) && IS_ZERO (t->matrix[2][0]) && IS_ZERO (t->matrix[2][1]) && IS_ONE (t->matrix[2][2])); } PIXMAN_EXPORT pixman_bool_t pixman_transform_is_inverse (const struct pixman_transform *a, const struct pixman_transform *b) { struct pixman_transform t; if (!pixman_transform_multiply (&t, a, b)) return FALSE; return pixman_transform_is_identity (&t); } 
PIXMAN_EXPORT void pixman_f_transform_from_pixman_transform (struct pixman_f_transform * ft, const struct pixman_transform *t) { int i, j; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) ft->m[j][i] = pixman_fixed_to_double (t->matrix[j][i]); } } PIXMAN_EXPORT pixman_bool_t pixman_transform_from_pixman_f_transform (struct pixman_transform * t, const struct pixman_f_transform *ft) { int i, j; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) { double d = ft->m[j][i]; if (d < -32767.0 || d > 32767.0) return FALSE; d = d * 65536.0 + 0.5; t->matrix[j][i] = (pixman_fixed_t) floor (d); } } return TRUE; } PIXMAN_EXPORT pixman_bool_t pixman_f_transform_invert (struct pixman_f_transform * dst, const struct pixman_f_transform *src) { static const int a[3] = { 2, 2, 1 }; static const int b[3] = { 1, 0, 0 }; pixman_f_transform_t d; double det; int i, j; det = 0; for (i = 0; i < 3; i++) { double p; int ai = a[i]; int bi = b[i]; p = src->m[i][0] * (src->m[ai][2] * src->m[bi][1] - src->m[ai][1] * src->m[bi][2]); if (i == 1) p = -p; det += p; } if (det == 0) return FALSE; det = 1 / det; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) { double p; int ai = a[i]; int aj = a[j]; int bi = b[i]; int bj = b[j]; p = (src->m[ai][aj] * src->m[bi][bj] - src->m[ai][bj] * src->m[bi][aj]); if (((i + j) & 1) != 0) p = -p; d.m[j][i] = det * p; } } *dst = d; return TRUE; } PIXMAN_EXPORT pixman_bool_t pixman_f_transform_point (const struct pixman_f_transform *t, struct pixman_f_vector * v) { struct pixman_f_vector result; int i, j; double a; for (j = 0; j < 3; j++) { a = 0; for (i = 0; i < 3; i++) a += t->m[j][i] * v->v[i]; result.v[j] = a; } if (!result.v[2]) return FALSE; for (j = 0; j < 2; j++) v->v[j] = result.v[j] / result.v[2]; v->v[2] = 1; return TRUE; } PIXMAN_EXPORT void pixman_f_transform_point_3d (const struct pixman_f_transform *t, struct pixman_f_vector * v) { struct pixman_f_vector result; int i, j; double a; for (j = 0; j < 3; j++) { a = 0; for (i = 0; i < 3; i++) a += t->m[j][i] 
* v->v[i]; result.v[j] = a; } *v = result; } PIXMAN_EXPORT void pixman_f_transform_multiply (struct pixman_f_transform * dst, const struct pixman_f_transform *l, const struct pixman_f_transform *r) { struct pixman_f_transform d; int dx, dy; int o; for (dy = 0; dy < 3; dy++) { for (dx = 0; dx < 3; dx++) { double v = 0; for (o = 0; o < 3; o++) v += l->m[dy][o] * r->m[o][dx]; d.m[dy][dx] = v; } } *dst = d; } PIXMAN_EXPORT void pixman_f_transform_init_scale (struct pixman_f_transform *t, double sx, double sy) { t->m[0][0] = sx; t->m[0][1] = 0; t->m[0][2] = 0; t->m[1][0] = 0; t->m[1][1] = sy; t->m[1][2] = 0; t->m[2][0] = 0; t->m[2][1] = 0; t->m[2][2] = 1; } PIXMAN_EXPORT pixman_bool_t pixman_f_transform_scale (struct pixman_f_transform *forward, struct pixman_f_transform *reverse, double sx, double sy) { struct pixman_f_transform t; if (sx == 0 || sy == 0) return FALSE; if (forward) { pixman_f_transform_init_scale (&t, sx, sy); pixman_f_transform_multiply (forward, &t, forward); } if (reverse) { pixman_f_transform_init_scale (&t, 1 / sx, 1 / sy); pixman_f_transform_multiply (reverse, reverse, &t); } return TRUE; } PIXMAN_EXPORT void pixman_f_transform_init_rotate (struct pixman_f_transform *t, double c, double s) { t->m[0][0] = c; t->m[0][1] = -s; t->m[0][2] = 0; t->m[1][0] = s; t->m[1][1] = c; t->m[1][2] = 0; t->m[2][0] = 0; t->m[2][1] = 0; t->m[2][2] = 1; } PIXMAN_EXPORT pixman_bool_t pixman_f_transform_rotate (struct pixman_f_transform *forward, struct pixman_f_transform *reverse, double c, double s) { struct pixman_f_transform t; if (forward) { pixman_f_transform_init_rotate (&t, c, s); pixman_f_transform_multiply (forward, &t, forward); } if (reverse) { pixman_f_transform_init_rotate (&t, c, -s); pixman_f_transform_multiply (reverse, reverse, &t); } return TRUE; } PIXMAN_EXPORT void pixman_f_transform_init_translate (struct pixman_f_transform *t, double tx, double ty) { t->m[0][0] = 1; t->m[0][1] = 0; t->m[0][2] = tx; t->m[1][0] = 0; t->m[1][1] = 1; t->m[1][2] = 
ty; t->m[2][0] = 0; t->m[2][1] = 0; t->m[2][2] = 1; } PIXMAN_EXPORT pixman_bool_t pixman_f_transform_translate (struct pixman_f_transform *forward, struct pixman_f_transform *reverse, double tx, double ty) { struct pixman_f_transform t; if (forward) { pixman_f_transform_init_translate (&t, tx, ty); pixman_f_transform_multiply (forward, &t, forward); } if (reverse) { pixman_f_transform_init_translate (&t, -tx, -ty); pixman_f_transform_multiply (reverse, reverse, &t); } return TRUE; } PIXMAN_EXPORT pixman_bool_t pixman_f_transform_bounds (const struct pixman_f_transform *t, struct pixman_box16 * b) { struct pixman_f_vector v[4]; int i; int x1, y1, x2, y2; v[0].v[0] = b->x1; v[0].v[1] = b->y1; v[0].v[2] = 1; v[1].v[0] = b->x2; v[1].v[1] = b->y1; v[1].v[2] = 1; v[2].v[0] = b->x2; v[2].v[1] = b->y2; v[2].v[2] = 1; v[3].v[0] = b->x1; v[3].v[1] = b->y2; v[3].v[2] = 1; for (i = 0; i < 4; i++) { if (!pixman_f_transform_point (t, &v[i])) return FALSE; x1 = floor (v[i].v[0]); y1 = floor (v[i].v[1]); x2 = ceil (v[i].v[0]); y2 = ceil (v[i].v[1]); if (i == 0) { b->x1 = x1; b->y1 = y1; b->x2 = x2; b->y2 = y2; } else { if (x1 < b->x1) b->x1 = x1; if (y1 < b->y1) b->y1 = y1; if (x2 > b->x2) b->x2 = x2; if (y2 > b->y2) b->y2 = y2; } } return TRUE; } PIXMAN_EXPORT void pixman_f_transform_init_identity (struct pixman_f_transform *t) { int i, j; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) t->m[j][i] = i == j ? 1 : 0; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mips-dspr2-asm.S0000664000175000017500000035363514712446423020625 0ustar00mattst88mattst88/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Author: Nemanja Lukic (nemanja.lukic@rt-rk.com) */ #include "pixman-private.h" #include "pixman-mips-dspr2-asm.h" LEAF_MIPS_DSPR2(pixman_fill_buff16_mips) /* * a0 - *dest * a1 - count (bytes) * a2 - value to fill buffer with */ beqz a1, 3f andi t1, a0, 0x0002 beqz t1, 0f /* check if address is 4-byte aligned */ nop sh a2, 0(a0) addiu a0, a0, 2 addiu a1, a1, -2 0: srl t1, a1, 5 /* t1 how many multiples of 32 bytes */ replv.ph a2, a2 /* replicate fill value (16bit) in a2 */ beqz t1, 2f nop 1: addiu t1, t1, -1 beqz t1, 11f addiu a1, a1, -32 pref 30, 32(a0) sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) b 1b addiu a0, a0, 32 11: sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) addiu a0, a0, 32 2: blez a1, 3f addiu a1, a1, -2 sh a2, 0(a0) b 2b addiu a0, a0, 2 3: jr ra nop END(pixman_fill_buff16_mips) LEAF_MIPS32R2(pixman_fill_buff32_mips) /* * a0 - *dest * a1 - count (bytes) * a2 - value to fill buffer with */ beqz a1, 3f nop srl t1, a1, 5 /* t1 how many multiples of 32 bytes */ beqz t1, 2f nop 1: addiu t1, t1, -1 beqz t1, 11f addiu a1, a1, -32 pref 30, 32(a0) sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) b 1b addiu a0, a0, 32 11: sw a2, 0(a0) sw a2, 4(a0) sw a2, 8(a0) sw a2, 12(a0) sw a2, 16(a0) sw a2, 20(a0) sw a2, 24(a0) sw a2, 28(a0) addiu a0, a0, 32 2: blez a1, 3f addiu a1, a1, -4 sw a2, 0(a0) b 2b addiu a0, a0, 4 3: jr ra nop END(pixman_fill_buff32_mips) LEAF_MIPS_DSPR2(pixman_composite_src_8888_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - w */ beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001f001f 1: lw t0, 0(a1) lw t1, 4(a1) addiu a1, a1, 8 addiu a2, a2, -2 CONVERT_2x8888_TO_2x0565 t0, t1, t2, t3, t4, t5, t6, t7, t8 sh t2, 0(a0) sh t3, 2(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 4 2: beqz a2, 3f nop lw 
t0, 0(a1) CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 sh t1, 0(a0) 3: j ra nop END(pixman_composite_src_8888_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_0565_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (r5g6b5) * a2 - w */ beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop li t4, 0x07e007e0 li t5, 0x001F001F 1: lhu t0, 0(a1) lhu t1, 2(a1) addiu a1, a1, 4 addiu a2, a2, -2 CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 sw t2, 0(a0) sw t3, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lhu t0, 0(a1) CONVERT_1x0565_TO_1x8888 t0, t1, t2, t3 sw t1, 0(a0) 3: j ra nop END(pixman_composite_src_0565_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_x888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (x8r8g8b8) * a2 - w */ beqz a2, 4f nop li t9, 0xff000000 srl t8, a2, 3 /* t1 = how many multiples of 8 src pixels */ beqz t8, 3f /* branch if less than 8 src pixels */ nop 1: addiu t8, t8, -1 beqz t8, 2f addiu a2, a2, -8 pref 0, 32(a1) lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) addiu a1, a1, 32 or t0, t0, t9 or t1, t1, t9 or t2, t2, t9 or t3, t3, t9 or t4, t4, t9 or t5, t5, t9 or t6, t6, t9 or t7, t7, t9 pref 30, 32(a0) sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) b 1b addiu a0, a0, 32 2: lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) addiu a1, a1, 32 or t0, t0, t9 or t1, t1, t9 or t2, t2, t9 or t3, t3, t9 or t4, t4, t9 or t5, t5, t9 or t6, t6, t9 or t7, t7, t9 sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) beqz a2, 4f addiu a0, a0, 32 3: lw t0, 0(a1) addiu a1, a1, 4 addiu a2, a2, -1 or t1, t0, t9 sw t1, 0(a0) bnez a2, 3b addiu a0, a0, 4 4: jr ra nop END(pixman_composite_src_x888_8888_asm_mips) #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) 
LEAF_MIPS_DSPR2(pixman_composite_src_0888_8888_rev_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (b8g8r8) * a2 - w */ beqz a2, 6f nop lui t8, 0xff00; srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */ beqz t9, 4f /* branch if less than 4 src pixels */ nop li t0, 0x1 li t1, 0x2 li t2, 0x3 andi t3, a1, 0x3 beq t3, t0, 1f nop beq t3, t1, 2f nop beq t3, t2, 3f nop 0: beqz t9, 4f addiu t9, t9, -1 lw t0, 0(a1) /* t0 = R2 | B1 | G1 | R1 */ lw t1, 4(a1) /* t1 = G3 | R3 | B2 | G2 */ lw t2, 8(a1) /* t2 = B4 | G4 | R4 | B3 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B1 | R2 | R1 | G1 */ wsbh t1, t1 /* t1 = R3 | G3 | G2 | B2 */ wsbh t2, t2 /* t2 = G4 | B4 | B3 | R4 */ packrl.ph t3, t1, t0 /* t3 = G2 | B2 | B1 | R2 */ packrl.ph t4, t0, t0 /* t4 = R1 | G1 | B1 | R2 */ rotr t3, t3, 16 /* t3 = B1 | R2 | G2 | B2 */ or t3, t3, t8 /* t3 = FF | R2 | G2 | B2 */ srl t4, t4, 8 /* t4 = 0 | R1 | G1 | B1 */ or t4, t4, t8 /* t4 = FF | R1 | G1 | B1 */ packrl.ph t5, t2, t1 /* t5 = B3 | R4 | R3 | G3 */ rotr t5, t5, 24 /* t5 = R4 | R3 | G3 | B3 */ or t5, t5, t8 /* t5 = FF | R3 | G3 | B3 */ rotr t2, t2, 16 /* t2 = B3 | R4 | G4 | B4 */ or t2, t2, t8 /* t5 = FF | R3 | G3 | B3 */ sw t4, 0(a0) sw t3, 4(a0) sw t5, 8(a0) sw t2, 12(a0) b 0b addiu a0, a0, 16 1: lbu t6, 0(a1) /* t6 = 0 | 0 | 0 | R1 */ lhu t7, 1(a1) /* t7 = 0 | 0 | B1 | G1 */ sll t6, t6, 16 /* t6 = 0 | R1 | 0 | 0 */ wsbh t7, t7 /* t7 = 0 | 0 | G1 | B1 */ or t7, t6, t7 /* t7 = 0 | R1 | G1 | B1 */ 11: beqz t9, 4f addiu t9, t9, -1 lw t0, 3(a1) /* t0 = R3 | B2 | G2 | R2 */ lw t1, 7(a1) /* t1 = G4 | R4 | B3 | G3 */ lw t2, 11(a1) /* t2 = B5 | G5 | R5 | B4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B2 | R3 | R2 | G2 */ wsbh t1, t1 /* t1 = R4 | G4 | G3 | B3 */ wsbh t2, t2 /* t2 = G5 | B5 | B4 | R5 */ packrl.ph t3, t1, t0 /* t3 = G3 | B3 | B2 | R3 */ packrl.ph t4, t2, t1 /* t4 = B4 | R5 | R4 | G4 */ rotr t0, t0, 24 /* t0 = R3 | R2 | G2 | B2 */ rotr t3, t3, 16 /* t3 = B2 | R3 | G3 | B3 */ rotr t4, t4, 24 /* t4 = 
R5 | R4 | G4 | B4 */ or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */ or t0, t0, t8 /* t0 = FF | R2 | G2 | B2 */ or t3, t3, t8 /* t1 = FF | R3 | G3 | B3 */ or t4, t4, t8 /* t3 = FF | R4 | G4 | B4 */ sw t7, 0(a0) sw t0, 4(a0) sw t3, 8(a0) sw t4, 12(a0) rotr t7, t2, 16 /* t7 = xx | R5 | G5 | B5 */ b 11b addiu a0, a0, 16 2: lhu t7, 0(a1) /* t7 = 0 | 0 | G1 | R1 */ wsbh t7, t7 /* t7 = 0 | 0 | R1 | G1 */ 21: beqz t9, 4f addiu t9, t9, -1 lw t0, 2(a1) /* t0 = B2 | G2 | R2 | B1 */ lw t1, 6(a1) /* t1 = R4 | B3 | G3 | R3 */ lw t2, 10(a1) /* t2 = G5 | R5 | B4 | G4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = G2 | B2 | B1 | R2 */ wsbh t1, t1 /* t1 = B3 | R4 | R3 | G3 */ wsbh t2, t2 /* t2 = R5 | G5 | G4 | B4 */ precr_sra.ph.w t7, t0, 0 /* t7 = R1 | G1 | B1 | R2 */ rotr t0, t0, 16 /* t0 = B1 | R2 | G2 | B2 */ packrl.ph t3, t2, t1 /* t3 = G4 | B4 | B3 | R4 */ rotr t1, t1, 24 /* t1 = R4 | R3 | G3 | B3 */ srl t7, t7, 8 /* t7 = 0 | R1 | G1 | B1 */ rotr t3, t3, 16 /* t3 = B3 | R4 | G4 | B4 */ or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */ or t0, t0, t8 /* t0 = FF | R2 | G2 | B2 */ or t1, t1, t8 /* t1 = FF | R3 | G3 | B3 */ or t3, t3, t8 /* t3 = FF | R4 | G4 | B4 */ sw t7, 0(a0) sw t0, 4(a0) sw t1, 8(a0) sw t3, 12(a0) srl t7, t2, 16 /* t7 = 0 | 0 | R5 | G5 */ b 21b addiu a0, a0, 16 3: lbu t7, 0(a1) /* t7 = 0 | 0 | 0 | R1 */ 31: beqz t9, 4f addiu t9, t9, -1 lw t0, 1(a1) /* t0 = G2 | R2 | B1 | G1 */ lw t1, 5(a1) /* t1 = B3 | G3 | R3 | B2 */ lw t2, 9(a1) /* t2 = R5 | B4 | G4 | R4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = R2 | G2 | G1 | B1 */ wsbh t1, t1 /* t1 = G3 | B3 | B2 | R3 */ wsbh t2, t2 /* t2 = B4 | R5 | R4 | G4 */ precr_sra.ph.w t7, t0, 0 /* t7 = xx | R1 | G1 | B1 */ packrl.ph t3, t1, t0 /* t3 = B2 | R3 | R2 | G2 */ rotr t1, t1, 16 /* t1 = B2 | R3 | G3 | B3 */ rotr t4, t2, 24 /* t4 = R5 | R4 | G4 | B4 */ rotr t3, t3, 24 /* t3 = R3 | R2 | G2 | B2 */ or t7, t7, t8 /* t7 = FF | R1 | G1 | B1 */ or t3, t3, t8 /* t3 = FF | R2 | G2 | B2 */ or t1, t1, t8 /* t1 = FF | R3 
| G3 | B3 */ or t4, t4, t8 /* t4 = FF | R4 | G4 | B4 */ sw t7, 0(a0) sw t3, 4(a0) sw t1, 8(a0) sw t4, 12(a0) srl t7, t2, 16 /* t7 = 0 | 0 | xx | R5 */ b 31b addiu a0, a0, 16 4: beqz a2, 6f nop 5: lbu t0, 0(a1) /* t0 = 0 | 0 | 0 | R */ lbu t1, 1(a1) /* t1 = 0 | 0 | 0 | G */ lbu t2, 2(a1) /* t2 = 0 | 0 | 0 | B */ addiu a1, a1, 3 sll t0, t0, 16 /* t2 = 0 | R | 0 | 0 */ sll t1, t1, 8 /* t1 = 0 | 0 | G | 0 */ or t2, t2, t1 /* t2 = 0 | 0 | G | B */ or t2, t2, t0 /* t2 = 0 | R | G | B */ or t2, t2, t8 /* t2 = FF | R | G | B */ sw t2, 0(a0) addiu a2, a2, -1 bnez a2, 5b addiu a0, a0, 4 6: j ra nop END(pixman_composite_src_0888_8888_rev_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_0888_0565_rev_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (b8g8r8) * a2 - w */ SAVE_REGS_ON_STACK 0, v0, v1 beqz a2, 6f nop li t6, 0xf800f800 li t7, 0x07e007e0 li t8, 0x001F001F srl t9, a2, 2 /* t9 = how many multiples of 4 src pixels */ beqz t9, 4f /* branch if less than 4 src pixels */ nop li t0, 0x1 li t1, 0x2 li t2, 0x3 andi t3, a1, 0x3 beq t3, t0, 1f nop beq t3, t1, 2f nop beq t3, t2, 3f nop 0: beqz t9, 4f addiu t9, t9, -1 lw t0, 0(a1) /* t0 = R2 | B1 | G1 | R1 */ lw t1, 4(a1) /* t1 = G3 | R3 | B2 | G2 */ lw t2, 8(a1) /* t2 = B4 | G4 | R4 | B3 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B1 | R2 | R1 | G1 */ wsbh t1, t1 /* t1 = R3 | G3 | G2 | B2 */ wsbh t2, t2 /* t2 = G4 | B4 | B3 | R4 */ packrl.ph t3, t1, t0 /* t3 = G2 | B2 | B1 | R2 */ packrl.ph t4, t0, t0 /* t4 = R1 | G1 | B1 | R2 */ rotr t3, t3, 16 /* t3 = B1 | R2 | G2 | B2 */ srl t4, t4, 8 /* t4 = 0 | R1 | G1 | B1 */ packrl.ph t5, t2, t1 /* t5 = B3 | R4 | R3 | G3 */ rotr t5, t5, 24 /* t5 = R4 | R3 | G3 | B3 */ rotr t2, t2, 16 /* t2 = B3 | R4 | G4 | B4 */ CONVERT_2x8888_TO_2x0565 t4, t3, t4, t3, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t5, t2, t5, t2, t6, t7, t8, v0, v1 sh t4, 0(a0) sh t3, 2(a0) sh t5, 4(a0) sh t2, 6(a0) b 0b addiu a0, a0, 8 1: lbu t4, 0(a1) /* t4 = 0 | 0 | 0 | R1 */ lhu t5, 1(a1) /* t5 = 0 | 0 | B1 | G1 
*/ sll t4, t4, 16 /* t4 = 0 | R1 | 0 | 0 */ wsbh t5, t5 /* t5 = 0 | 0 | G1 | B1 */ or t5, t4, t5 /* t5 = 0 | R1 | G1 | B1 */ 11: beqz t9, 4f addiu t9, t9, -1 lw t0, 3(a1) /* t0 = R3 | B2 | G2 | R2 */ lw t1, 7(a1) /* t1 = G4 | R4 | B3 | G3 */ lw t2, 11(a1) /* t2 = B5 | G5 | R5 | B4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = B2 | R3 | R2 | G2 */ wsbh t1, t1 /* t1 = R4 | G4 | G3 | B3 */ wsbh t2, t2 /* t2 = G5 | B5 | B4 | R5 */ packrl.ph t3, t1, t0 /* t3 = G3 | B3 | B2 | R3 */ packrl.ph t4, t2, t1 /* t4 = B4 | R5 | R4 | G4 */ rotr t0, t0, 24 /* t0 = R3 | R2 | G2 | B2 */ rotr t3, t3, 16 /* t3 = B2 | R3 | G3 | B3 */ rotr t4, t4, 24 /* t4 = R5 | R4 | G4 | B4 */ CONVERT_2x8888_TO_2x0565 t5, t0, t5, t0, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t3, t4, t3, t4, t6, t7, t8, v0, v1 sh t5, 0(a0) sh t0, 2(a0) sh t3, 4(a0) sh t4, 6(a0) rotr t5, t2, 16 /* t5 = xx | R5 | G5 | B5 */ b 11b addiu a0, a0, 8 2: lhu t5, 0(a1) /* t5 = 0 | 0 | G1 | R1 */ wsbh t5, t5 /* t5 = 0 | 0 | R1 | G1 */ 21: beqz t9, 4f addiu t9, t9, -1 lw t0, 2(a1) /* t0 = B2 | G2 | R2 | B1 */ lw t1, 6(a1) /* t1 = R4 | B3 | G3 | R3 */ lw t2, 10(a1) /* t2 = G5 | R5 | B4 | G4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = G2 | B2 | B1 | R2 */ wsbh t1, t1 /* t1 = B3 | R4 | R3 | G3 */ wsbh t2, t2 /* t2 = R5 | G5 | G4 | B4 */ precr_sra.ph.w t5, t0, 0 /* t5 = R1 | G1 | B1 | R2 */ rotr t0, t0, 16 /* t0 = B1 | R2 | G2 | B2 */ packrl.ph t3, t2, t1 /* t3 = G4 | B4 | B3 | R4 */ rotr t1, t1, 24 /* t1 = R4 | R3 | G3 | B3 */ srl t5, t5, 8 /* t5 = 0 | R1 | G1 | B1 */ rotr t3, t3, 16 /* t3 = B3 | R4 | G4 | B4 */ CONVERT_2x8888_TO_2x0565 t5, t0, t5, t0, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t1, t3, t1, t3, t6, t7, t8, v0, v1 sh t5, 0(a0) sh t0, 2(a0) sh t1, 4(a0) sh t3, 6(a0) srl t5, t2, 16 /* t5 = 0 | 0 | R5 | G5 */ b 21b addiu a0, a0, 8 3: lbu t5, 0(a1) /* t5 = 0 | 0 | 0 | R1 */ 31: beqz t9, 4f addiu t9, t9, -1 lw t0, 1(a1) /* t0 = G2 | R2 | B1 | G1 */ lw t1, 5(a1) /* t1 = B3 | G3 | R3 | B2 */ lw t2, 
9(a1) /* t2 = R5 | B4 | G4 | R4 */ addiu a1, a1, 12 addiu a2, a2, -4 wsbh t0, t0 /* t0 = R2 | G2 | G1 | B1 */ wsbh t1, t1 /* t1 = G3 | B3 | B2 | R3 */ wsbh t2, t2 /* t2 = B4 | R5 | R4 | G4 */ precr_sra.ph.w t5, t0, 0 /* t5 = xx | R1 | G1 | B1 */ packrl.ph t3, t1, t0 /* t3 = B2 | R3 | R2 | G2 */ rotr t1, t1, 16 /* t1 = B2 | R3 | G3 | B3 */ rotr t4, t2, 24 /* t4 = R5 | R4 | G4 | B4 */ rotr t3, t3, 24 /* t3 = R3 | R2 | G2 | B2 */ CONVERT_2x8888_TO_2x0565 t5, t3, t5, t3, t6, t7, t8, v0, v1 CONVERT_2x8888_TO_2x0565 t1, t4, t1, t4, t6, t7, t8, v0, v1 sh t5, 0(a0) sh t3, 2(a0) sh t1, 4(a0) sh t4, 6(a0) srl t5, t2, 16 /* t5 = 0 | 0 | xx | R5 */ b 31b addiu a0, a0, 8 4: beqz a2, 6f nop 5: lbu t0, 0(a1) /* t0 = 0 | 0 | 0 | R */ lbu t1, 1(a1) /* t1 = 0 | 0 | 0 | G */ lbu t2, 2(a1) /* t2 = 0 | 0 | 0 | B */ addiu a1, a1, 3 sll t0, t0, 16 /* t2 = 0 | R | 0 | 0 */ sll t1, t1, 8 /* t1 = 0 | 0 | G | 0 */ or t2, t2, t1 /* t2 = 0 | 0 | G | B */ or t2, t2, t0 /* t2 = 0 | R | G | B */ CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5 sh t3, 0(a0) addiu a2, a2, -1 bnez a2, 5b addiu a0, a0, 2 6: RESTORE_REGS_FROM_STACK 0, v0, v1 j ra nop END(pixman_composite_src_0888_0565_rev_asm_mips) #endif LEAF_MIPS_DSPR2(pixman_composite_src_pixbuf_8888_asm_mips) /* * a0 - dst (a8b8g8r8) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 0, v0 li v0, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) lw t1, 4(a1) addiu a1, a1, 8 addiu a2, a2, -2 srl t2, t0, 24 srl t3, t1, 24 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t2, t3, t0, t1, v0, t4, t5, t6, t7, t8, t9 sll t0, t0, 8 sll t1, t1, 8 andi t2, t2, 0xff andi t3, t3, 0xff or t0, t0, t2 or t1, t1, t3 wsbh t0, t0 wsbh t1, t1 rotr t0, t0, 16 rotr t1, t1, 16 sw t0, 0(a0) sw t1, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lw t0, 0(a1) srl t1, t0, 24 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t3, t4, t5 sll t0, t0, 8 andi t1, t1, 0xff or t0, t0, t1 wsbh t0, t0 rotr t0, t0, 16 sw t0, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop 
END(pixman_composite_src_pixbuf_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_rpixbuf_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 0, v0 li v0, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) lw t1, 4(a1) addiu a1, a1, 8 addiu a2, a2, -2 srl t2, t0, 24 srl t3, t1, 24 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t2, t3, t0, t1, v0, t4, t5, t6, t7, t8, t9 sll t0, t0, 8 sll t1, t1, 8 andi t2, t2, 0xff andi t3, t3, 0xff or t0, t0, t2 or t1, t1, t3 rotr t0, t0, 8 rotr t1, t1, 8 sw t0, 0(a0) sw t1, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lw t0, 0(a1) srl t1, t0, 24 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t3, t4, t5 sll t0, t0, 8 andi t1, t1, 0xff or t0, t0, t1 rotr t0, t0, 8 sw t0, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_src_rpixbuf_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_n_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0 li v0, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: /* a1 = source (32bit constant) */ lbu t0, 0(a2) /* t2 = mask (a8) */ lbu t1, 1(a2) /* t3 = mask (a8) */ addiu a2, a2, 2 MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, t2, t3, v0, t4, t5, t6, t7, t8, t9 sw t2, 0(a0) sw t3, 4(a0) addiu a3, a3, -2 addiu t2, a3, -1 bgtz t2, 1b addiu a0, a0, 8 beqz a3, 3f nop 2: lbu t0, 0(a2) addiu a2, a2, 1 MIPS_UN8x4_MUL_UN8 a1, t0, t1, v0, t3, t4, t5 sw t1, 0(a0) addiu a3, a3, -1 addiu a0, a0, 4 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_src_n_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_src_n_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ li t9, 0x00ff00ff beqz a3, 3f nop srl t7, a3, 2 /* t7 = how many multiples of 4 dst pixels */ beqz t7, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz t7, 1f addiu t7, t7, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) 
lbu t3, 3(a2) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr.qb.ph t0, t3, t1 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t2, t2, t3 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop srl t8, a1, 24 2: lbu t0, 0(a2) addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0x00ff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: j ra nop END(pixman_composite_src_n_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_8888_ca_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8r8g8b8) * a3 - w */ beqz a3, 8f nop SAVE_REGS_ON_STACK 8, s0, s1, s2, s3, s4, s5 li t6, 0xff addiu t7, zero, -1 /* t7 = 0xffffffff */ srl t8, a1, 24 /* t8 = srca */ li t9, 0x00ff00ff addiu t1, a3, -1 beqz t1, 4f /* last pixel */ nop 0: lw t0, 0(a2) /* t0 = mask */ lw t1, 4(a2) /* t1 = mask */ addiu a3, a3, -2 /* w = w - 2 */ or t2, t0, t1 beqz t2, 3f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 8 and t2, t0, t1 beq t2, t7, 1f /* if (t0 == 0xffffffff) && (t1 == 0xffffffff) */ nop //if(ma) lw t2, 0(a0) /* t2 = dst */ lw t3, 4(a0) /* t3 = dst */ MIPS_2xUN8x4_MUL_2xUN8x4 a1, a1, t0, t1, t4, t5, t9, s0, s1, s2, s3, s4, s5 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t8, t8, t0, t1, t9, s0, s1, s2, s3, s4, s5 not t0, t0 not t1, t1 MIPS_2xUN8x4_MUL_2xUN8x4 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, t4, t2 addu_s.qb t3, t5, t3 sw t2, 0(a0) sw t3, 4(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 8 b 4f nop 1: //if (t0 == 0xffffffff) && (t1 == 0xffffffff): beq t8, t6, 2f /* if (srca == 0xff) */ nop lw t2, 0(a0) /* t2 = dst */ lw t3, 4(a0) /* t3 = dst */ not t0, a1 not t1, a1 srl t0, t0, 24 srl t1, t1, 24 
MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, a1, t2 addu_s.qb t3, a1, t3 sw t2, 0(a0) sw t3, 4(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 8 b 4f nop 2: sw a1, 0(a0) sw a1, 4(a0) 3: addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 8 4: beqz a3, 7f nop /* a1 = src */ lw t0, 0(a2) /* t0 = mask */ beqz t0, 7f /* if (t0 == 0) */ nop beq t0, t7, 5f /* if (t0 == 0xffffffff) */ nop //if(ma) lw t1, 0(a0) /* t1 = dst */ MIPS_UN8x4_MUL_UN8x4 a1, t0, t2, t9, t3, t4, t5, s0 MIPS_UN8x4_MUL_UN8 t0, t8, t0, t9, t3, t4, t5 not t0, t0 MIPS_UN8x4_MUL_UN8x4 t1, t0, t1, t9, t3, t4, t5, s0 addu_s.qb t1, t2, t1 sw t1, 0(a0) RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 j ra nop 5: //if (t0 == 0xffffffff) beq t8, t6, 6f /* if (srca == 0xff) */ nop lw t1, 0(a0) /* t1 = dst */ not t0, a1 srl t0, t0, 24 MIPS_UN8x4_MUL_UN8 t1, t0, t1, t9, t2, t3, t4 addu_s.qb t1, a1, t1 sw t1, 0(a0) RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 j ra nop 6: sw a1, 0(a0) 7: RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 8: j ra nop END(pixman_composite_over_n_8888_8888_ca_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_0565_ca_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (32bit constant) * a2 - mask (a8r8g8b8) * a3 - w */ beqz a3, 8f nop SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 li t6, 0xff addiu t7, zero, -1 /* t7 = 0xffffffff */ srl t8, a1, 24 /* t8 = srca */ li t9, 0x00ff00ff li s6, 0xf800f800 li s7, 0x07e007e0 li s8, 0x001F001F addiu t1, a3, -1 beqz t1, 4f /* last pixel */ nop 0: lw t0, 0(a2) /* t0 = mask */ lw t1, 4(a2) /* t1 = mask */ addiu a3, a3, -2 /* w = w - 2 */ or t2, t0, t1 beqz t2, 3f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 8 and t2, t0, t1 beq t2, t7, 1f /* if (t0 == 0xffffffff) && (t1 == 0xffffffff) */ nop //if(ma) lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ MIPS_2xUN8x4_MUL_2xUN8x4 a1, a1, t0, t1, t4, t5, t9, s0, s1, s2, s3, s4, s5 MIPS_2xUN8x4_MUL_2xUN8 t0, t1, t8, t8, t0, t1, t9, s0, s1, s2, s3, s4, 
s5 not t0, t0 not t1, t1 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, s7, s8, s0, s1, s2, s3 MIPS_2xUN8x4_MUL_2xUN8x4 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, t4, t2 addu_s.qb t3, t5, t3 CONVERT_2x8888_TO_2x0565 t2, t3, t2, t3, s6, s7, s8, s0, s1 sh t2, 0(a0) sh t3, 2(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 4 b 4f nop 1: //if (t0 == 0xffffffff) && (t1 == 0xffffffff): beq t8, t6, 2f /* if (srca == 0xff) */ nop lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ not t0, a1 not t1, a1 srl t0, t0, 24 srl t1, t1, 24 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, s7, s8, s0, s1, s2, s3 MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t2, t3, t9, s0, s1, s2, s3, s4, s5 addu_s.qb t2, a1, t2 addu_s.qb t3, a1, t3 CONVERT_2x8888_TO_2x0565 t2, t3, t2, t3, s6, s7, s8, s0, s1 sh t2, 0(a0) sh t3, 2(a0) addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 4 b 4f nop 2: CONVERT_1x8888_TO_1x0565 a1, t2, s0, s1 sh t2, 0(a0) sh t2, 2(a0) 3: addiu t1, a3, -1 bgtz t1, 0b addiu a0, a0, 4 4: beqz a3, 7f nop /* a1 = src */ lw t0, 0(a2) /* t0 = mask */ beqz t0, 7f /* if (t0 == 0) */ nop beq t0, t7, 5f /* if (t0 == 0xffffffff) */ nop //if(ma) lhu t1, 0(a0) /* t1 = dst */ MIPS_UN8x4_MUL_UN8x4 a1, t0, t2, t9, t3, t4, t5, s0 MIPS_UN8x4_MUL_UN8 t0, t8, t0, t9, t3, t4, t5 not t0, t0 CONVERT_1x0565_TO_1x8888 t1, s1, s2, s3 MIPS_UN8x4_MUL_UN8x4 s1, t0, s1, t9, t3, t4, t5, s0 addu_s.qb s1, t2, s1 CONVERT_1x8888_TO_1x0565 s1, t1, s0, s2 sh t1, 0(a0) RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop 5: //if (t0 == 0xffffffff) beq t8, t6, 6f /* if (srca == 0xff) */ nop lhu t1, 0(a0) /* t1 = dst */ not t0, a1 srl t0, t0, 24 CONVERT_1x0565_TO_1x8888 t1, s1, s2, s3 MIPS_UN8x4_MUL_UN8 s1, t0, s1, t9, t2, t3, t4 addu_s.qb s1, a1, s1 CONVERT_1x8888_TO_1x0565 s1, t1, s0, s2 sh t1, 0(a0) RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop 6: CONVERT_1x8888_TO_1x0565 a1, t1, s0, s2 sh t1, 0(a0) 7: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7, s8 8: 
j ra nop END(pixman_composite_over_n_8888_0565_ca_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0 li t9, 0x00ff00ff beqz a3, 3f nop srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */ beqz v0, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz v0, 1f addiu v0, v0, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t0, t2, t3 not t6, t0 preceu.ph.qbl t7, t6 preceu.ph.qbr t6, t6 muleu_s.ph.qbl t2, t1, t7 muleu_s.ph.qbr t3, t1, t6 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t1, t2, t3 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop srl t8, a1, 24 2: lbu t0, 0(a2) lbu t1, 0(a0) addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0x00ff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 not t3, t2 andi t3, t3, 0x00ff mul t4, t1, t3 shra_r.ph t5, t4, 8 andi t5, t5, 0x00ff addq.ph t4, t4, t5 shra_r.ph t4, t4, 8 andi t4, t4, 0x00ff addu_s.qb t2, t2, t4 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_over_n_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 4, 
s0, s1, s2, s3, s4 beqz a3, 4f nop li t4, 0x00ff00ff li t5, 0xff addiu t0, a3, -1 beqz t0, 3f /* last pixel */ srl t6, a1, 24 /* t6 = srca */ not s4, a1 beq t5, t6, 2f /* if (srca == 0xff) */ srl s4, s4, 24 1: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 111f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 and t3, t0, t1 lw t2, 0(a0) /* t2 = dst */ beq t3, t5, 11f /* if (t0 == 0xff) && (t1 == 0xff) */ lw t3, 4(a0) /* t3 = dst */ MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, s0, s1, t4, t6, t7, t8, t9, s2, s3 not s2, s0 not s3, s1 srl s2, s2, 24 srl s3, s3, 24 MIPS_2xUN8x4_MUL_2xUN8 t2, t3, s2, s3, t2, t3, t4, t0, t1, t6, t7, t8, t9 addu_s.qb s2, t2, s0 addu_s.qb s3, t3, s1 sw s2, 0(a0) b 111f sw s3, 4(a0) 11: MIPS_2xUN8x4_MUL_2xUN8 t2, t3, s4, s4, t2, t3, t4, t0, t1, t6, t7, t8, t9 addu_s.qb s2, t2, a1 addu_s.qb s3, t3, a1 sw s2, 0(a0) sw s3, 4(a0) 111: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 1b addiu a0, a0, 8 b 3f nop 2: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 222f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 and t3, t0, t1 beq t3, t5, 22f /* if (t0 == 0xff) && (t1 == 0xff) */ nop lw t2, 0(a0) /* t2 = dst */ lw t3, 4(a0) /* t3 = dst */ OVER_2x8888_2x8_2x8888 a1, a1, t0, t1, t2, t3, \ t6, t7, t4, t8, t9, s0, s1, s2, s3 sw t6, 0(a0) b 222f sw t7, 4(a0) 22: sw a1, 0(a0) sw a1, 4(a0) 222: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 2b addiu a0, a0, 8 3: blez a3, 4f nop /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ beqz t0, 4f /* if (t0 == 0) */ addiu a2, a2, 1 move t3, a1 beq t0, t5, 31f /* if (t0 == 0xff) */ lw t1, 0(a0) /* t1 = dst */ MIPS_UN8x4_MUL_UN8 a1, t0, t3, t4, t6, t7, t8 31: not t2, t3 srl t2, t2, 24 MIPS_UN8x4_MUL_UN8 t1, t2, t1, t4, t6, t7, t8 addu_s.qb t2, t1, t3 sw t2, 0(a0) 4: RESTORE_REGS_FROM_STACK 4, s0, s1, s2, s3, s4 j ra nop END(pixman_composite_over_n_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8_0565_asm_mips) /* * a0 - dst 
(r5g6b5) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 beqz a3, 4f nop li t4, 0x00ff00ff li t5, 0xff li t6, 0xf800f800 li t7, 0x07e007e0 li t8, 0x001F001F addiu t1, a3, -1 beqz t1, 3f /* last pixel */ srl t0, a1, 24 /* t0 = srca */ not v0, a1 beq t0, t5, 2f /* if (srca == 0xff) */ srl v0, v0, 24 1: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 111f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, t7, t8, t9, s2, s3, s4 and t9, t0, t1 beq t9, t5, 11f /* if (t0 == 0xff) && (t1 == 0xff) */ nop MIPS_2xUN8x4_MUL_2xUN8 a1, a1, t0, t1, s2, s3, t4, t9, s4, s5, s6, s7, s8 not s4, s2 not s5, s3 srl s4, s4, 24 srl s5, s5, 24 MIPS_2xUN8x4_MUL_2xUN8 s0, s1, s4, s5, s0, s1, t4, t9, t0, t1, s6, s7, s8 addu_s.qb s4, s2, s0 addu_s.qb s5, s3, s1 CONVERT_2x8888_TO_2x0565 s4, s5, t2, t3, t6, t7, t8, s0, s1 sh t2, 0(a0) b 111f sh t3, 2(a0) 11: MIPS_2xUN8x4_MUL_2xUN8 s0, s1, v0, v0, s0, s1, t4, t9, t0, t1, s6, s7, s8 addu_s.qb s4, a1, s0 addu_s.qb s5, a1, s1 CONVERT_2x8888_TO_2x0565 s4, s5, t2, t3, t6, t7, t8, s0, s1 sh t2, 0(a0) sh t3, 2(a0) 111: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 1b addiu a0, a0, 4 b 3f nop 2: CONVERT_1x8888_TO_1x0565 a1, s0, s1, s2 21: /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ lbu t1, 1(a2) /* t1 = mask */ or t2, t0, t1 beqz t2, 222f /* if (t0 == 0) && (t1 == 0) */ addiu a2, a2, 2 and t9, t0, t1 move s2, s0 beq t9, t5, 22f /* if (t0 == 0xff) && (t2 == 0xff) */ move s3, s0 lhu t2, 0(a0) /* t2 = dst */ lhu t3, 2(a0) /* t3 = dst */ CONVERT_2x0565_TO_2x8888 t2, t3, s2, s3, t7, t8, s4, s5, s6, s7 OVER_2x8888_2x8_2x8888 a1, a1, t0, t1, s2, s3, \ t2, t3, t4, t9, s4, s5, s6, s7, s8 CONVERT_2x8888_TO_2x0565 t2, t3, s2, s3, t6, t7, t8, s4, s5 22: sh s2, 0(a0) sh s3, 2(a0) 222: addiu a3, a3, -2 addiu t0, a3, -1 bgtz t0, 21b addiu a0, a0, 4 3: blez 
a3, 4f nop /* a1 = src */ lbu t0, 0(a2) /* t0 = mask */ beqz t0, 4f /* if (t0 == 0) */ nop lhu t1, 0(a0) /* t1 = dst */ CONVERT_1x0565_TO_1x8888 t1, t2, t3, t7 beq t0, t5, 31f /* if (t0 == 0xff) */ move t3, a1 MIPS_UN8x4_MUL_UN8 a1, t0, t3, t4, t7, t8, t9 31: not t6, t3 srl t6, t6, 24 MIPS_UN8x4_MUL_UN8 t2, t6, t2, t4, t7, t8, t9 addu_s.qb t1, t2, t3 CONVERT_1x8888_TO_1x0565 t1, t2, t3, t7 sh t2, 0(a0) 4: RESTORE_REGS_FROM_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop END(pixman_composite_over_n_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 0, s0 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 srl a2, a2, 24 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a1, a1, 8 OVER_2x8888_2x8_2x8888 t0, t1, a2, a2, t2, t3, \ t5, t6, t4, t7, t8, t9, t0, t1, s0 sw t5, 0(a0) sw t6, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ OVER_8888_8_8888 t0, a2, t1, t3, t4, t5, t6, t7, t8 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0 j ra nop END(pixman_composite_over_8888_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_n_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2, s3 li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F beqz a3, 3f nop srl a2, a2, 24 addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t2 = 
destination (r5g6b5) */ addiu a1, a1, 8 CONVERT_2x0565_TO_2x8888 t2, t3, t4, t5, t8, t9, s0, s1, t2, t3 OVER_2x8888_2x8_2x8888 t0, t1, a2, a2, t4, t5, \ t2, t3, t6, t0, t1, s0, s1, s2, s3 CONVERT_2x8888_TO_2x0565 t2, t3, t4, t5, t7, t8, t9, s0, s1 sh t4, 0(a0) sh t5, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t1, t2, t4, t5 OVER_8888_8_8888 t0, a2, t2, t1, t6, t3, t4, t5, t7 CONVERT_1x8888_TO_1x0565 t1, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3 j ra nop END(pixman_composite_over_8888_n_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_0565_n_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5 li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F beqz a3, 3f nop srl a2, a2, 24 addiu t1, a3, -1 beqz t1, 2f nop 1: lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lhu t1, 2(a1) /* t1 = source (r5g6b5) */ /* a2 = mask (32bit constant) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t3 = destination (r5g6b5) */ addiu a1, a1, 4 CONVERT_2x0565_TO_2x8888 t0, t1, t4, t5, t8, t9, s0, s1, s2, s3 CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, t8, t9, s2, s3, s4, s5 OVER_2x8888_2x8_2x8888 t4, t5, a2, a2, s0, s1, \ t0, t1, t6, s2, s3, s4, s5, t4, t5 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t7, t8, t9, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lhu t0, 0(a1) /* t0 = source (r5g6b5) */ /* a2 = mask (32bit constant) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t2, t4, t5 CONVERT_1x0565_TO_1x8888 t1, t3, t4, t5 OVER_8888_8_8888 t2, a2, t3, t0, t6, t1, t4, t5, t7 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, 
s2, s3, s4, s5 j ra nop END(pixman_composite_over_0565_n_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 2 OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, t5, t6, \ t7, t8, t4, t9, s0, s1, t0, t1, t2 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ OVER_8888_8_8888 t0, t1, t2, t3, t4, t5, t6, t7, t8 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1 j ra nop END(pixman_composite_over_8888_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5 li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t4, 0(a0) /* t4 = destination (r5g6b5) */ lhu t5, 2(a0) /* t5 = destination (r5g6b5) */ addiu a1, a1, 8 addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t4, t5, s0, s1, t8, t9, s2, s3, s4, s5 OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, s0, s1, \ t4, t5, t6, s2, s3, s4, s5, t0, t1 CONVERT_2x8888_TO_2x0565 t4, t5, s0, s1, t7, t8, t9, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ 
lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t2, t3, t4, t5 OVER_8888_8_8888 t0, t1, t3, t2, t6, t4, t5, t7, t8 CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_8888_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_0565_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5 li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001F001F li t7, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lhu t1, 2(a1) /* t1 = source (r5g6b5) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t8, 0(a0) /* t8 = destination (r5g6b5) */ lhu t9, 2(a0) /* t9 = destination (r5g6b5) */ addiu a1, a1, 4 addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5 CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, t0, t1 OVER_2x8888_2x8_2x8888 s0, s1, t2, t3, s2, s3, \ t0, t1, t7, s4, s5, t8, t9, s0, s1 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5 CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6 OVER_8888_8_8888 t3, t1, t4, t0, t7, t2, t5, t6, t8 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_0565_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8r8g8b8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) 
*/ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lw t2, 0(a2) /* t2 = mask (a8r8g8b8) */ lw t3, 4(a2) /* t3 = mask (a8r8g8b8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 8 srl t2, t2, 24 srl t3, t3, 24 OVER_2x8888_2x8_2x8888 t0, t1, t2, t3, t5, t6, t7, t8, t4, t9, s0, s1, s2, t0, t1 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 0(a2) /* t1 = mask (a8r8g8b8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ srl t1, t1, 24 OVER_8888_8_8888 t0, t1, t2, t3, t4, t5, t6, t7, t8 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_over_8888_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a1, a1, 8 not t5, t0 srl t5, t5, 24 not t6, t1 srl t6, t6, 24 or t7, t5, t6 beqz t7, 11f or t8, t0, t1 beqz t8, 12f MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t5, t6, t7, t8, t4, t9, s0, s1, s2, t2, t3 addu_s.qb t0, t7, t0 addu_s.qb t1, t8, t1 11: sw t0, 0(a0) sw t1, 4(a0) 12: addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a2, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ addiu a1, a1, 4 not t2, t0 srl t2, t2, 24 beqz t2, 21f nop beqz t0, 3f MIPS_UN8x4_MUL_UN8 t1, t2, t3, t4, t5, t6, t7 addu_s.qb t0, t3, t0 21: sw t0, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_over_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_8888_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - w */ 
SAVE_REGS_ON_STACK 8, s0, s1, s2, s3, s4, s5 li t4, 0x00ff00ff li s3, 0xf800f800 li s4, 0x07e007e0 li s5, 0x001F001F beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t3 = destination (r5g6b5) */ addiu a1, a1, 8 not t5, t0 srl t5, t5, 24 not t6, t1 srl t6, t6, 24 or t7, t5, t6 beqz t7, 11f or t8, t0, t1 beqz t8, 12f CONVERT_2x0565_TO_2x8888 t2, t3, s0, s1, s4, s5, t7, t8, t9, s2 MIPS_2xUN8x4_MUL_2xUN8 s0, s1, t5, t6, t7, t8, t4, t9, t2, t3, s2, s0, s1 addu_s.qb t0, t7, t0 addu_s.qb t1, t8, t1 11: CONVERT_2x8888_TO_2x0565 t0, t1, t7, t8, s3, s4, s5, t2, t3 sh t7, 0(a0) sh t8, 2(a0) 12: addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a2, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ addiu a1, a1, 4 not t2, t0 srl t2, t2, 24 beqz t2, 21f nop beqz t0, 3f CONVERT_1x0565_TO_1x8888 t1, s0, t8, t9 MIPS_UN8x4_MUL_UN8 s0, t2, t3, t4, t5, t6, t7 addu_s.qb t0, t3, t0 21: CONVERT_1x8888_TO_1x0565 t0, s0, t8, t9 sh s0, 0(a0) 3: RESTORE_REGS_FROM_STACK 8, s0, s1, s2, s3, s4, s5 j ra nop END(pixman_composite_over_8888_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (32bit constant) * a2 - w */ beqz a2, 5f nop not t0, a1 srl t0, t0, 24 bgtz t0, 1f nop CONVERT_1x8888_TO_1x0565 a1, t1, t2, t3 0: sh t1, 0(a0) addiu a2, a2, -1 bgtz a2, 0b addiu a0, a0, 2 j ra nop 1: SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff li t5, 0xf800f800 li t6, 0x07e007e0 li t7, 0x001F001F addiu t1, a2, -1 beqz t1, 3f nop 2: lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ lhu t2, 2(a0) /* t2 = destination (r5g6b5) */ CONVERT_2x0565_TO_2x8888 t1, t2, t3, t8, t6, t7, t9, s0, s1, s2 MIPS_2xUN8x4_MUL_2xUN8 t3, t8, t0, t0, t1, t2, t4, t9, s0, s1, s2, t3, t8 addu_s.qb t1, t1, a1 addu_s.qb t2, t2, a1 CONVERT_2x8888_TO_2x0565 t1, t2, t3, t8, t5, 
t6, t7, s0, s1 sh t3, 0(a0) sh t8, 2(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 2b addiu a0, a0, 4 3: beqz a2, 4f nop lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t1, t2, s0, s1 MIPS_UN8x4_MUL_UN8 t2, t0, t1, t4, s0, s1, s2 addu_s.qb t1, t1, a1 CONVERT_1x8888_TO_1x0565 t1, t2, s0, s1 sh t2, 0(a0) 4: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 5: j ra nop END(pixman_composite_over_n_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_over_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - w */ beqz a2, 5f nop not t0, a1 srl t0, t0, 24 bgtz t0, 1f nop 0: sw a1, 0(a0) addiu a2, a2, -1 bgtz a2, 0b addiu a0, a0, 4 j ra nop 1: SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff addiu t1, a2, -1 beqz t1, 3f nop 2: lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t0, t7, t8, t4, t9, s0, s1, s2, t2, t3 addu_s.qb t7, t7, a1 addu_s.qb t8, t8, a1 sw t7, 0(a0) sw t8, 4(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 2b addiu a0, a0, 8 3: beqz a2, 4f nop lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8 t1, t0, t3, t4, t5, t6, t7 addu_s.qb t3, t3, a1 sw t3, 0(a0) 4: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 5: j ra nop END(pixman_composite_over_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (a8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0, v1 li t9, 0x00ff00ff beqz a3, 3f nop srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */ beqz v0, 1f /* branch if less than 4 src pixels */ nop 0: beqz v0, 1f addiu v0, v0, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 lbu t4, 0(a1) lbu v1, 1(a1) lbu t7, 2(a1) lbu t8, 3(a1) addiu a1, a1, 4 precr_sra.ph.w v1, t4, 0 
precr_sra.ph.w t8, t7, 0 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, v1 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t0, t2, t3 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop 2: lbu t8, 0(a1) lbu t0, 0(a2) lbu t1, 0(a0) addiu a1, a1, 1 addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0xff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 andi t2, t2, 0xff addu_s.qb t2, t2, t1 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: RESTORE_REGS_FROM_STACK 0, v0, v1 j ra nop END(pixman_composite_add_8_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_n_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, v0 li t9, 0x00ff00ff beqz a3, 3f nop srl v0, a3, 2 /* v0 = how many multiples of 4 dst pixels */ beqz v0, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz v0, 1f addiu v0, v0, -1 lbu t0, 0(a2) lbu t1, 1(a2) lbu t2, 2(a2) lbu t3, 3(a2) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) addiu a2, a2, 4 precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr_sra.ph.w t5, t4, 0 precr_sra.ph.w t7, t6, 0 precr.qb.ph t0, t3, t1 precr.qb.ph t1, t7, t5 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t0, t2, t3 addu_s.qb t2, t0, t1 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a3, a3, -4 b 0b addiu a0, a0, 4 1: beqz a3, 3f nop srl t8, a1, 24 2: lbu t0, 0(a2) lbu t1, 0(a0) addiu a2, a2, 1 mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0xff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 andi t2, t2, 0xff addu_s.qb t2, t2, 
t1 sb t2, 0(a0) addiu a3, a3, -1 bnez a3, 2b addiu a0, a0, 1 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_composite_add_n_8_8_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_n_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (32bit constant) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: /* a1 = source (32bit constant) */ lbu t0, 0(a2) /* t0 = mask (a8) */ lbu t1, 1(a2) /* t1 = mask (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ addiu a2, a2, 2 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 a1, a1, \ t0, t1, \ t2, t3, \ t5, t6, \ t4, t7, t8, t9, s0, s1, s2 sw t5, 0(a0) sw t6, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop /* a1 = source (32bit constant) */ lbu t0, 0(a2) /* t0 = mask (a8) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8_ADD_UN8x4 a1, t0, t1, t2, t4, t3, t5, t6 sw t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_n_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_0565_8_0565_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001F001F li t7, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lhu t1, 2(a1) /* t1 = source (r5g6b5) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t8, 0(a0) /* t8 = destination (r5g6b5) */ lhu t9, 2(a0) /* t9 = destination (r5g6b5) */ addiu a1, a1, 4 addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5 CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, s6, s7 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 s0, s1, \ t2, t3, \ s2, s3, \ t0, t1, \ t7, s4, s5, s6, s7, t8, t9 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu 
a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop lhu t0, 0(a1) /* t0 = source (r5g6b5) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5 CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6 MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t3, t1, t4, t0, t7, t2, t5, t6 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 j ra nop END(pixman_composite_add_0565_8_0565_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_8_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 2 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \ t2, t3, \ t5, t6, \ t7, t8, \ t4, t9, s0, s1, s2, t0, t1 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t3, t4, t5, t6, t7 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_8888_8_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_n_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (32bit constant) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop srl a2, a2, 24 addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination 
(a8r8g8b8) */ addiu a1, a1, 8 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \ a2, a2, \ t2, t3, \ t5, t6, \ t4, t7, t8, t9, s0, s1, s2 sw t5, 0(a0) sw t6, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ /* a2 = mask (32bit constant) */ lw t1, 0(a0) /* t1 = destination (a8r8g8b8) */ MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, a2, t1, t3, t4, t5, t6, t7 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_8888_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8888_8888_8888_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - mask (a8r8g8b8) * a3 - w */ SAVE_REGS_ON_STACK 0, s0, s1, s2 li t4, 0x00ff00ff beqz a3, 3f nop addiu t1, a3, -1 beqz t1, 2f nop 1: lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 4(a1) /* t1 = source (a8r8g8b8) */ lw t2, 0(a2) /* t2 = mask (a8r8g8b8) */ lw t3, 4(a2) /* t3 = mask (a8r8g8b8) */ lw t5, 0(a0) /* t5 = destination (a8r8g8b8) */ lw t6, 4(a0) /* t6 = destination (a8r8g8b8) */ addiu a1, a1, 8 addiu a2, a2, 8 srl t2, t2, 24 srl t3, t3, 24 MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 t0, t1, \ t2, t3, \ t5, t6, \ t7, t8, \ t4, t9, s0, s1, s2, t0, t1 sw t7, 0(a0) sw t8, 4(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a3, 3f nop lw t0, 0(a1) /* t0 = source (a8r8g8b8) */ lw t1, 0(a2) /* t1 = mask (a8r8g8b8) */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ srl t1, t1, 24 MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t3, t4, t5, t6, t7 sw t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2 j ra nop END(pixman_composite_add_8888_8888_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_add_8_8_asm_mips) /* * a0 - dst (a8) * a1 - src (a8) * a2 - w */ beqz a2, 3f nop srl t9, a2, 2 /* t9 = how many multiples of 4 dst pixels */ beqz t9, 1f /* branch if less than 4 src pixels */ nop 0: beqz t9, 1f addiu t9, t9, -1 lbu t0, 0(a1) lbu t1, 1(a1) lbu t2, 2(a1) lbu t3, 3(a1) lbu t4, 0(a0) lbu t5, 1(a0) lbu t6, 2(a0) lbu t7, 3(a0) 
/* (continuation of pixman_composite_add_8_8_asm_mips: tail of the 4-pixel
 * unrolled loop. Four src (a8) bytes are in t0-t3 and four dst (a8) bytes
 * in t4-t7, loaded just above; pack each group into one word and add with
 * per-byte saturation.)
 */
	addiu    a1, a1, 4
	precr_sra.ph.w  t1, t0, 0
	precr_sra.ph.w  t3, t2, 0
	precr_sra.ph.w  t5, t4, 0
	precr_sra.ph.w  t7, t6, 0
	precr.qb.ph     t0, t3, t1      /* t0 = four packed src bytes */
	precr.qb.ph     t1, t7, t5      /* t1 = four packed dst bytes */
	addu_s.qb       t2, t0, t1      /* saturated per-byte add, 4 pixels */
	sb       t2, 0(a0)
	srl      t2, t2, 8
	sb       t2, 1(a0)
	srl      t2, t2, 8
	sb       t2, 2(a0)
	srl      t2, t2, 8
	sb       t2, 3(a0)
	addiu    a2, a2, -4
	b        0b
	addiu    a0, a0, 4              /* delay slot */
1:
	beqz     a2, 3f
	nop
	/* leftover loop: one a8 pixel per iteration */
2:
	lbu      t0, 0(a1)
	lbu      t1, 0(a0)
	addiu    a1, a1, 1
	addu_s.qb t2, t0, t1
	sb       t2, 0(a0)
	addiu    a2, a2, -1
	bnez     a2, 2b
	addiu    a0, a0, 1              /* delay slot */
3:
	j        ra
	nop

END(pixman_composite_add_8_8_asm_mips)

LEAF_MIPS_DSPR2(pixman_composite_add_8888_8888_asm_mips)
/*
 * ADD combiner, no mask: dst = saturate(dst + src), per byte, so every
 * channel (including alpha) saturates independently.
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8r8g8b8)
 * a2 - w
 */
	beqz     a2, 4f
	nop
	srl      t9, a2, 2              /* t9 = how many multiples of 4 src pixels */
	beqz     t9, 3f                 /* branch if less than 4 src pixels */
	nop
	/* 4-pixel unrolled loop; the final group is handled at 2: so the
	 * last iteration can check for single-pixel leftovers */
1:
	addiu    t9, t9, -1
	beqz     t9, 2f
	addiu    a2, a2, -4             /* delay slot */
	lw       t0, 0(a1)
	lw       t1, 4(a1)
	lw       t2, 8(a1)
	lw       t3, 12(a1)
	lw       t4, 0(a0)
	lw       t5, 4(a0)
	lw       t6, 8(a0)
	lw       t7, 12(a0)
	addiu    a1, a1, 16
	addu_s.qb t4, t4, t0
	addu_s.qb t5, t5, t1
	addu_s.qb t6, t6, t2
	addu_s.qb t7, t7, t3
	sw       t4, 0(a0)
	sw       t5, 4(a0)
	sw       t6, 8(a0)
	sw       t7, 12(a0)
	b        1b
	addiu    a0, a0, 16             /* delay slot */
	/* last 4-pixel group */
2:
	lw       t0, 0(a1)
	lw       t1, 4(a1)
	lw       t2, 8(a1)
	lw       t3, 12(a1)
	lw       t4, 0(a0)
	lw       t5, 4(a0)
	lw       t6, 8(a0)
	lw       t7, 12(a0)
	addiu    a1, a1, 16
	addu_s.qb t4, t4, t0
	addu_s.qb t5, t5, t1
	addu_s.qb t6, t6, t2
	addu_s.qb t7, t7, t3
	sw       t4, 0(a0)
	sw       t5, 4(a0)
	sw       t6, 8(a0)
	sw       t7, 12(a0)
	beqz     a2, 4f
	addiu    a0, a0, 16             /* delay slot */
	/* leftover loop: one pixel per iteration */
3:
	lw       t0, 0(a1)
	lw       t1, 0(a0)
	addiu    a1, a1, 4
	addiu    a2, a2, -1
	addu_s.qb t1, t1, t0
	sw       t1, 0(a0)
	bnez     a2, 3b
	addiu    a0, a0, 4              /* delay slot */
4:
	jr       ra
	nop

END(pixman_composite_add_8888_8888_asm_mips)

LEAF_MIPS_DSPR2(pixman_composite_out_reverse_8_0565_asm_mips)
/*
 * OUT_REVERSE: each r5g6b5 destination pixel is scaled by the complement
 * of the a8 source value (dst channels multiplied by (255 - src)/255).
 *
 * a0 - dst (r5g6b5)
 * a1 - src (a8)
 * a2 - w
 */
	beqz     a2, 4f
	nop
	SAVE_REGS_ON_STACK 0, s0, s1, s2, s3
	li       t2, 0xf800f800         /* 0565 red-field mask, 2 pixels */
	li       t3, 0x07e007e0         /* 0565 green-field mask, 2 pixels */
	li       t4, 0x001F001F         /* 0565 blue-field mask, 2 pixels */
	li       t5, 0x00ff00ff
	addiu    t1, a2, -1
	beqz     t1, 2f                 /* w == 1: go straight to the tail */
	nop
	/* main loop: two pixels per iteration */
1:
	lbu      t0, 0(a1)              /* t0 = source (a8) */
	lbu      t1, 1(a1)              /* t1 = source (a8) */
	lhu      t6, 0(a0)              /* t6 = destination (r5g6b5) */
	lhu      t7, 2(a0)              /* t7 = destination (r5g6b5)
*/
	addiu    a1, a1, 2
	not      t0, t0
	not      t1, t1
	andi     t0, 0xff               /* t0 = neg source1 */
	andi     t1, 0xff               /* t1 = neg source2 */
	CONVERT_2x0565_TO_2x8888 t6, t7, t8, t9, t3, t4, s0, s1, s2, s3
	MIPS_2xUN8x4_MUL_2xUN8 t8, t9, t0, t1, t6, t7, t5, s0, s1, s2, s3, t8, t9
	CONVERT_2x8888_TO_2x0565 t6, t7, t8, t9, t2, t3, t4, s0, s1
	sh       t8, 0(a0)
	sh       t9, 2(a0)
	addiu    a2, a2, -2
	addiu    t1, a2, -1
	bgtz     t1, 1b
	addiu    a0, a0, 4              /* delay slot */
	/* tail: last single pixel, if any */
2:
	beqz     a2, 3f
	nop
	lbu      t0, 0(a1)              /* t0 = source (a8) */
	lhu      t1, 0(a0)              /* t1 = destination (r5g6b5) */
	not      t0, t0
	andi     t0, 0xff               /* t0 = neg source */
	CONVERT_1x0565_TO_1x8888 t1, t2, t3, t4
	MIPS_UN8x4_MUL_UN8 t2, t0, t1, t5, t3, t4, t6
	CONVERT_1x8888_TO_1x0565 t1, t2, t3, t4
	sh       t2, 0(a0)
3:
	RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3
4:
	j        ra
	nop

END(pixman_composite_out_reverse_8_0565_asm_mips)

LEAF_MIPS_DSPR2(pixman_composite_out_reverse_8_8888_asm_mips)
/*
 * OUT_REVERSE: every channel of each a8r8g8b8 destination pixel is
 * multiplied by the complement of the a8 source value
 * (dst = dst * (255 - src) / 255).
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (a8)
 * a2 - w
 */
	beqz     a2, 3f
	nop
	li       t4, 0x00ff00ff
	addiu    t1, a2, -1
	beqz     t1, 2f                 /* w == 1: go straight to the tail */
	nop
	/* main loop: two pixels per iteration */
1:
	lbu      t0, 0(a1)              /* t0 = source (a8) */
	lbu      t1, 1(a1)              /* t1 = source (a8) */
	lw       t2, 0(a0)              /* t2 = destination (a8r8g8b8) */
	lw       t3, 4(a0)              /* t3 = destination (a8r8g8b8) */
	addiu    a1, a1, 2
	not      t0, t0
	not      t1, t1
	andi     t0, 0xff               /* t0 = neg source */
	andi     t1, 0xff               /* t1 = neg source */
	MIPS_2xUN8x4_MUL_2xUN8 t2, t3, t0, t1, t5, t6, t4, t7, t8, t9, t2, t3, t0
	sw       t5, 0(a0)
	sw       t6, 4(a0)
	addiu    a2, a2, -2
	addiu    t1, a2, -1
	bgtz     t1, 1b
	addiu    a0, a0, 8              /* delay slot */
	/* tail: last single pixel, if any */
2:
	beqz     a2, 3f
	nop
	lbu      t0, 0(a1)              /* t0 = source (a8) */
	lw       t1, 0(a0)              /* t1 = destination (a8r8g8b8) */
	not      t0, t0
	andi     t0, 0xff               /* t0 = neg source */
	MIPS_UN8x4_MUL_UN8 t1, t0, t2, t4, t3, t5, t6
	sw       t2, 0(a0)
3:
	j        ra
	nop

END(pixman_composite_out_reverse_8_8888_asm_mips)

LEAF_MIPS_DSPR2(pixman_composite_over_reverse_n_8888_asm_mips)
/*
 * OVER_REVERSE with a solid source: the constant source is multiplied by
 * the complement of each destination alpha and added (saturating) to the
 * destination, i.e. dst = dst + src * (255 - dst.a) / 255.
 *
 * a0 - dst (a8r8g8b8)
 * a1 - src (32bit constant)
 * a2 - w
 */
	beqz     a2, 5f
	nop
	SAVE_REGS_ON_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7
	li       t0, 0x00ff00ff
	srl      t9, a2, 2              /* t9 = how many multiples of 4 src pixels */
	beqz     t9, 2f                 /* branch if less
than 4 src pixels */ nop 1: beqz t9, 2f addiu t9, t9, -1 lw t1, 0(a0) lw t2, 4(a0) lw t3, 8(a0) lw t4, 12(a0) addiu a2, a2, -4 not t5, t1 not t6, t2 not t7, t3 not t8, t4 srl t5, t5, 24 srl t6, t6, 24 srl t7, t7, 24 srl t8, t8, 24 replv.ph t5, t5 replv.ph t6, t6 replv.ph t7, t7 replv.ph t8, t8 muleu_s.ph.qbl s0, a1, t5 muleu_s.ph.qbr s1, a1, t5 muleu_s.ph.qbl s2, a1, t6 muleu_s.ph.qbr s3, a1, t6 muleu_s.ph.qbl s4, a1, t7 muleu_s.ph.qbr s5, a1, t7 muleu_s.ph.qbl s6, a1, t8 muleu_s.ph.qbr s7, a1, t8 shra_r.ph t5, s0, 8 shra_r.ph t6, s1, 8 shra_r.ph t7, s2, 8 shra_r.ph t8, s3, 8 and t5, t5, t0 and t6, t6, t0 and t7, t7, t0 and t8, t8, t0 addq.ph s0, s0, t5 addq.ph s1, s1, t6 addq.ph s2, s2, t7 addq.ph s3, s3, t8 shra_r.ph s0, s0, 8 shra_r.ph s1, s1, 8 shra_r.ph s2, s2, 8 shra_r.ph s3, s3, 8 shra_r.ph t5, s4, 8 shra_r.ph t6, s5, 8 shra_r.ph t7, s6, 8 shra_r.ph t8, s7, 8 and t5, t5, t0 and t6, t6, t0 and t7, t7, t0 and t8, t8, t0 addq.ph s4, s4, t5 addq.ph s5, s5, t6 addq.ph s6, s6, t7 addq.ph s7, s7, t8 shra_r.ph s4, s4, 8 shra_r.ph s5, s5, 8 shra_r.ph s6, s6, 8 shra_r.ph s7, s7, 8 precr.qb.ph t5, s0, s1 precr.qb.ph t6, s2, s3 precr.qb.ph t7, s4, s5 precr.qb.ph t8, s6, s7 addu_s.qb t5, t1, t5 addu_s.qb t6, t2, t6 addu_s.qb t7, t3, t7 addu_s.qb t8, t4, t8 sw t5, 0(a0) sw t6, 4(a0) sw t7, 8(a0) sw t8, 12(a0) b 1b addiu a0, a0, 16 2: beqz a2, 4f nop 3: lw t1, 0(a0) not t2, t1 srl t2, t2, 24 replv.ph t2, t2 muleu_s.ph.qbl t4, a1, t2 muleu_s.ph.qbr t5, a1, t2 shra_r.ph t6, t4, 8 shra_r.ph t7, t5, 8 and t6,t6,t0 and t7,t7,t0 addq.ph t8, t4, t6 addq.ph t9, t5, t7 shra_r.ph t8, t8, 8 shra_r.ph t9, t9, 8 precr.qb.ph t9, t8, t9 addu_s.qb t9, t1, t9 sw t9, 0(a0) addiu a2, a2, -1 bnez a2, 3b addiu a0, a0, 4 4: RESTORE_REGS_FROM_STACK 20, s0, s1, s2, s3, s4, s5, s6, s7 5: j ra nop END(pixman_composite_over_reverse_n_8888_asm_mips) LEAF_MIPS_DSPR2(pixman_composite_in_n_8_asm_mips) /* * a0 - dst (a8) * a1 - src (32bit constant) * a2 - w */ li t9, 0x00ff00ff beqz a2, 3f nop srl t7, 
a2, 2 /* t7 = how many multiples of 4 dst pixels */ beqz t7, 1f /* branch if less than 4 src pixels */ nop srl t8, a1, 24 replv.ph t8, t8 0: beqz t7, 1f addiu t7, t7, -1 lbu t0, 0(a0) lbu t1, 1(a0) lbu t2, 2(a0) lbu t3, 3(a0) precr_sra.ph.w t1, t0, 0 precr_sra.ph.w t3, t2, 0 precr.qb.ph t0, t3, t1 muleu_s.ph.qbl t2, t0, t8 muleu_s.ph.qbr t3, t0, t8 shra_r.ph t4, t2, 8 shra_r.ph t5, t3, 8 and t4, t4, t9 and t5, t5, t9 addq.ph t2, t2, t4 addq.ph t3, t3, t5 shra_r.ph t2, t2, 8 shra_r.ph t3, t3, 8 precr.qb.ph t2, t2, t3 sb t2, 0(a0) srl t2, t2, 8 sb t2, 1(a0) srl t2, t2, 8 sb t2, 2(a0) srl t2, t2, 8 sb t2, 3(a0) addiu a2, a2, -4 b 0b addiu a0, a0, 4 1: beqz a2, 3f nop srl t8, a1, 24 2: lbu t0, 0(a0) mul t2, t0, t8 shra_r.ph t3, t2, 8 andi t3, t3, 0x00ff addq.ph t2, t2, t3 shra_r.ph t2, t2, 8 sb t2, 0(a0) addiu a2, a2, -1 bnez a2, 2b addiu a0, a0, 1 3: j ra nop END(pixman_composite_in_n_8_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_8888_OVER_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (a8r8g8b8) * a2 - w * a3 - vx * 16(sp) - unit_x */ SAVE_REGS_ON_STACK 0, s0, s1, s2, s3 lw t8, 16(sp) /* t8 = unit_x */ li t6, 0x00ff00ff beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ sra t1, a3, 16 /* t0 = vx >> 16 */ sll t1, t1, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t1, a1, t1 lw t1, 0(t1) /* t1 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ lw t2, 0(a0) /* t2 = destination (a8r8g8b8) */ lw t3, 4(a0) /* t3 = destination (a8r8g8b8) */ OVER_2x8888_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t9, s0, s1, s2, s3 sw t4, 0(a0) sw t5, 4(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 8 2: beqz a2, 3f nop sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ lw t1, 0(a0) /* t1 = destination 
(a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ OVER_8888_8888 t0, t1, t2, t6, t4, t5, t3, t7 sw t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, s0, s1, s2, s3 j ra nop END(pixman_scaled_nearest_scanline_8888_8888_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_0565_OVER_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - w * a3 - vx * 16(sp) - unit_x */ SAVE_REGS_ON_STACK 24, s0, s1, s2, s3, s4, v0, v1 lw t8, 40(sp) /* t8 = unit_x */ li t4, 0x00ff00ff li t5, 0xf800f800 li t6, 0x07e007e0 li t7, 0x001F001F beqz a2, 3f nop addiu t1, a2, -1 beqz t1, 2f nop 1: sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ sra t1, a3, 16 /* t0 = vx >> 16 */ sll t1, t1, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t1, a1, t1 lw t1, 0(t1) /* t1 = source (a8r8g8b8) */ addu a3, a3, t8 /* a3 = vx + unit_x */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ lhu t3, 2(a0) /* t3 = destination (r5g6b5) */ CONVERT_2x0565_TO_2x8888 t2, t3, v0, v1, t6, t7, s0, s1, s2, s3 OVER_2x8888_2x8888 t0, t1, v0, v1, t2, t3, t4, t9, s0, s1, s2, s3, s4 CONVERT_2x8888_TO_2x0565 t2, t3, v0, v1, t5, t6, t7, t9, s2 sh v0, 0(a0) sh v1, 2(a0) addiu a2, a2, -2 addiu t1, a2, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a2, 3f nop sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ lhu t1, 0(a0) /* t1 = destination (r5g6b5) */ addu a3, a3, t8 /* a3 = vx + unit_x */ CONVERT_1x0565_TO_1x8888 t1, t2, t5, t6 OVER_8888_8888 t0, t2, t1, t4, t3, t5, t6, t7 CONVERT_1x8888_TO_1x0565 t1, t2, t5, t6 sh t2, 0(a0) 3: RESTORE_REGS_FROM_STACK 24, s0, s1, s2, s3, s4, v0, v1 j ra nop END(pixman_scaled_nearest_scanline_8888_0565_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_0565_8888_SRC_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - src (r5g6b5) * a2 - w * a3 - vx * 16(sp) - unit_x */ SAVE_REGS_ON_STACK 0, v0 
beqz a2, 3f nop lw v0, 16(sp) /* v0 = unit_x */ addiu t1, a2, -1 beqz t1, 2f nop li t4, 0x07e007e0 li t5, 0x001F001F 1: sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 ((r5g6b5)) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source ((r5g6b5)) */ addu a3, a3, v0 /* a3 = vx + unit_x */ sra t1, a3, 16 /* t1 = vx >> 16 */ sll t1, t1, 1 /* t1 = t1 * 2 ((r5g6b5)) */ addu t1, a1, t1 lhu t1, 0(t1) /* t1 = source ((r5g6b5)) */ addu a3, a3, v0 /* a3 = vx + unit_x */ addiu a2, a2, -2 CONVERT_2x0565_TO_2x8888 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 sw t2, 0(a0) sw t3, 4(a0) addiu t2, a2, -1 bgtz t2, 1b addiu a0, a0, 8 2: beqz a2, 3f nop sra t0, a3, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 ((r5g6b5)) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source ((r5g6b5)) */ CONVERT_1x0565_TO_1x8888 t0, t1, t2, t3 sw t1, 0(a0) 3: RESTORE_REGS_FROM_STACK 0, v0 j ra nop END(pixman_scaled_nearest_scanline_0565_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (a8r8g8b8) * a2 - mask (a8) * a3 - w * 16(sp) - vx * 20(sp) - unit_x */ beqz a3, 4f nop SAVE_REGS_ON_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 lw v0, 36(sp) /* v0 = vx */ lw v1, 40(sp) /* v1 = unit_x */ li t6, 0x00ff00ff li t7, 0xf800f800 li t8, 0x07e007e0 li t9, 0x001F001F addiu t1, a3, -1 beqz t1, 2f nop 1: sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ addu v0, v0, v1 /* v0 = vx + unit_x */ sra t1, v0, 16 /* t1 = vx >> 16 */ sll t1, t1, 2 /* t1 = t1 * 4 (a8r8g8b8) */ addu t1, a1, t1 lw t1, 0(t1) /* t1 = source (a8r8g8b8) */ addu v0, v0, v1 /* v0 = vx + unit_x */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t4, 0(a0) /* t4 = destination (r5g6b5) */ lhu t5, 2(a0) /* t5 = destination (r5g6b5) */ addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t4, t5, s0, s1, t8, t9, s2, s3, s4, s5 OVER_2x8888_2x8_2x8888 t0, t1, \ t2, t3, \ s0, s1, \ 
t4, t5, \ t6, s2, s3, s4, s5, t2, t3 CONVERT_2x8888_TO_2x0565 t4, t5, s0, s1, t7, t8, t9, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, a0, 4 2: beqz a3, 3f nop sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 2 /* t0 = t0 * 4 (a8r8g8b8) */ addu t0, a1, t0 lw t0, 0(t0) /* t0 = source (a8r8g8b8) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t2, t3, t4, t5 OVER_8888_8_8888 t0, t1, t3, t2, t6, t4, t5, t7, t8 CONVERT_1x8888_TO_1x0565 t2, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 4: j ra nop END(pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_mips) /* * a0 - dst (r5g6b5) * a1 - src (r5g6b5) * a2 - mask (a8) * a3 - w * 16(sp) - vx * 20(sp) - unit_x */ beqz a3, 4f nop SAVE_REGS_ON_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 lw v0, 36(sp) /* v0 = vx */ lw v1, 40(sp) /* v1 = unit_x */ li t4, 0xf800f800 li t5, 0x07e007e0 li t6, 0x001F001F li t7, 0x00ff00ff addiu t1, a3, -1 beqz t1, 2f nop 1: sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 (r5g6b5) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source (r5g6b5) */ addu v0, v0, v1 /* v0 = vx + unit_x */ sra t1, v0, 16 /* t1 = vx >> 16 */ sll t1, t1, 1 /* t1 = t1 * 2 (r5g6b5) */ addu t1, a1, t1 lhu t1, 0(t1) /* t1 = source (r5g6b5) */ addu v0, v0, v1 /* v0 = vx + unit_x */ lbu t2, 0(a2) /* t2 = mask (a8) */ lbu t3, 1(a2) /* t3 = mask (a8) */ lhu t8, 0(a0) /* t8 = destination (r5g6b5) */ lhu t9, 2(a0) /* t9 = destination (r5g6b5) */ addiu a2, a2, 2 CONVERT_2x0565_TO_2x8888 t0, t1, s0, s1, t5, t6, s2, s3, s4, s5 CONVERT_2x0565_TO_2x8888 t8, t9, s2, s3, t5, t6, s4, s5, t0, t1 OVER_2x8888_2x8_2x8888 s0, s1, \ t2, t3, \ s2, s3, \ t0, t1, \ t7, t8, t9, s4, s5, s0, s1 CONVERT_2x8888_TO_2x0565 t0, t1, s0, s1, t4, t5, t6, s2, s3 sh s0, 0(a0) sh s1, 2(a0) addiu a3, a3, -2 addiu t1, a3, -1 bgtz t1, 1b addiu a0, 
a0, 4 2: beqz a3, 3f nop sra t0, v0, 16 /* t0 = vx >> 16 */ sll t0, t0, 1 /* t0 = t0 * 2 (r5g6b5) */ addu t0, a1, t0 lhu t0, 0(t0) /* t0 = source (r5g6b5) */ lbu t1, 0(a2) /* t1 = mask (a8) */ lhu t2, 0(a0) /* t2 = destination (r5g6b5) */ CONVERT_1x0565_TO_1x8888 t0, t3, t4, t5 CONVERT_1x0565_TO_1x8888 t2, t4, t5, t6 OVER_8888_8_8888 t3, t1, t4, t0, t7, t2, t5, t6, t8 CONVERT_1x8888_TO_1x0565 t0, t3, t4, t5 sh t3, 0(a0) 3: RESTORE_REGS_FROM_STACK 20, v0, v1, s0, s1, s2, s3, s4, s5 4: j ra nop END(pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 lw s0, 36(sp) /* s0 = wt */ lw s1, 40(sp) /* s1 = wb */ lw s2, 44(sp) /* s2 = vx */ lw s3, 48(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom 
* a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 lw s0, 36(sp) /* s0 = wt */ lw s1, 40(sp) /* s1 = wb */ lw s2, 44(sp) /* s2 = vx */ lw s3, 48(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez a3, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8888_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ 
mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a1) /* t0 = tl */ lhx t1, t8(a1) /* t1 = tr */ andi t1, t1, 0xffff addiu a3, a3, -1 lhx t2, t9(a2) /* t2 = bl */ lhx t3, t8(a2) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a1) /* t0 = tl */ lhx t1, t8(a1) /* t1 = tr */ andi t1, t1, 0xffff addiu a3, a3, -1 lhx t2, t9(a2) /* t2 = bl */ lhx t3, t8(a2) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 
BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez a3, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 40(sp) /* s0 = wt */ lw s1, 44(sp) /* s1 = wb */ lw s2, 48(sp) /* s2 = vx */ lw s3, 52(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lw t1, 0(a0) /* t1 = dest */ OVER_8888_8888 t0, t1, t2, s8, t3, t4, t5, t6 addu s2, s2, s3 /* vx += unit_x; */ sw t2, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 24, v0, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_mips) /* * a0 - *dst * a1 - *src_top * a2 - *src_bottom * a3 - w * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x */ beqz a3, 1f nop SAVE_REGS_ON_STACK 
20, v0, s0, s1, s2, s3, s4, s5, s6, s7 lw s0, 36(sp) /* s0 = wt */ lw s1, 40(sp) /* s1 = wb */ lw s2, 44(sp) /* s2 = vx */ lw s3, 48(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a1) /* t0 = tl */ lwx t1, t8(a1) /* t1 = tr */ addiu a3, a3, -1 lwx t2, t9(a2) /* t2 = bl */ lwx t3, t8(a2) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lw t1, 0(a0) addu_s.qb t2, t0, t1 addu s2, s2, s3 /* vx += unit_x; */ sw t2, 0(a0) bnez a3, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 20, v0, s0, s1, s2, s3, s4, s5, s6, s7 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw v1, 32(sp) beqz v1, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* 
s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, s8, t2, t3, t4 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez v1, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw v1, 32(sp) beqz v1, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, s8, t2, t3, t4 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez v1, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 
28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw t0, 32(sp) beqz t0, 1f nop SAVE_REGS_ON_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra lw s0, 48(sp) /* s0 = wt */ lw s1, 52(sp) /* s1 = wb */ lw s2, 56(sp) /* s2 = vx */ lw s3, 60(sp) /* s3 = unit_x */ lw ra, 64(sp) /* ra = w */ li v0, 0x00ff00ff li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ li t5, BILINEAR_INTERPOLATION_RANGE subu t5, t5, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a2) /* t0 = tl */ lhx t1, t8(a2) /* t1 = tr */ andi t1, t1, 0xffff addiu ra, ra, -1 lhx t2, t9(a3) /* t2 = bl */ lhx t3, t8(a3) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t2, t3, t4 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez ra, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - 
wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw t0, 32(sp) beqz t0, 1f nop SAVE_REGS_ON_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra lw s0, 48(sp) /* s0 = wt */ lw s1, 52(sp) /* s1 = wb */ lw s2, 56(sp) /* s2 = vx */ lw s3, 60(sp) /* s3 = unit_x */ lw ra, 64(sp) /* ra = w */ li v0, 0x00ff00ff li v1, 0x07e007e0 li s8, 0x001f001f sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ li t5, BILINEAR_INTERPOLATION_RANGE subu t5, t5, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 1 addiu t8, t9, 2 lhx t0, t9(a2) /* t0 = tl */ lhx t1, t8(a2) /* t1 = tr */ andi t1, t1, 0xffff addiu ra, ra, -1 lhx t2, t9(a3) /* t2 = bl */ lhx t3, t8(a3) /* t3 = br */ andi t3, t3, 0xffff CONVERT_2x0565_TO_2x8888 t0, t1, t0, t1, v1, s8, t4, t5, t6, t7 CONVERT_2x0565_TO_2x8888 t2, t3, t2, t3, v1, s8, t4, t5, t6, t7 BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8 t0, t1, t0, v0, t2, t3, t4 CONVERT_1x8888_TO_1x0565 t0, t1, t2, t3 addu s2, s2, s3 /* vx += unit_x; */ sh t1, 0(a0) bnez ra, 0b addiu a0, a0, 2 RESTORE_REGS_FROM_STACK 32, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8, ra 1: j ra nop END(pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_mips) /* * a0 - dst (a8r8g8b8) * a1 - mask (a8) * a2 - src_top (a8r8g8b8) * a3 - src_bottom (a8r8g8b8) * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw v1, 60(sp) /* v1 = w(sp + 32 + 28 save regs stack offset)*/ beqz v1, 1f nop lw s0, 
44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 = wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, \ t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ lw t2, 0(a0) /* t2 = dst */ addiu a1, a1, 1 OVER_8888_8_8888 t0, t1, t2, t0, s8, t3, t4, t5, t6 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez v1, 0b addiu a0, a0, 4 1: RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 j ra nop END(pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_mips) LEAF_MIPS_DSPR2(pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_mips) /* * a0 - *dst * a1 - *mask * a2 - *src_top * a3 - *src_bottom * 16(sp) - wt * 20(sp) - wb * 24(sp) - vx * 28(sp) - unit_x * 32(sp) - w */ lw v1, 32(sp) beqz v1, 1f nop SAVE_REGS_ON_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 lw s0, 44(sp) /* s0 = wt */ lw s1, 48(sp) /* s1 = wb */ lw s2, 52(sp) /* s2 = vx */ lw s3, 56(sp) /* s3 = unit_x */ li v0, BILINEAR_INTERPOLATION_RANGE li s8, 0x00ff00ff sll s0, s0, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) sll s1, s1, (2 * (8 - BILINEAR_INTERPOLATION_BITS)) 0: andi t4, s2, 0xffff /* t4 = (short)vx */ srl t4, t4, (16 - BILINEAR_INTERPOLATION_BITS) /* t4 = vx >> 8 */ subu t5, v0, t4 /* t5 = ( 256 - (vx>>8)) */ mul s4, s0, t5 /* s4 = wt*(256-(vx>>8)) */ mul s5, s0, t4 /* s5 
= wt*(vx>>8) */ mul s6, s1, t5 /* s6 = wb*(256-(vx>>8)) */ mul s7, s1, t4 /* s7 = wb*(vx>>8) */ sra t9, s2, 16 sll t9, t9, 2 addiu t8, t9, 4 lwx t0, t9(a2) /* t0 = tl */ lwx t1, t8(a2) /* t1 = tr */ addiu v1, v1, -1 lwx t2, t9(a3) /* t2 = bl */ lwx t3, t8(a3) /* t3 = br */ BILINEAR_INTERPOLATE_SINGLE_PIXEL t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, s4, s5, s6, s7 lbu t1, 0(a1) /* t1 = mask */ lw t2, 0(a0) /* t2 = dst */ addiu a1, a1, 1 MIPS_UN8x4_MUL_UN8_ADD_UN8x4 t0, t1, t2, t0, s8, t3, t4, t5 addu s2, s2, s3 /* vx += unit_x; */ sw t0, 0(a0) bnez v1, 0b addiu a0, a0, 4 RESTORE_REGS_FROM_STACK 28, v0, v1, s0, s1, s2, s3, s4, s5, s6, s7, s8 1: j ra nop END(pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_mips) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mips-dspr2-asm.h0000664000175000017500000006655514712446423020654 0ustar00mattst88mattst88/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. 
BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Nemanja Lukic (nemanja.lukic@rt-rk.com) */ #ifndef PIXMAN_MIPS_DSPR2_ASM_H #define PIXMAN_MIPS_DSPR2_ASM_H #define zero $0 #define AT $1 #define v0 $2 #define v1 $3 #define a0 $4 #define a1 $5 #define a2 $6 #define a3 $7 #define t0 $8 #define t1 $9 #define t2 $10 #define t3 $11 #define t4 $12 #define t5 $13 #define t6 $14 #define t7 $15 #define s0 $16 #define s1 $17 #define s2 $18 #define s3 $19 #define s4 $20 #define s5 $21 #define s6 $22 #define s7 $23 #define t8 $24 #define t9 $25 #define k0 $26 #define k1 $27 #define gp $28 #define sp $29 #define fp $30 #define s8 $30 #define ra $31 /* * LEAF_MIPS32R2 - declare leaf routine for MIPS32r2 */ #define LEAF_MIPS32R2(symbol) \ .globl symbol; \ .align 2; \ .hidden symbol; \ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, 0, ra; \ .set push; \ .set arch=mips32r2; \ .set noreorder; \ .set noat; /* * LEAF_MIPS32R2 - declare leaf routine for MIPS DSPr2 */ #define LEAF_MIPS_DSPR2(symbol) \ LEAF_MIPS32R2(symbol) \ .set dspr2; /* * END - mark end of function */ #define END(function) \ .set pop; \ .end function; \ .size function,.-function /* * Checks if stack offset is big enough for storing/restoring regs_num * number of register to/from stack. Stack offset must be greater than * or equal to the number of bytes needed for storing registers (regs_num*4). 
* Since MIPS ABI allows usage of first 16 bytes of stack frame (this is * preserved for input arguments of the functions, already stored in a0-a3), * stack size can be further optimized by utilizing this space. */ .macro CHECK_STACK_OFFSET regs_num, stack_offset .if \stack_offset < \regs_num * 4 - 16 .error "Stack offset too small." .endif .endm /* * Saves set of registers on stack. Maximum number of registers that * can be saved on stack is limitted to 14 (a0-a3, v0-v1 and s0-s7). * Stack offset is number of bytes that are added to stack pointer (sp) * before registers are pushed in order to provide enough space on stack * (offset must be multiple of 4, and must be big enough, as described by * CHECK_STACK_OFFSET macro). This macro is intended to be used in * combination with RESTORE_REGS_FROM_STACK macro. Example: * SAVE_REGS_ON_STACK 4, v0, v1, s0, s1 * RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1 */ .macro SAVE_REGS_ON_STACK stack_offset = 0, r1, \ r2 = 0, r3 = 0, r4 = 0, \ r5 = 0, r6 = 0, r7 = 0, \ r8 = 0, r9 = 0, r10 = 0, \ r11 = 0, r12 = 0, r13 = 0, \ r14 = 0 .if (\stack_offset < 0) || (\stack_offset - (\stack_offset / 4) * 4) .error "Stack offset must be pozitive and multiple of 4." 
.endif .if \stack_offset != 0 addiu sp, sp, -\stack_offset .endif sw \r1, 0(sp) .if \r2 != 0 sw \r2, 4(sp) .endif .if \r3 != 0 sw \r3, 8(sp) .endif .if \r4 != 0 sw \r4, 12(sp) .endif .if \r5 != 0 CHECK_STACK_OFFSET 5, \stack_offset sw \r5, 16(sp) .endif .if \r6 != 0 CHECK_STACK_OFFSET 6, \stack_offset sw \r6, 20(sp) .endif .if \r7 != 0 CHECK_STACK_OFFSET 7, \stack_offset sw \r7, 24(sp) .endif .if \r8 != 0 CHECK_STACK_OFFSET 8, \stack_offset sw \r8, 28(sp) .endif .if \r9 != 0 CHECK_STACK_OFFSET 9, \stack_offset sw \r9, 32(sp) .endif .if \r10 != 0 CHECK_STACK_OFFSET 10, \stack_offset sw \r10, 36(sp) .endif .if \r11 != 0 CHECK_STACK_OFFSET 11, \stack_offset sw \r11, 40(sp) .endif .if \r12 != 0 CHECK_STACK_OFFSET 12, \stack_offset sw \r12, 44(sp) .endif .if \r13 != 0 CHECK_STACK_OFFSET 13, \stack_offset sw \r13, 48(sp) .endif .if \r14 != 0 CHECK_STACK_OFFSET 14, \stack_offset sw \r14, 52(sp) .endif .endm /* * Restores set of registers from stack. Maximum number of registers that * can be restored from stack is limitted to 14 (a0-a3, v0-v1 and s0-s7). * Stack offset is number of bytes that are added to stack pointer (sp) * after registers are restored (offset must be multiple of 4, and must * be big enough, as described by CHECK_STACK_OFFSET macro). This macro is * intended to be used in combination with RESTORE_REGS_FROM_STACK macro. * Example: * SAVE_REGS_ON_STACK 4, v0, v1, s0, s1 * RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1 */ .macro RESTORE_REGS_FROM_STACK stack_offset = 0, r1, \ r2 = 0, r3 = 0, r4 = 0, \ r5 = 0, r6 = 0, r7 = 0, \ r8 = 0, r9 = 0, r10 = 0, \ r11 = 0, r12 = 0, r13 = 0, \ r14 = 0 .if (\stack_offset < 0) || (\stack_offset - (\stack_offset/4)*4) .error "Stack offset must be pozitive and multiple of 4." 
.endif lw \r1, 0(sp) .if \r2 != 0 lw \r2, 4(sp) .endif .if \r3 != 0 lw \r3, 8(sp) .endif .if \r4 != 0 lw \r4, 12(sp) .endif .if \r5 != 0 CHECK_STACK_OFFSET 5, \stack_offset lw \r5, 16(sp) .endif .if \r6 != 0 CHECK_STACK_OFFSET 6, \stack_offset lw \r6, 20(sp) .endif .if \r7 != 0 CHECK_STACK_OFFSET 7, \stack_offset lw \r7, 24(sp) .endif .if \r8 != 0 CHECK_STACK_OFFSET 8, \stack_offset lw \r8, 28(sp) .endif .if \r9 != 0 CHECK_STACK_OFFSET 9, \stack_offset lw \r9, 32(sp) .endif .if \r10 != 0 CHECK_STACK_OFFSET 10, \stack_offset lw \r10, 36(sp) .endif .if \r11 != 0 CHECK_STACK_OFFSET 11, \stack_offset lw \r11, 40(sp) .endif .if \r12 != 0 CHECK_STACK_OFFSET 12, \stack_offset lw \r12, 44(sp) .endif .if \r13 != 0 CHECK_STACK_OFFSET 13, \stack_offset lw \r13, 48(sp) .endif .if \r14 != 0 CHECK_STACK_OFFSET 14, \stack_offset lw \r14, 52(sp) .endif .if \stack_offset != 0 addiu sp, sp, \stack_offset .endif .endm /* * Conversion of single r5g6b5 pixel (in_565) to single a8r8g8b8 pixel * returned in (out_8888) register. Requires two temporary registers * (scratch1 and scratch2). */ .macro CONVERT_1x0565_TO_1x8888 in_565, \ out_8888, \ scratch1, scratch2 lui \out_8888, 0xff00 sll \scratch1, \in_565, 0x3 andi \scratch2, \scratch1, 0xff ext \scratch1, \in_565, 0x2, 0x3 or \scratch1, \scratch2, \scratch1 or \out_8888, \out_8888, \scratch1 sll \scratch1, \in_565, 0x5 andi \scratch1, \scratch1, 0xfc00 srl \scratch2, \in_565, 0x1 andi \scratch2, \scratch2, 0x300 or \scratch2, \scratch1, \scratch2 or \out_8888, \out_8888, \scratch2 andi \scratch1, \in_565, 0xf800 srl \scratch2, \scratch1, 0x5 andi \scratch2, \scratch2, 0xff00 or \scratch1, \scratch1, \scratch2 sll \scratch1, \scratch1, 0x8 or \out_8888, \out_8888, \scratch1 .endm /* * Conversion of two r5g6b5 pixels (in1_565 and in2_565) to two a8r8g8b8 pixels * returned in (out1_8888 and out2_8888) registers. Requires four scratch * registers (scratch1 ... scratch4). It also requires maskG and maskB for * color component extractions. 
These masks must have following values: * li maskG, 0x07e007e0 * li maskB, 0x001F001F */ .macro CONVERT_2x0565_TO_2x8888 in1_565, in2_565, \ out1_8888, out2_8888, \ maskG, maskB, \ scratch1, scratch2, scratch3, scratch4 sll \scratch1, \in1_565, 16 or \scratch1, \scratch1, \in2_565 lui \out2_8888, 0xff00 ori \out2_8888, \out2_8888, 0xff00 shrl.ph \scratch2, \scratch1, 11 and \scratch3, \scratch1, \maskG shra.ph \scratch4, \scratch2, 2 shll.ph \scratch2, \scratch2, 3 shll.ph \scratch3, \scratch3, 5 or \scratch2, \scratch2, \scratch4 shrl.qb \scratch4, \scratch3, 6 or \out2_8888, \out2_8888, \scratch2 or \scratch3, \scratch3, \scratch4 and \scratch1, \scratch1, \maskB shll.ph \scratch2, \scratch1, 3 shra.ph \scratch4, \scratch1, 2 or \scratch2, \scratch2, \scratch4 or \scratch3, \scratch2, \scratch3 precrq.ph.w \out1_8888, \out2_8888, \scratch3 precr_sra.ph.w \out2_8888, \scratch3, 0 .endm /* * Conversion of single a8r8g8b8 pixel (in_8888) to single r5g6b5 pixel * returned in (out_565) register. Requires two temporary registers * (scratch1 and scratch2). */ .macro CONVERT_1x8888_TO_1x0565 in_8888, \ out_565, \ scratch1, scratch2 ext \out_565, \in_8888, 0x3, 0x5 srl \scratch1, \in_8888, 0x5 andi \scratch1, \scratch1, 0x07e0 srl \scratch2, \in_8888, 0x8 andi \scratch2, \scratch2, 0xf800 or \out_565, \out_565, \scratch1 or \out_565, \out_565, \scratch2 .endm /* * Conversion of two a8r8g8b8 pixels (in1_8888 and in2_8888) to two r5g6b5 * pixels returned in (out1_565 and out2_565) registers. Requires two temporary * registers (scratch1 and scratch2). It also requires maskR, maskG and maskB * for color component extractions. These masks must have following values: * li maskR, 0xf800f800 * li maskG, 0x07e007e0 * li maskB, 0x001F001F * Value of input register in2_8888 is lost. 
*/ .macro CONVERT_2x8888_TO_2x0565 in1_8888, in2_8888, \ out1_565, out2_565, \ maskR, maskG, maskB, \ scratch1, scratch2 precr.qb.ph \scratch1, \in2_8888, \in1_8888 precrq.qb.ph \in2_8888, \in2_8888, \in1_8888 and \out1_565, \scratch1, \maskR shrl.ph \scratch1, \scratch1, 3 shll.ph \in2_8888, \in2_8888, 3 and \scratch1, \scratch1, \maskB or \out1_565, \out1_565, \scratch1 and \in2_8888, \in2_8888, \maskG or \out1_565, \out1_565, \in2_8888 srl \out2_565, \out1_565, 16 .endm /* * Multiply pixel (a8) with single pixel (a8r8g8b8). It requires maskLSR needed * for rounding process. maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro MIPS_UN8x4_MUL_UN8 s_8888, \ m_8, \ d_8888, \ maskLSR, \ scratch1, scratch2, scratch3 replv.ph \m_8, \m_8 /* 0 | M | 0 | M */ muleu_s.ph.qbl \scratch1, \s_8888, \m_8 /* A*M | R*M */ muleu_s.ph.qbr \scratch2, \s_8888, \m_8 /* G*M | B*M */ shra_r.ph \scratch3, \scratch1, 8 shra_r.ph \d_8888, \scratch2, 8 and \scratch3, \scratch3, \maskLSR /* 0 |A*M| 0 |R*M */ and \d_8888, \d_8888, \maskLSR /* 0 |G*M| 0 |B*M */ addq.ph \scratch1, \scratch1, \scratch3 /* A*M+A*M | R*M+R*M */ addq.ph \scratch2, \scratch2, \d_8888 /* G*M+G*M | B*M+B*M */ shra_r.ph \scratch1, \scratch1, 8 shra_r.ph \scratch2, \scratch2, 8 precr.qb.ph \d_8888, \scratch1, \scratch2 .endm /* * Multiply two pixels (a8) with two pixels (a8r8g8b8). It requires maskLSR * needed for rounding process. 
maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro MIPS_2xUN8x4_MUL_2xUN8 s1_8888, \ s2_8888, \ m1_8, \ m2_8, \ d1_8888, \ d2_8888, \ maskLSR, \ scratch1, scratch2, scratch3, \ scratch4, scratch5, scratch6 replv.ph \m1_8, \m1_8 /* 0 | M1 | 0 | M1 */ replv.ph \m2_8, \m2_8 /* 0 | M2 | 0 | M2 */ muleu_s.ph.qbl \scratch1, \s1_8888, \m1_8 /* A1*M1 | R1*M1 */ muleu_s.ph.qbr \scratch2, \s1_8888, \m1_8 /* G1*M1 | B1*M1 */ muleu_s.ph.qbl \scratch3, \s2_8888, \m2_8 /* A2*M2 | R2*M2 */ muleu_s.ph.qbr \scratch4, \s2_8888, \m2_8 /* G2*M2 | B2*M2 */ shra_r.ph \scratch5, \scratch1, 8 shra_r.ph \d1_8888, \scratch2, 8 shra_r.ph \scratch6, \scratch3, 8 shra_r.ph \d2_8888, \scratch4, 8 and \scratch5, \scratch5, \maskLSR /* 0 |A1*M1| 0 |R1*M1 */ and \d1_8888, \d1_8888, \maskLSR /* 0 |G1*M1| 0 |B1*M1 */ and \scratch6, \scratch6, \maskLSR /* 0 |A2*M2| 0 |R2*M2 */ and \d2_8888, \d2_8888, \maskLSR /* 0 |G2*M2| 0 |B2*M2 */ addq.ph \scratch1, \scratch1, \scratch5 addq.ph \scratch2, \scratch2, \d1_8888 addq.ph \scratch3, \scratch3, \scratch6 addq.ph \scratch4, \scratch4, \d2_8888 shra_r.ph \scratch1, \scratch1, 8 shra_r.ph \scratch2, \scratch2, 8 shra_r.ph \scratch3, \scratch3, 8 shra_r.ph \scratch4, \scratch4, 8 precr.qb.ph \d1_8888, \scratch1, \scratch2 precr.qb.ph \d2_8888, \scratch3, \scratch4 .endm /* * Multiply pixel (a8r8g8b8) with single pixel (a8r8g8b8). It requires maskLSR * needed for rounding process. 
maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro MIPS_UN8x4_MUL_UN8x4 s_8888, \ m_8888, \ d_8888, \ maskLSR, \ scratch1, scratch2, scratch3, scratch4 preceu.ph.qbl \scratch1, \m_8888 /* 0 | A | 0 | R */ preceu.ph.qbr \scratch2, \m_8888 /* 0 | G | 0 | B */ muleu_s.ph.qbl \scratch3, \s_8888, \scratch1 /* A*A | R*R */ muleu_s.ph.qbr \scratch4, \s_8888, \scratch2 /* G*G | B*B */ shra_r.ph \scratch1, \scratch3, 8 shra_r.ph \scratch2, \scratch4, 8 and \scratch1, \scratch1, \maskLSR /* 0 |A*A| 0 |R*R */ and \scratch2, \scratch2, \maskLSR /* 0 |G*G| 0 |B*B */ addq.ph \scratch1, \scratch1, \scratch3 addq.ph \scratch2, \scratch2, \scratch4 shra_r.ph \scratch1, \scratch1, 8 shra_r.ph \scratch2, \scratch2, 8 precr.qb.ph \d_8888, \scratch1, \scratch2 .endm /* * Multiply two pixels (a8r8g8b8) with two pixels (a8r8g8b8). It requires * maskLSR needed for rounding process. maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro MIPS_2xUN8x4_MUL_2xUN8x4 s1_8888, \ s2_8888, \ m1_8888, \ m2_8888, \ d1_8888, \ d2_8888, \ maskLSR, \ scratch1, scratch2, scratch3, \ scratch4, scratch5, scratch6 preceu.ph.qbl \scratch1, \m1_8888 /* 0 | A | 0 | R */ preceu.ph.qbr \scratch2, \m1_8888 /* 0 | G | 0 | B */ preceu.ph.qbl \scratch3, \m2_8888 /* 0 | A | 0 | R */ preceu.ph.qbr \scratch4, \m2_8888 /* 0 | G | 0 | B */ muleu_s.ph.qbl \scratch5, \s1_8888, \scratch1 /* A*A | R*R */ muleu_s.ph.qbr \scratch6, \s1_8888, \scratch2 /* G*G | B*B */ muleu_s.ph.qbl \scratch1, \s2_8888, \scratch3 /* A*A | R*R */ muleu_s.ph.qbr \scratch2, \s2_8888, \scratch4 /* G*G | B*B */ shra_r.ph \scratch3, \scratch5, 8 shra_r.ph \scratch4, \scratch6, 8 shra_r.ph \d1_8888, \scratch1, 8 shra_r.ph \d2_8888, \scratch2, 8 and \scratch3, \scratch3, \maskLSR /* 0 |A*A| 0 |R*R */ and \scratch4, \scratch4, \maskLSR /* 0 |G*G| 0 |B*B */ and \d1_8888, \d1_8888, \maskLSR /* 0 |A*A| 0 |R*R */ and \d2_8888, \d2_8888, \maskLSR /* 0 |G*G| 0 |B*B */ addq.ph \scratch3, \scratch3, \scratch5 addq.ph \scratch4, 
\scratch4, \scratch6 addq.ph \d1_8888, \d1_8888, \scratch1 addq.ph \d2_8888, \d2_8888, \scratch2 shra_r.ph \scratch3, \scratch3, 8 shra_r.ph \scratch4, \scratch4, 8 shra_r.ph \scratch5, \d1_8888, 8 shra_r.ph \scratch6, \d2_8888, 8 precr.qb.ph \d1_8888, \scratch3, \scratch4 precr.qb.ph \d2_8888, \scratch5, \scratch6 .endm /* * OVER operation on single a8r8g8b8 source pixel (s_8888) and single a8r8g8b8 * destination pixel (d_8888) using a8 mask (m_8). It also requires maskLSR * needed for rounding process. maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro OVER_8888_8_8888 s_8888, \ m_8, \ d_8888, \ out_8888, \ maskLSR, \ scratch1, scratch2, scratch3, scratch4 MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \ \scratch1, \maskLSR, \ \scratch2, \scratch3, \scratch4 not \scratch2, \scratch1 srl \scratch2, \scratch2, 24 MIPS_UN8x4_MUL_UN8 \d_8888, \scratch2, \ \d_8888, \maskLSR, \ \scratch3, \scratch4, \out_8888 addu_s.qb \out_8888, \d_8888, \scratch1 .endm /* * OVER operation on two a8r8g8b8 source pixels (s1_8888 and s2_8888) and two * a8r8g8b8 destination pixels (d1_8888 and d2_8888) using a8 masks (m1_8 and * m2_8). It also requires maskLSR needed for rounding process. 
maskLSR must * have following value: * li maskLSR, 0x00ff00ff */ .macro OVER_2x8888_2x8_2x8888 s1_8888, \ s2_8888, \ m1_8, \ m2_8, \ d1_8888, \ d2_8888, \ out1_8888, \ out2_8888, \ maskLSR, \ scratch1, scratch2, scratch3, \ scratch4, scratch5, scratch6 MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \ \m1_8, \m2_8, \ \scratch1, \scratch2, \ \maskLSR, \ \scratch3, \scratch4, \out1_8888, \ \out2_8888, \scratch5, \scratch6 not \scratch3, \scratch1 srl \scratch3, \scratch3, 24 not \scratch4, \scratch2 srl \scratch4, \scratch4, 24 MIPS_2xUN8x4_MUL_2xUN8 \d1_8888, \d2_8888, \ \scratch3, \scratch4, \ \d1_8888, \d2_8888, \ \maskLSR, \ \scratch5, \scratch6, \out1_8888, \ \out2_8888, \scratch3, \scratch4 addu_s.qb \out1_8888, \d1_8888, \scratch1 addu_s.qb \out2_8888, \d2_8888, \scratch2 .endm /* * OVER operation on single a8r8g8b8 source pixel (s_8888) and single a8r8g8b8 * destination pixel (d_8888). It also requires maskLSR needed for rounding * process. maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro OVER_8888_8888 s_8888, \ d_8888, \ out_8888, \ maskLSR, \ scratch1, scratch2, scratch3, scratch4 not \scratch1, \s_8888 srl \scratch1, \scratch1, 24 MIPS_UN8x4_MUL_UN8 \d_8888, \scratch1, \ \out_8888, \maskLSR, \ \scratch2, \scratch3, \scratch4 addu_s.qb \out_8888, \out_8888, \s_8888 .endm /* * OVER operation on two a8r8g8b8 source pixels (s1_8888 and s2_8888) and two * a8r8g8b8 destination pixels (d1_8888 and d2_8888). It also requires maskLSR * needed for rounding process. 
maskLSR must have following value: * li maskLSR, 0x00ff00ff */ .macro OVER_2x8888_2x8888 s1_8888, \ s2_8888, \ d1_8888, \ d2_8888, \ out1_8888, \ out2_8888, \ maskLSR, \ scratch1, scratch2, scratch3, \ scratch4, scratch5, scratch6 not \scratch1, \s1_8888 srl \scratch1, \scratch1, 24 not \scratch2, \s2_8888 srl \scratch2, \scratch2, 24 MIPS_2xUN8x4_MUL_2xUN8 \d1_8888, \d2_8888, \ \scratch1, \scratch2, \ \out1_8888, \out2_8888, \ \maskLSR, \ \scratch3, \scratch4, \scratch5, \ \scratch6, \d1_8888, \d2_8888 addu_s.qb \out1_8888, \out1_8888, \s1_8888 addu_s.qb \out2_8888, \out2_8888, \s2_8888 .endm .macro MIPS_UN8x4_MUL_UN8_ADD_UN8x4 s_8888, \ m_8, \ d_8888, \ out_8888, \ maskLSR, \ scratch1, scratch2, scratch3 MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \ \out_8888, \maskLSR, \ \scratch1, \scratch2, \scratch3 addu_s.qb \out_8888, \out_8888, \d_8888 .endm .macro MIPS_2xUN8x4_MUL_2xUN8_ADD_2xUN8x4 s1_8888, \ s2_8888, \ m1_8, \ m2_8, \ d1_8888, \ d2_8888, \ out1_8888, \ out2_8888, \ maskLSR, \ scratch1, scratch2, scratch3, \ scratch4, scratch5, scratch6 MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \ \m1_8, \m2_8, \ \out1_8888, \out2_8888, \ \maskLSR, \ \scratch1, \scratch2, \scratch3, \ \scratch4, \scratch5, \scratch6 addu_s.qb \out1_8888, \out1_8888, \d1_8888 addu_s.qb \out2_8888, \out2_8888, \d2_8888 .endm .macro BILINEAR_INTERPOLATE_SINGLE_PIXEL tl, tr, bl, br, \ scratch1, scratch2, \ alpha, red, green, blue \ wt1, wt2, wb1, wb2 andi \scratch1, \tl, 0xff andi \scratch2, \tr, 0xff andi \alpha, \bl, 0xff andi \red, \br, 0xff multu $ac0, \wt1, \scratch1 maddu $ac0, \wt2, \scratch2 maddu $ac0, \wb1, \alpha maddu $ac0, \wb2, \red ext \scratch1, \tl, 8, 8 ext \scratch2, \tr, 8, 8 ext \alpha, \bl, 8, 8 ext \red, \br, 8, 8 multu $ac1, \wt1, \scratch1 maddu $ac1, \wt2, \scratch2 maddu $ac1, \wb1, \alpha maddu $ac1, \wb2, \red ext \scratch1, \tl, 16, 8 ext \scratch2, \tr, 16, 8 ext \alpha, \bl, 16, 8 ext \red, \br, 16, 8 mflo \blue, $ac0 multu $ac2, \wt1, \scratch1 maddu $ac2, \wt2, 
\scratch2 maddu $ac2, \wb1, \alpha maddu $ac2, \wb2, \red ext \scratch1, \tl, 24, 8 ext \scratch2, \tr, 24, 8 ext \alpha, \bl, 24, 8 ext \red, \br, 24, 8 mflo \green, $ac1 multu $ac3, \wt1, \scratch1 maddu $ac3, \wt2, \scratch2 maddu $ac3, \wb1, \alpha maddu $ac3, \wb2, \red mflo \red, $ac2 mflo \alpha, $ac3 precr.qb.ph \alpha, \alpha, \red precr.qb.ph \scratch1, \green, \blue precrq.qb.ph \tl, \alpha, \scratch1 .endm #endif //PIXMAN_MIPS_DSPR2_ASM_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mips-dspr2.c0000664000175000017500000006134514712446423020061 0ustar00mattst88mattst88/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. 
BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Author: Nemanja Lukic (nemanja.lukic@rt-rk.com) */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #include "pixman-mips-dspr2.h" PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_x888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_8888_0565, uint32_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_0565_8888, uint16_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (DO_FAST_MEMCPY, src_0565_0565, uint16_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (DO_FAST_MEMCPY, src_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (DO_FAST_MEMCPY, src_0888_0888, uint8_t, 3, uint8_t, 3) #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_0888_8888_rev, uint8_t, 3, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_0888_0565_rev, uint8_t, 3, uint16_t, 1) #endif PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_pixbuf_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_rpixbuf_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, over_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, over_8888_0565, uint32_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, add_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, add_8888_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, out_reverse_8_0565, uint8_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST 
(0, out_reverse_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (0, src_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (0, src_n_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8888_8888_ca, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8888_0565_ca, uint32_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8_0565, uint8_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, add_n_8_8, uint8_t, 1, uint8_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, add_n_8_8888, uint8_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, over_8888_n_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, over_8888_n_0565, uint32_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, over_0565_n_0565, uint16_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, add_8888_n_8888, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, over_n_0565, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, over_n_8888, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, over_reverse_n_8888, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_N_DST (0, in_n_8, uint8_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (add_8_8_8, uint8_t, 1, uint8_t, 1, uint8_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (add_8888_8_8888, uint32_t, 1, uint8_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (add_8888_8888_8888, uint32_t, 1, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (add_0565_8_0565, uint16_t, 1, uint8_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (over_8888_8_8888, uint32_t, 
1, uint8_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (over_8888_8_0565, uint32_t, 1, uint8_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (over_0565_8_0565, uint16_t, 1, uint8_t, 1, uint16_t, 1) PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST (over_8888_8888_8888, uint32_t, 1, uint32_t, 1, uint32_t, 1) PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_DST (8888_8888, OVER, uint32_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_DST (8888_0565, OVER, uint32_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_DST (0565_8888, SRC, uint16_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 8888_8888, SRC, uint32_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 8888_0565, SRC, uint32_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 0565_8888, SRC, uint16_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 0565_0565, SRC, uint16_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, 8888_8888, OVER, uint32_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, 8888_8888, ADD, uint32_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, 8888_8_0565, OVER, uint32_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, 0565_8_0565, OVER, uint16_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 8888_8_8888, SRC, uint32_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 8888_8_0565, SRC, uint32_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 0565_8_x888, SRC, uint16_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 0565_8_0565, SRC, uint16_t, uint16_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, 8888_8_8888, OVER, uint32_t, uint32_t) PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, 8888_8_8888, ADD, uint32_t, uint32_t) static pixman_bool_t mips_dspr2_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t _xor) { uint8_t *byte_line; uint32_t 
byte_width; switch (bpp) { case 16: stride = stride * (int) sizeof (uint32_t) / 2; byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x); byte_width = width * 2; stride *= 2; while (height--) { uint8_t *dst = byte_line; byte_line += stride; pixman_fill_buff16_mips (dst, byte_width, _xor & 0xffff); } return TRUE; case 32: stride = stride * (int) sizeof (uint32_t) / 4; byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x); byte_width = width * 4; stride *= 4; while (height--) { uint8_t *dst = byte_line; byte_line += stride; pixman_fill_buff32_mips (dst, byte_width, _xor); } return TRUE; default: return FALSE; } } static pixman_bool_t mips_dspr2_blt (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height) { if (src_bpp != dst_bpp) return FALSE; uint8_t *src_bytes; uint8_t *dst_bytes; uint32_t byte_width; switch (src_bpp) { case 16: src_stride = src_stride * (int) sizeof (uint32_t) / 2; dst_stride = dst_stride * (int) sizeof (uint32_t) / 2; src_bytes =(uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x)); dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dest_y) + (dest_x)); byte_width = width * 2; src_stride *= 2; dst_stride *= 2; while (height--) { uint8_t *src = src_bytes; uint8_t *dst = dst_bytes; src_bytes += src_stride; dst_bytes += dst_stride; pixman_mips_fast_memcpy (dst, src, byte_width); } return TRUE; case 32: src_stride = src_stride * (int) sizeof (uint32_t) / 4; dst_stride = dst_stride * (int) sizeof (uint32_t) / 4; src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x)); dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dest_y) + (dest_x)); byte_width = width * 4; src_stride *= 4; dst_stride *= 4; while (height--) { uint8_t *src = src_bytes; uint8_t *dst = dst_bytes; src_bytes += src_stride; dst_bytes += dst_stride; 
pixman_mips_fast_memcpy (dst, src, byte_width); } return TRUE; default: return FALSE; } } static const pixman_fast_path_t mips_dspr2_fast_paths[] = { PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, mips_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, mips_composite_src_0565_0565), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, mips_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, mips_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, mips_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, mips_composite_src_8888_0565), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, mips_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, mips_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, mips_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, mips_composite_src_0565_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, mips_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, mips_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, mips_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, mips_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, mips_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, mips_composite_src_8888_8888), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, mips_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, mips_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, mips_composite_src_0888_0888), #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, x8r8g8b8, mips_composite_src_0888_8888_rev), PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, r5g6b5, mips_composite_src_0888_0565_rev), #endif PIXMAN_STD_FAST_PATH 
(SRC, pixbuf, pixbuf, a8r8g8b8, mips_composite_src_pixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8b8g8r8, mips_composite_src_rpixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8r8g8b8, mips_composite_src_rpixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8b8g8r8, mips_composite_src_pixbuf_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, mips_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, mips_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, mips_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, mips_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8, mips_composite_src_n_8_8), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, mips_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, mips_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, mips_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, mips_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, mips_composite_over_n_8888_0565_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, mips_composite_over_n_8888_0565_ca), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8, mips_composite_over_n_8_8), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, mips_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, mips_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, mips_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, mips_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, mips_composite_over_n_8_0565), PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, mips_composite_over_n_8_0565), PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, mips_composite_over_n_0565), PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, mips_composite_over_n_8888), 
PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, mips_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, mips_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, mips_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, r5g6b5, mips_composite_over_8888_n_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, b5g6r5, mips_composite_over_8888_n_0565), PIXMAN_STD_FAST_PATH (OVER, r5g6b5, solid, r5g6b5, mips_composite_over_0565_n_0565), PIXMAN_STD_FAST_PATH (OVER, b5g6r5, solid, b5g6r5, mips_composite_over_0565_n_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, mips_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, mips_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, mips_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, mips_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, r5g6b5, mips_composite_over_8888_8_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, b5g6r5, mips_composite_over_8888_8_0565), PIXMAN_STD_FAST_PATH (OVER, r5g6b5, a8, r5g6b5, mips_composite_over_0565_8_0565), PIXMAN_STD_FAST_PATH (OVER, b5g6r5, a8, b5g6r5, mips_composite_over_0565_8_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, mips_composite_over_8888_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, mips_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, mips_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, mips_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, mips_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, mips_composite_over_8888_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, mips_composite_over_8888_0565), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, mips_composite_add_n_8_8), PIXMAN_STD_FAST_PATH (ADD, solid, a8, 
a8r8g8b8, mips_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8b8g8r8, mips_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8, a8, a8, mips_composite_add_8_8_8), PIXMAN_STD_FAST_PATH (ADD, r5g6b5, a8, r5g6b5, mips_composite_add_0565_8_0565), PIXMAN_STD_FAST_PATH (ADD, b5g6r5, a8, b5g6r5, mips_composite_add_0565_8_0565), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8, a8r8g8b8, mips_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, a8, a8b8g8r8, mips_composite_add_8888_8_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, a8r8g8b8, mips_composite_add_8888_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, solid, a8r8g8b8, mips_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, solid, a8b8g8r8, mips_composite_add_8888_n_8888), PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, mips_composite_add_8_8), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, mips_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, mips_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, r5g6b5, mips_composite_out_reverse_8_0565), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, b5g6r5, mips_composite_out_reverse_8_0565), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, a8r8g8b8, mips_composite_out_reverse_8_8888), PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, a8b8g8r8, mips_composite_out_reverse_8_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, mips_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, mips_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (IN, solid, null, a8, mips_composite_in_n_8), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mips_8888_8888), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, mips_8888_8888), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mips_8888_8888), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, mips_8888_8888), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (OVER, 
a8r8g8b8, r5g6b5, mips_8888_0565), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, b5g6r5, mips_8888_0565), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (SRC, b5g6r5, x8b8g8r8, mips_0565_8888), PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, x8r8g8b8, mips_0565_8888), /* Note: NONE repeat is not supported yet */ SIMPLE_NEAREST_FAST_PATH_COVER (SRC, r5g6b5, a8r8g8b8, mips_0565_8888), SIMPLE_NEAREST_FAST_PATH_COVER (SRC, b5g6r5, a8b8g8r8, mips_0565_8888), SIMPLE_NEAREST_FAST_PATH_PAD (SRC, r5g6b5, a8r8g8b8, mips_0565_8888), SIMPLE_NEAREST_FAST_PATH_PAD (SRC, b5g6r5, a8b8g8r8, mips_0565_8888), SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8r8g8b8, r5g6b5, mips_8888_8_0565), SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8b8g8r8, b5g6r5, mips_8888_8_0565), SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, r5g6b5, r5g6b5, mips_0565_8_0565), SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, b5g6r5, b5g6r5, mips_0565_8_0565), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, r5g6b5, mips_8888_0565), SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, r5g6b5, mips_8888_0565), SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, x8r8g8b8, mips_0565_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, r5g6b5, mips_0565_0565), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, mips_8888_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, mips_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, mips_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, mips_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, r5g6b5, mips_8888_8_0565), 
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, r5g6b5, mips_8888_8_0565), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, x8r8g8b8, mips_0565_8_x888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, r5g6b5, mips_0565_8_0565), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mips_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mips_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, mips_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, mips_8888_8_8888), { PIXMAN_OP_NONE }, }; static void mips_dspr2_combine_over_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) pixman_composite_over_8888_8888_8888_asm_mips ( dest, (uint32_t *)src, (uint32_t *)mask, width); else pixman_composite_over_8888_8888_asm_mips ( dest, (uint32_t *)src, width); } pixman_implementation_t * _pixman_implementation_create_mips_dspr2 (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, mips_dspr2_fast_paths); imp->combine_32[PIXMAN_OP_OVER] = mips_dspr2_combine_over_u; imp->blt = mips_dspr2_blt; imp->fill = mips_dspr2_fill; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mips-dspr2.h0000664000175000017500000006770514712446423020074 0ustar00mattst88mattst88/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Author: Nemanja Lukic (nemanja.lukic@rt-rk.com) */ #ifndef PIXMAN_MIPS_DSPR2_H #define PIXMAN_MIPS_DSPR2_H #include "pixman-private.h" #include "pixman-inlines.h" #define SKIP_ZERO_SRC 1 #define SKIP_ZERO_MASK 2 #define DO_FAST_MEMCPY 3 void pixman_mips_fast_memcpy (void *dst, void *src, uint32_t n_bytes); void pixman_fill_buff16_mips (void *dst, uint32_t n_bytes, uint16_t value); void pixman_fill_buff32_mips (void *dst, uint32_t n_bytes, uint32_t value); /****************************************************************/ #define PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST(flags, name, \ src_type, src_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_mips (dst_type *dst, \ src_type *src, \ int32_t w); \ \ static void \ mips_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line, *dst; \ src_type *src_line, *src; \ int32_t dst_stride, src_stride; \ int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format) / 8; \ \ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \ src_stride, src_line, src_cnt); \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ \ while (height--) \ { \ dst = dst_line; \ dst_line += dst_stride; \ src = src_line; \ src_line += src_stride; \ \ if (flags == DO_FAST_MEMCPY) \ pixman_mips_fast_memcpy (dst, src, width * bpp); \ else \ pixman_composite_##name##_asm_mips (dst, src, width); \ } \ } /****************************************************************/ #define PIXMAN_MIPS_BIND_FAST_PATH_N_DST(flags, name, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_mips (dst_type *dst, \ uint32_t src, \ int32_t w); \ \ static void \ mips_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line, *dst; \ int32_t dst_stride; \ uint32_t src; \ \ src = _pixman_image_get_solid ( \ imp, src_image, dest_image->bits.format); \ \ if 
((flags & SKIP_ZERO_SRC) && src == 0) \ return; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ \ while (height--) \ { \ dst = dst_line; \ dst_line += dst_stride; \ \ pixman_composite_##name##_asm_mips (dst, src, width); \ } \ } /*******************************************************************/ #define PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST(flags, name, \ mask_type, mask_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_mips (dst_type *dst, \ uint32_t src, \ mask_type *mask, \ int32_t w); \ \ static void \ mips_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line, *dst; \ mask_type *mask_line, *mask; \ int32_t dst_stride, mask_stride; \ uint32_t src; \ \ src = _pixman_image_get_solid ( \ imp, src_image, dest_image->bits.format); \ \ if ((flags & SKIP_ZERO_SRC) && src == 0) \ return; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \ mask_stride, mask_line, mask_cnt); \ \ while (height--) \ { \ dst = dst_line; \ dst_line += dst_stride; \ mask = mask_line; \ mask_line += mask_stride; \ pixman_composite_##name##_asm_mips (dst, src, mask, width); \ } \ } /*******************************************************************/ #define PIXMAN_MIPS_BIND_FAST_PATH_SRC_N_DST(flags, name, \ src_type, src_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_mips (dst_type *dst, \ src_type *src, \ uint32_t mask, \ int32_t w); \ \ static void \ mips_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line, *dst; \ src_type *src_line, *src; \ int32_t dst_stride, src_stride; \ uint32_t mask; \ \ mask = _pixman_image_get_solid ( \ imp, mask_image, dest_image->bits.format); \ \ if ((flags & SKIP_ZERO_MASK) && mask == 0) \ return; 
\ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \ src_stride, src_line, src_cnt); \ \ while (height--) \ { \ dst = dst_line; \ dst_line += dst_stride; \ src = src_line; \ src_line += src_stride; \ \ pixman_composite_##name##_asm_mips (dst, src, mask, width); \ } \ } /************************************************************************/ #define PIXMAN_MIPS_BIND_FAST_PATH_SRC_MASK_DST(name, src_type, src_cnt, \ mask_type, mask_cnt, \ dst_type, dst_cnt) \ void \ pixman_composite_##name##_asm_mips (dst_type *dst, \ src_type *src, \ mask_type *mask, \ int32_t w); \ \ static void \ mips_composite_##name (pixman_implementation_t *imp, \ pixman_composite_info_t *info) \ { \ PIXMAN_COMPOSITE_ARGS (info); \ dst_type *dst_line, *dst; \ src_type *src_line, *src; \ mask_type *mask_line, *mask; \ int32_t dst_stride, src_stride, mask_stride; \ \ PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \ dst_stride, dst_line, dst_cnt); \ PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \ src_stride, src_line, src_cnt); \ PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \ mask_stride, mask_line, mask_cnt); \ \ while (height--) \ { \ dst = dst_line; \ dst_line += dst_stride; \ mask = mask_line; \ mask_line += mask_stride; \ src = src_line; \ src_line += src_stride; \ pixman_composite_##name##_asm_mips (dst, src, mask, width); \ } \ } /****************************************************************************/ #define PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_DST(name, op, \ src_type, dst_type) \ void \ pixman_scaled_nearest_scanline_##name##_##op##_asm_mips ( \ dst_type * dst, \ const src_type * src, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x); \ \ static force_inline void \ scaled_nearest_scanline_mips_##name##_##op (dst_type * pd, \ const src_type * ps, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t 
max_vx, \ pixman_bool_t zero_src) \ { \ pixman_scaled_nearest_scanline_##name##_##op##_asm_mips (pd, ps, w, \ vx, unit_x); \ } \ \ FAST_NEAREST_MAINLOOP (mips_##name##_cover_##op, \ scaled_nearest_scanline_mips_##name##_##op, \ src_type, dst_type, COVER) \ FAST_NEAREST_MAINLOOP (mips_##name##_none_##op, \ scaled_nearest_scanline_mips_##name##_##op, \ src_type, dst_type, NONE) \ FAST_NEAREST_MAINLOOP (mips_##name##_pad_##op, \ scaled_nearest_scanline_mips_##name##_##op, \ src_type, dst_type, PAD) /* Provide entries for the fast path table */ #define PIXMAN_MIPS_SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \ SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func), \ SIMPLE_NEAREST_FAST_PATH_NONE (op,s,d,func), \ SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func) /*****************************************************************************/ #define PIXMAN_MIPS_BIND_SCALED_NEAREST_SRC_A8_DST(flags, name, op, \ src_type, dst_type) \ void \ pixman_scaled_nearest_scanline_##name##_##op##_asm_mips ( \ dst_type * dst, \ const src_type * src, \ const uint8_t * mask, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x); \ \ static force_inline void \ scaled_nearest_scanline_mips_##name##_##op (const uint8_t * mask, \ dst_type * pd, \ const src_type * ps, \ int32_t w, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ if ((flags & SKIP_ZERO_SRC) && zero_src) \ return; \ pixman_scaled_nearest_scanline_##name##_##op##_asm_mips (pd, ps, \ mask, w, \ vx, unit_x); \ } \ \ FAST_NEAREST_MAINLOOP_COMMON (mips_##name##_cover_##op, \ scaled_nearest_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, COVER, TRUE, FALSE)\ FAST_NEAREST_MAINLOOP_COMMON (mips_##name##_none_##op, \ scaled_nearest_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, NONE, TRUE, FALSE) \ FAST_NEAREST_MAINLOOP_COMMON (mips_##name##_pad_##op, \ scaled_nearest_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, PAD, TRUE, FALSE) 
/****************************************************************************/ #define PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST(flags, name, op, \ src_type, dst_type) \ void \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips( \ dst_type * dst, \ const src_type * src_top, \ const src_type * src_bottom, \ int32_t w, \ int wt, \ int wb, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x); \ static force_inline void \ scaled_bilinear_scanline_mips_##name##_##op (dst_type * dst, \ const uint32_t * mask, \ const src_type * src_top, \ const src_type * src_bottom, \ int32_t w, \ int wt, \ int wb, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ if ((flags & SKIP_ZERO_SRC) && zero_src) \ return; \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips (dst, src_top, \ src_bottom, w, \ wt, wb, \ vx, unit_x); \ } \ \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_cover_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint32_t, dst_type, COVER, FLAG_NONE) \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_none_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint32_t, dst_type, NONE, FLAG_NONE) \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_pad_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint32_t, dst_type, PAD, FLAG_NONE) \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_normal_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint32_t, dst_type, NORMAL, \ FLAG_NONE) /*****************************************************************************/ #define PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST(flags, name, op, \ src_type, dst_type) \ void \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips ( \ dst_type * dst, \ const uint8_t * mask, \ const src_type * top, \ const src_type * bottom, \ int wt, \ int wb, \ pixman_fixed_t x, \ pixman_fixed_t ux, \ int width); \ \ static force_inline void \ scaled_bilinear_scanline_mips_##name##_##op (dst_type * dst, \ 
const uint8_t * mask, \ const src_type * src_top, \ const src_type * src_bottom, \ int32_t w, \ int wt, \ int wb, \ pixman_fixed_t vx, \ pixman_fixed_t unit_x, \ pixman_fixed_t max_vx, \ pixman_bool_t zero_src) \ { \ if ((flags & SKIP_ZERO_SRC) && zero_src) \ return; \ pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips ( \ dst, mask, src_top, src_bottom, wt, wb, vx, unit_x, w); \ } \ \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_cover_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, COVER, \ FLAG_HAVE_NON_SOLID_MASK) \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_none_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, NONE, \ FLAG_HAVE_NON_SOLID_MASK) \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_pad_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, PAD, \ FLAG_HAVE_NON_SOLID_MASK) \ FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_normal_##op, \ scaled_bilinear_scanline_mips_##name##_##op, \ src_type, uint8_t, dst_type, NORMAL, \ FLAG_HAVE_NON_SOLID_MASK) #endif //PIXMAN_MIPS_DSPR2_H ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mips-memcpy-asm.S0000664000175000017500000002447414712446423021061 0ustar00mattst88mattst88/* * Copyright (c) 2012 * MIPS Technologies, Inc., California. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of the MIPS Technologies, Inc., nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "pixman-mips-dspr2-asm.h" /* * This routine could be optimized for MIPS64. The current code only * uses MIPS32 instructions. 
*/ #ifdef EB # define LWHI lwl /* high part is left in big-endian */ # define SWHI swl /* high part is left in big-endian */ # define LWLO lwr /* low part is right in big-endian */ # define SWLO swr /* low part is right in big-endian */ #else # define LWHI lwr /* high part is right in little-endian */ # define SWHI swr /* high part is right in little-endian */ # define LWLO lwl /* low part is left in big-endian */ # define SWLO swl /* low part is left in big-endian */ #endif LEAF_MIPS32R2(pixman_mips_fast_memcpy) slti AT, a2, 8 bne AT, zero, $last8 move v0, a0 /* memcpy returns the dst pointer */ /* Test if the src and dst are word-aligned, or can be made word-aligned */ xor t8, a1, a0 andi t8, t8, 0x3 /* t8 is a0/a1 word-displacement */ bne t8, zero, $unaligned negu a3, a0 andi a3, a3, 0x3 /* we need to copy a3 bytes to make a0/a1 aligned */ beq a3, zero, $chk16w /* when a3=0 then the dst (a0) is word-aligned */ subu a2, a2, a3 /* now a2 is the remining bytes count */ LWHI t8, 0(a1) addu a1, a1, a3 SWHI t8, 0(a0) addu a0, a0, a3 /* Now the dst/src are mutually word-aligned with word-aligned addresses */ $chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? 
*/ /* t8 is the byte count after 64-byte chunks */ beq a2, t8, $chk8w /* if a2==t8, no 64-byte chunks */ /* There will be at most 1 32-byte chunk after it */ subu a3, a2, t8 /* subtract from a2 the reminder */ /* Here a3 counts bytes in 16w chunks */ addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */ addu t0, a0, a2 /* t0 is the "past the end" address */ /* * When in the loop we exercise "pref 30, x(a0)", the a0+x should not be past * the "t0-32" address * This means: for x=128 the last "safe" a0 address is "t0-160" * Alternatively, for x=64 the last "safe" a0 address is "t0-96" * In the current version we use "pref 30, 128(a0)", so "t0-160" is the limit */ subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */ pref 0, 0(a1) /* bring the first line of src, addr 0 */ pref 0, 32(a1) /* bring the second line of src, addr 32 */ pref 0, 64(a1) /* bring the third line of src, addr 64 */ pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */ /* In case the a0 > t9 don't use "pref 30" at all */ sgtu v1, a0, t9 bgtz v1, $loop16w /* skip "pref 30, 64(a0)" for too short arrays */ nop /* otherwise, start with using pref30 */ pref 30, 64(a0) $loop16w: pref 0, 96(a1) lw t0, 0(a1) bgtz v1, $skip_pref30_96 /* skip "pref 30, 96(a0)" */ lw t1, 4(a1) pref 30, 96(a0) /* continue setting up the dest, addr 96 */ $skip_pref30_96: lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) pref 0, 128(a1) /* bring the next lines of src, addr 128 */ sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) lw t0, 32(a1) bgtz v1, $skip_pref30_128 /* skip "pref 30, 128(a0)" */ lw t1, 36(a1) pref 30, 128(a0) /* continue setting up the dest, addr 128 */ $skip_pref30_128: lw t2, 40(a1) lw t3, 44(a1) lw t4, 48(a1) lw t5, 52(a1) lw t6, 56(a1) lw t7, 60(a1) pref 0, 160(a1) /* bring the next lines of src, addr 160 */ sw t0, 32(a0) sw t1, 36(a0) sw t2, 40(a0) sw t3, 44(a0) sw t4, 48(a0) 
sw t5, 52(a0) sw t6, 56(a0) sw t7, 60(a0) addiu a0, a0, 64 /* adding 64 to dest */ sgtu v1, a0, t9 bne a0, a3, $loop16w addiu a1, a1, 64 /* adding 64 to src */ move a2, t8 /* Here we have src and dest word-aligned but less than 64-bytes to go */ $chk8w: pref 0, 0x0(a1) andi t8, a2, 0x1f /* is there a 32-byte chunk? */ /* the t8 is the reminder count past 32-bytes */ beq a2, t8, $chk1w /* when a2=t8, no 32-byte chunk */ nop lw t0, 0(a1) lw t1, 4(a1) lw t2, 8(a1) lw t3, 12(a1) lw t4, 16(a1) lw t5, 20(a1) lw t6, 24(a1) lw t7, 28(a1) addiu a1, a1, 32 sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) addiu a0, a0, 32 $chk1w: andi a2, t8, 0x3 /* now a2 is the reminder past 1w chunks */ beq a2, t8, $last8 subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */ addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */ /* copying in words (4-byte chunks) */ $wordCopy_loop: lw t3, 0(a1) /* the first t3 may be equal t0 ... optimize? */ addiu a1, a1, 4 addiu a0, a0, 4 bne a0, a3, $wordCopy_loop sw t3, -4(a0) /* For the last (<8) bytes */ $last8: blez a2, leave addu a3, a0, a2 /* a3 is the last dst address */ $last8loop: lb v1, 0(a1) addiu a1, a1, 1 addiu a0, a0, 1 bne a0, a3, $last8loop sb v1, -1(a0) leave: j ra nop /* * UNALIGNED case */ $unaligned: /* got here with a3="negu a0" */ andi a3, a3, 0x3 /* test if the a0 is word aligned */ beqz a3, $ua_chk16w subu a2, a2, a3 /* bytes left after initial a3 bytes */ LWHI v1, 0(a1) LWLO v1, 3(a1) addu a1, a1, a3 /* a3 may be here 1, 2 or 3 */ SWHI v1, 0(a0) addu a0, a0, a3 /* below the dst will be word aligned (NOTE1) */ $ua_chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? 
*/ /* t8 is the byte count after 64-byte chunks */ beq a2, t8, $ua_chk8w /* if a2==t8, no 64-byte chunks */ /* There will be at most 1 32-byte chunk after it */ subu a3, a2, t8 /* subtract from a2 the reminder */ /* Here a3 counts bytes in 16w chunks */ addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */ addu t0, a0, a2 /* t0 is the "past the end" address */ subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */ pref 0, 0(a1) /* bring the first line of src, addr 0 */ pref 0, 32(a1) /* bring the second line of src, addr 32 */ pref 0, 64(a1) /* bring the third line of src, addr 64 */ pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */ /* In case the a0 > t9 don't use "pref 30" at all */ sgtu v1, a0, t9 bgtz v1, $ua_loop16w /* skip "pref 30, 64(a0)" for too short arrays */ nop /* otherwise, start with using pref30 */ pref 30, 64(a0) $ua_loop16w: pref 0, 96(a1) LWHI t0, 0(a1) LWLO t0, 3(a1) LWHI t1, 4(a1) bgtz v1, $ua_skip_pref30_96 LWLO t1, 7(a1) pref 30, 96(a0) /* continue setting up the dest, addr 96 */ $ua_skip_pref30_96: LWHI t2, 8(a1) LWLO t2, 11(a1) LWHI t3, 12(a1) LWLO t3, 15(a1) LWHI t4, 16(a1) LWLO t4, 19(a1) LWHI t5, 20(a1) LWLO t5, 23(a1) LWHI t6, 24(a1) LWLO t6, 27(a1) LWHI t7, 28(a1) LWLO t7, 31(a1) pref 0, 128(a1) /* bring the next lines of src, addr 128 */ sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) LWHI t0, 32(a1) LWLO t0, 35(a1) LWHI t1, 36(a1) bgtz v1, $ua_skip_pref30_128 LWLO t1, 39(a1) pref 30, 128(a0) /* continue setting up the dest, addr 128 */ $ua_skip_pref30_128: LWHI t2, 40(a1) LWLO t2, 43(a1) LWHI t3, 44(a1) LWLO t3, 47(a1) LWHI t4, 48(a1) LWLO t4, 51(a1) LWHI t5, 52(a1) LWLO t5, 55(a1) LWHI t6, 56(a1) LWLO t6, 59(a1) LWHI t7, 60(a1) LWLO t7, 63(a1) pref 0, 160(a1) /* bring the next lines of src, addr 160 */ sw t0, 32(a0) sw t1, 36(a0) sw t2, 40(a0) sw t3, 44(a0) sw t4, 48(a0) sw t5, 52(a0) sw t6, 56(a0) sw t7, 60(a0) addiu a0, a0, 64 /* 
adding 64 to dest */ sgtu v1, a0, t9 bne a0, a3, $ua_loop16w addiu a1, a1, 64 /* adding 64 to src */ move a2, t8 /* Here we have src and dest word-aligned but less than 64-bytes to go */ $ua_chk8w: pref 0, 0x0(a1) andi t8, a2, 0x1f /* is there a 32-byte chunk? */ /* the t8 is the reminder count */ beq a2, t8, $ua_chk1w /* when a2=t8, no 32-byte chunk */ LWHI t0, 0(a1) LWLO t0, 3(a1) LWHI t1, 4(a1) LWLO t1, 7(a1) LWHI t2, 8(a1) LWLO t2, 11(a1) LWHI t3, 12(a1) LWLO t3, 15(a1) LWHI t4, 16(a1) LWLO t4, 19(a1) LWHI t5, 20(a1) LWLO t5, 23(a1) LWHI t6, 24(a1) LWLO t6, 27(a1) LWHI t7, 28(a1) LWLO t7, 31(a1) addiu a1, a1, 32 sw t0, 0(a0) sw t1, 4(a0) sw t2, 8(a0) sw t3, 12(a0) sw t4, 16(a0) sw t5, 20(a0) sw t6, 24(a0) sw t7, 28(a0) addiu a0, a0, 32 $ua_chk1w: andi a2, t8, 0x3 /* now a2 is the reminder past 1w chunks */ beq a2, t8, $ua_smallCopy subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */ addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */ /* copying in words (4-byte chunks) */ $ua_wordCopy_loop: LWHI v1, 0(a1) LWLO v1, 3(a1) addiu a1, a1, 4 addiu a0, a0, 4 /* note: dst=a0 is word aligned here, see NOTE1 */ bne a0, a3, $ua_wordCopy_loop sw v1, -4(a0) /* Now less than 4 bytes (value in a2) left to copy */ $ua_smallCopy: beqz a2, leave addu a3, a0, a2 /* a3 is the last dst address */ $ua_smallCopy_loop: lb v1, 0(a1) addiu a1, a1, 1 addiu a0, a0, 1 bne a0, a3, $ua_smallCopy_loop sb v1, -1(a0) j ra nop END(pixman_mips_fast_memcpy) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mips.c0000664000175000017500000000675114712446423017031 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. 
* * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #if defined(USE_MIPS_DSPR2) || defined(USE_LOONGSON_MMI) #include #include static pixman_bool_t have_feature (const char *search_string) { #if defined (__linux__) /* linux ELF */ /* Simple detection of MIPS features at runtime for Linux. * It is based on /proc/cpuinfo, which reveals hardware configuration * to user-space applications. According to MIPS (early 2010), no similar * facility is universally available on the MIPS architectures, so it's up * to individual OSes to provide such. 
*/ const char *file_name = "/proc/cpuinfo"; char cpuinfo_line[256]; FILE *f = NULL; if ((f = fopen (file_name, "r")) == NULL) return FALSE; while (fgets (cpuinfo_line, sizeof (cpuinfo_line), f) != NULL) { if (strstr (cpuinfo_line, search_string) != NULL) { fclose (f); return TRUE; } } fclose (f); #endif #if defined (CI_HAS_ALL_MIPS_CPU_FEATURES) /* Used to force feature discovery in CI where /proc/cpuinfo is unreliable. * It can happen, e.g., if executed in qemu-user-static mode. * * For such a build, MIPS-specific features need to be manually disabled by * using `PIXMAN_DISABLE` env variable * * SHOULD NOT BE USED IN RELEASE BUILD! */ #warning "Building with disabled MIPS feature discovery. SHOULD NOT BE USED IN RELEASE BUILD!" return TRUE; #endif /* Did not find string in the proc file, or not Linux ELF. */ return FALSE; } #endif pixman_implementation_t * _pixman_mips_get_implementations (pixman_implementation_t *imp) { #ifdef USE_LOONGSON_MMI /* I really don't know if some Loongson CPUs don't have MMI. */ if (!_pixman_disabled ("loongson-mmi") && have_feature ("Loongson")) imp = _pixman_implementation_create_mmx (imp); #endif #ifdef USE_MIPS_DSPR2 if (!_pixman_disabled ("mips-dspr2")) { int already_compiling_everything_for_dspr2 = 0; #if defined(__mips_dsp) && (__mips_dsp_rev >= 2) already_compiling_everything_for_dspr2 = 1; #endif if (already_compiling_everything_for_dspr2 || /* Only currently available MIPS core that supports DSPr2 is 74K. */ have_feature ("MIPS 74K")) { imp = _pixman_implementation_create_mips_dspr2 (imp); } } #endif return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-mmx.c0000664000175000017500000030575414712446423016667 0ustar00mattst88mattst88/* * Copyright Âİ 2004, 2005 Red Hat, Inc. 
* Copyright Âİ 2004 Nicholas Miell * Copyright Âİ 2005 Trolltech AS * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Red Hat not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. Red Hat makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: S¸ren Sandmann (sandmann@redhat.com) * Minor Improvements: Nicholas Miell (nmiell@gmail.com) * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com) * * Based on work by Owen Taylor */ #ifdef HAVE_CONFIG_H #include #endif #if defined USE_X86_MMX || defined USE_LOONGSON_MMI #ifdef USE_LOONGSON_MMI #include #else #include #endif #include "pixman-private.h" #include "pixman-combine32.h" #include "pixman-inlines.h" #ifdef VERBOSE #define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__) #else #define CHECKPOINT() #endif #ifdef USE_X86_MMX # if (defined(__SSE2__) || defined(__SUNPRO_C) || defined(_MSC_VER) || defined(_WIN64)) # include # else /* We have to compile with -msse to use xmmintrin.h, but that causes SSE * instructions to be generated that we don't want. 
Just duplicate the * functions we want to use. */ extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_movemask_pi8 (__m64 __A) { int ret; asm ("pmovmskb %1, %0\n\t" : "=r" (ret) : "y" (__A) ); return ret; } extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_mulhi_pu16 (__m64 __A, __m64 __B) { asm ("pmulhuw %1, %0\n\t" : "+y" (__A) : "y" (__B) ); return __A; } # define _mm_shuffle_pi16(A, N) \ ({ \ __m64 ret; \ \ asm ("pshufw %2, %1, %0\n\t" \ : "=y" (ret) \ : "y" (A), "K" ((const int8_t)N) \ ); \ \ ret; \ }) # endif #endif #ifndef _MM_SHUFFLE #define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0)) #endif /* Notes about writing mmx code * * give memory operands as the second operand. If you give it as the * first, gcc will first load it into a register, then use that * register * * ie. use * * _mm_mullo_pi16 (x, mmx_constant); * * not * * _mm_mullo_pi16 (mmx_constant, x); * * Also try to minimize dependencies. i.e. when you need a value, try * to calculate it from a value that was calculated as early as * possible. */ /* --------------- MMX primitives ------------------------------------- */ /* If __m64 is defined as a struct or union, then define M64_MEMBER to be * the name of the member used to access the data. * If __m64 requires using mm_cvt* intrinsics functions to convert between * uint64_t and __m64 values, then define USE_CVT_INTRINSICS. * If __m64 and uint64_t values can just be cast to each other directly, * then define USE_M64_CASTS. * If __m64 is a double datatype, then define USE_M64_DOUBLE. 
*/ #ifdef _MSC_VER # ifdef __clang__ # define USE_CVT_INTRINSICS # else # define M64_MEMBER m64_u64 # endif #elif defined(__ICC) # define USE_CVT_INTRINSICS #elif defined(USE_LOONGSON_MMI) # define USE_M64_DOUBLE #elif defined(__GNUC__) # define USE_M64_CASTS #elif defined(__SUNPRO_C) # if (__SUNPRO_C >= 0x5120) && !defined(__NOVECTORSIZE__) /* Solaris Studio 12.3 (Sun C 5.12) introduces __attribute__(__vector_size__) * support, and defaults to using it to define __m64, unless __NOVECTORSIZE__ * is defined. If it is used, then the mm_cvt* intrinsics must be used. */ # define USE_CVT_INTRINSICS # else /* For Studio 12.2 or older, or when __attribute__(__vector_size__) is * disabled, __m64 is defined as a struct containing "unsigned long long l_". */ # define M64_MEMBER l_ # endif #endif #if defined(USE_M64_CASTS) || defined(USE_CVT_INTRINSICS) || defined(USE_M64_DOUBLE) typedef uint64_t mmxdatafield; #else typedef __m64 mmxdatafield; #endif typedef struct { mmxdatafield mmx_4x00ff; mmxdatafield mmx_4x0080; mmxdatafield mmx_565_rgb; mmxdatafield mmx_565_unpack_multiplier; mmxdatafield mmx_565_pack_multiplier; mmxdatafield mmx_565_r; mmxdatafield mmx_565_g; mmxdatafield mmx_565_b; mmxdatafield mmx_packed_565_rb; mmxdatafield mmx_packed_565_g; mmxdatafield mmx_expand_565_g; mmxdatafield mmx_expand_565_b; mmxdatafield mmx_expand_565_r; #ifndef USE_LOONGSON_MMI mmxdatafield mmx_mask_0; mmxdatafield mmx_mask_1; mmxdatafield mmx_mask_2; mmxdatafield mmx_mask_3; #endif mmxdatafield mmx_full_alpha; mmxdatafield mmx_4x0101; mmxdatafield mmx_ff000000; } mmx_data_t; #if defined(_MSC_VER) # define MMXDATA_INIT(field, val) { val ## UI64 } #elif defined(M64_MEMBER) /* __m64 is a struct, not an integral type */ # define MMXDATA_INIT(field, val) field = { val ## ULL } #else /* mmxdatafield is an integral type */ # define MMXDATA_INIT(field, val) field = val ## ULL #endif static const mmx_data_t c = { MMXDATA_INIT (.mmx_4x00ff, 0x00ff00ff00ff00ff), MMXDATA_INIT (.mmx_4x0080, 
0x0080008000800080), MMXDATA_INIT (.mmx_565_rgb, 0x000001f0003f001f), MMXDATA_INIT (.mmx_565_unpack_multiplier, 0x0000008404100840), MMXDATA_INIT (.mmx_565_pack_multiplier, 0x2000000420000004), MMXDATA_INIT (.mmx_565_r, 0x000000f800000000), MMXDATA_INIT (.mmx_565_g, 0x0000000000fc0000), MMXDATA_INIT (.mmx_565_b, 0x00000000000000f8), MMXDATA_INIT (.mmx_packed_565_rb, 0x00f800f800f800f8), MMXDATA_INIT (.mmx_packed_565_g, 0x0000fc000000fc00), MMXDATA_INIT (.mmx_expand_565_g, 0x07e007e007e007e0), MMXDATA_INIT (.mmx_expand_565_b, 0x001f001f001f001f), MMXDATA_INIT (.mmx_expand_565_r, 0xf800f800f800f800), #ifndef USE_LOONGSON_MMI MMXDATA_INIT (.mmx_mask_0, 0xffffffffffff0000), MMXDATA_INIT (.mmx_mask_1, 0xffffffff0000ffff), MMXDATA_INIT (.mmx_mask_2, 0xffff0000ffffffff), MMXDATA_INIT (.mmx_mask_3, 0x0000ffffffffffff), #endif MMXDATA_INIT (.mmx_full_alpha, 0x00ff000000000000), MMXDATA_INIT (.mmx_4x0101, 0x0101010101010101), MMXDATA_INIT (.mmx_ff000000, 0xff000000ff000000), }; #ifdef USE_CVT_INTRINSICS # define MC(x) to_m64 (c.mmx_ ## x) #elif defined(USE_M64_CASTS) # define MC(x) ((__m64)c.mmx_ ## x) #elif defined(USE_M64_DOUBLE) # define MC(x) (*(__m64 *)&c.mmx_ ## x) #else # define MC(x) c.mmx_ ## x #endif static force_inline __m64 to_m64 (uint64_t x) { #ifdef USE_CVT_INTRINSICS return _mm_cvtsi64_m64 (x); #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */ __m64 res; res.M64_MEMBER = x; return res; #elif defined USE_M64_DOUBLE return *(__m64 *)&x; #else /* USE_M64_CASTS */ return (__m64)x; #endif } static force_inline uint64_t to_uint64 (__m64 x) { #ifdef USE_CVT_INTRINSICS return _mm_cvtm64_si64 (x); #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */ uint64_t res = x.M64_MEMBER; return res; #elif defined USE_M64_DOUBLE return *(uint64_t *)&x; #else /* USE_M64_CASTS */ return (uint64_t)x; #endif } static force_inline __m64 shift (__m64 v, int s) { if (s > 0) return _mm_slli_si64 (v, s); else if (s < 0) return _mm_srli_si64 (v, -s); 
else return v; } static force_inline __m64 negate (__m64 mask) { return _mm_xor_si64 (mask, MC (4x00ff)); } /* Computes the product of two unsigned fixed-point 8-bit values from 0 to 1 * and maps its result to the same range. * * Jim Blinn gives multiple ways to compute this in "Jim Blinn's Corner: * Notation, Notation, Notation", the first of which is * * prod(a, b) = (a * b + 128) / 255. * * By approximating the division by 255 as 257/65536 it can be replaced by a * multiply and a right shift. This is the implementation that we use in * pix_multiply(), but we _mm_mulhi_pu16() by 257 (part of SSE1 or Extended * 3DNow!, and unavailable at the time of the book's publication) to perform * the multiplication and right shift in a single operation. * * prod(a, b) = ((a * b + 128) * 257) >> 16. * * A third way (how pix_multiply() was implemented prior to 14208344) exists * also that performs the multiplication by 257 with adds and shifts. * * Where temp = a * b + 128 * * prod(a, b) = (temp + (temp >> 8)) >> 8. 
*/ static force_inline __m64 pix_multiply (__m64 a, __m64 b) { __m64 res; res = _mm_mullo_pi16 (a, b); res = _mm_adds_pu16 (res, MC (4x0080)); res = _mm_mulhi_pu16 (res, MC (4x0101)); return res; } static force_inline __m64 pix_add (__m64 a, __m64 b) { return _mm_adds_pu8 (a, b); } static force_inline __m64 expand_alpha (__m64 pixel) { return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE (3, 3, 3, 3)); } static force_inline __m64 expand_alpha_rev (__m64 pixel) { return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE (0, 0, 0, 0)); } static force_inline __m64 invert_colors (__m64 pixel) { return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE (3, 0, 1, 2)); } static force_inline __m64 over (__m64 src, __m64 srca, __m64 dest) { return _mm_adds_pu8 (src, pix_multiply (dest, negate (srca))); } static force_inline __m64 over_rev_non_pre (__m64 src, __m64 dest) { __m64 srca = expand_alpha (src); __m64 srcfaaa = _mm_or_si64 (srca, MC (full_alpha)); return over (pix_multiply (invert_colors (src), srcfaaa), srca, dest); } static force_inline __m64 in (__m64 src, __m64 mask) { return pix_multiply (src, mask); } #ifndef _MSC_VER static force_inline __m64 in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest) { return over (in (src, mask), pix_multiply (srca, mask), dest); } #else #define in_over(src, srca, mask, dest) \ over (in (src, mask), pix_multiply (srca, mask), dest) #endif /* Elemental unaligned loads */ static force_inline __m64 ldq_u(__m64 *p) { #ifdef USE_X86_MMX /* x86's alignment restrictions are very relaxed, but that's no excuse */ __m64 r; memcpy(&r, p, sizeof(__m64)); return r; #else struct __una_u64 { __m64 x __attribute__((packed)); }; const struct __una_u64 *ptr = (const struct __una_u64 *) p; return (__m64) ptr->x; #endif } static force_inline uint32_t ldl_u(const uint32_t *p) { #ifdef USE_X86_MMX /* x86's alignment restrictions are very relaxed. 
*/ uint32_t r; memcpy(&r, p, sizeof(uint32_t)); return r; #else struct __una_u32 { uint32_t x __attribute__((packed)); }; const struct __una_u32 *ptr = (const struct __una_u32 *) p; return ptr->x; #endif } static force_inline __m64 load (const uint32_t *v) { #ifdef USE_LOONGSON_MMI __m64 ret; asm ("lwc1 %0, %1\n\t" : "=f" (ret) : "m" (*v) ); return ret; #else return _mm_cvtsi32_si64 (*v); #endif } static force_inline __m64 load8888 (const uint32_t *v) { #ifdef USE_LOONGSON_MMI return _mm_unpacklo_pi8_f (*(__m32 *)v, _mm_setzero_si64 ()); #else return _mm_unpacklo_pi8 (load (v), _mm_setzero_si64 ()); #endif } static force_inline __m64 load8888u (const uint32_t *v) { uint32_t l = ldl_u (v); return load8888 (&l); } static force_inline __m64 pack8888 (__m64 lo, __m64 hi) { return _mm_packs_pu16 (lo, hi); } static force_inline void store (uint32_t *dest, __m64 v) { #ifdef USE_LOONGSON_MMI asm ("swc1 %1, %0\n\t" : "=m" (*dest) : "f" (v) : "memory" ); #else *dest = _mm_cvtsi64_si32 (v); #endif } static force_inline void store8888 (uint32_t *dest, __m64 v) { v = pack8888 (v, _mm_setzero_si64 ()); store (dest, v); } static force_inline pixman_bool_t is_equal (__m64 a, __m64 b) { #ifdef USE_LOONGSON_MMI /* __m64 is double, we can compare directly. 
*/ return a == b; #else return _mm_movemask_pi8 (_mm_cmpeq_pi8 (a, b)) == 0xff; #endif } static force_inline pixman_bool_t is_opaque (__m64 v) { #ifdef USE_LOONGSON_MMI return is_equal (_mm_and_si64 (v, MC (full_alpha)), MC (full_alpha)); #else __m64 ffs = _mm_cmpeq_pi8 (v, v); return (_mm_movemask_pi8 (_mm_cmpeq_pi8 (v, ffs)) & 0x40); #endif } static force_inline pixman_bool_t is_zero (__m64 v) { return is_equal (v, _mm_setzero_si64 ()); } /* Expand 16 bits positioned at @pos (0-3) of a mmx register into * * 00RR00GG00BB * * --- Expanding 565 in the low word --- * * m = (m << (32 - 3)) | (m << (16 - 5)) | m; * m = m & (01f0003f001f); * m = m * (008404100840); * m = m >> 8; * * Note the trick here - the top word is shifted by another nibble to * avoid it bumping into the middle word */ static force_inline __m64 expand565 (__m64 pixel, int pos) { __m64 p = pixel; __m64 t1, t2; /* move pixel to low 16 bit and zero the rest */ #ifdef USE_LOONGSON_MMI p = loongson_extract_pi16 (p, pos); #else p = shift (shift (p, (3 - pos) * 16), -48); #endif t1 = shift (p, 36 - 11); t2 = shift (p, 16 - 5); p = _mm_or_si64 (t1, p); p = _mm_or_si64 (t2, p); p = _mm_and_si64 (p, MC (565_rgb)); pixel = _mm_mullo_pi16 (p, MC (565_unpack_multiplier)); return _mm_srli_pi16 (pixel, 8); } /* Expand 4 16 bit pixels in an mmx register into two mmx registers of * * AARRGGBBRRGGBB */ static force_inline void expand_4xpacked565 (__m64 vin, __m64 *vout0, __m64 *vout1, int full_alpha) { __m64 t0, t1, alpha = _mm_setzero_si64 (); __m64 r = _mm_and_si64 (vin, MC (expand_565_r)); __m64 g = _mm_and_si64 (vin, MC (expand_565_g)); __m64 b = _mm_and_si64 (vin, MC (expand_565_b)); if (full_alpha) alpha = _mm_cmpeq_pi32 (alpha, alpha); /* Replicate high bits into empty low bits. 
*/ r = _mm_or_si64 (_mm_srli_pi16 (r, 8), _mm_srli_pi16 (r, 13)); g = _mm_or_si64 (_mm_srli_pi16 (g, 3), _mm_srli_pi16 (g, 9)); b = _mm_or_si64 (_mm_slli_pi16 (b, 3), _mm_srli_pi16 (b, 2)); r = _mm_packs_pu16 (r, _mm_setzero_si64 ()); /* 00 00 00 00 R3 R2 R1 R0 */ g = _mm_packs_pu16 (g, _mm_setzero_si64 ()); /* 00 00 00 00 G3 G2 G1 G0 */ b = _mm_packs_pu16 (b, _mm_setzero_si64 ()); /* 00 00 00 00 B3 B2 B1 B0 */ t1 = _mm_unpacklo_pi8 (r, alpha); /* A3 R3 A2 R2 A1 R1 A0 R0 */ t0 = _mm_unpacklo_pi8 (b, g); /* G3 B3 G2 B2 G1 B1 G0 B0 */ *vout0 = _mm_unpacklo_pi16 (t0, t1); /* A1 R1 G1 B1 A0 R0 G0 B0 */ *vout1 = _mm_unpackhi_pi16 (t0, t1); /* A3 R3 G3 B3 A2 R2 G2 B2 */ }
/* Unpack the low (pos == 0) or high (pos == 1) pair of packed 8888 pixels
 * of 'in' into 16 bits per channel. */
static force_inline __m64 expand8888 (__m64 in, int pos) { if (pos == 0) return _mm_unpacklo_pi8 (in, _mm_setzero_si64 ()); else return _mm_unpackhi_pi8 (in, _mm_setzero_si64 ()); }
/* As expand8888, but OR in full alpha (for x8r8g8b8 sources). */
static force_inline __m64 expandx888 (__m64 in, int pos) { return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha)); }
/* Expand four packed r5g6b5 pixels in 'vin' into four 16-bit-per-channel
 * pixels via expand_4xpacked565. */
static force_inline void expand_4x565 (__m64 vin, __m64 *vout0, __m64 *vout1, __m64 *vout2, __m64 *vout3, int full_alpha) { __m64 v0, v1; expand_4xpacked565 (vin, &v0, &v1, full_alpha); *vout0 = expand8888 (v0, 0); *vout1 = expand8888 (v0, 1); *vout2 = expand8888 (v1, 0); *vout3 = expand8888 (v1, 1); }
/* Convert one expanded pixel to r5g6b5 and insert it into 16-bit lane 'pos'
 * of 'target', leaving the other lanes of 'target' intact. */
static force_inline __m64 pack_565 (__m64 pixel, __m64 target, int pos) { __m64 p = pixel; __m64 t = target; __m64 r, g, b; r = _mm_and_si64 (p, MC (565_r)); g = _mm_and_si64 (p, MC (565_g)); b = _mm_and_si64 (p, MC (565_b));
#ifdef USE_LOONGSON_MMI
r = shift (r, -(32 - 8)); g = shift (g, -(16 - 3)); b = shift (b, -(0 + 3)); p = _mm_or_si64 (r, g); p = _mm_or_si64 (p, b); return loongson_insert_pi16 (t, p, pos);
#else
r = shift (r, -(32 - 8) + pos * 16); g = shift (g, -(16 - 3) + pos * 16); b = shift (b, -(0 + 3) + pos * 16); if (pos == 0) t = _mm_and_si64 (t, MC (mask_0)); else if (pos == 1) t = _mm_and_si64 (t, MC (mask_1)); else if (pos == 2) t = _mm_and_si64 (t, MC (mask_2)); else if (pos == 3) t = _mm_and_si64 (t, MC (mask_3)); p = _mm_or_si64 (r, t); p = _mm_or_si64 (g, p); return _mm_or_si64 (b, p);
#endif
}
/* Pack two pairs of packed 8888 pixels into four r5g6b5 pixels; the
 * multiply-add merges the red/blue fields in one instruction. */
static force_inline __m64 pack_4xpacked565 (__m64 a, __m64 b) { __m64 rb0 = _mm_and_si64 (a, MC (packed_565_rb)); __m64 rb1 = _mm_and_si64 (b, MC (packed_565_rb)); __m64 t0 = _mm_madd_pi16 (rb0, MC (565_pack_multiplier)); __m64 t1 = _mm_madd_pi16 (rb1, MC (565_pack_multiplier)); __m64 g0 = _mm_and_si64 (a, MC (packed_565_g)); __m64 g1 = _mm_and_si64 (b, MC (packed_565_g)); t0 = _mm_or_si64 (t0, g0); t1 = _mm_or_si64 (t1, g1); t0 = shift(t0, -5); t1 = shift(t1, -5 + 16); return _mm_shuffle_pi16 (_mm_or_si64 (t0, t1), _MM_SHUFFLE (3, 1, 2, 0)); }
#ifndef _MSC_VER
/* Pack four expanded pixels down to four r5g6b5 pixels. */
static force_inline __m64 pack_4x565 (__m64 v0, __m64 v1, __m64 v2, __m64 v3) { return pack_4xpacked565 (pack8888 (v0, v1), pack8888 (v2, v3)); }
/* Per-channel x*a + y*b.  NOTE: the MSVC macro variant below clobbers its
 * x and y arguments, so callers must not reuse them afterwards. */
static force_inline __m64 pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b) { x = pix_multiply (x, a); y = pix_multiply (y, b); return pix_add (x, y); }
#else
/* MSVC only handles a "pass by register" of up to three SSE intrinsics */
#define pack_4x565(v0, v1, v2, v3) \
pack_4xpacked565 (pack8888 (v0, v1), pack8888 (v2, v3))
#define pix_add_mul(x, a, y, b) \
( x = pix_multiply (x, a), \
y = pix_multiply (y, b), \
pix_add (x, y) )
#endif
/* --------------- MMX code patch for fbcompose.c --------------------- */
/* Load one source pixel; when a mask is present, scale it by the expanded
 * mask alpha (unified-alpha masking). */
static force_inline __m64 combine (const uint32_t *src, const uint32_t *mask) { __m64 vsrc = load8888 (src); if (mask) { __m64 m = load8888 (mask); m = expand_alpha (m); vsrc = pix_multiply (vsrc, m); } return vsrc; }
/* Single-pixel OVER with opaque/transparent fast paths; operands arrive
 * packed and are expanded here. */
static force_inline __m64 core_combine_over_u_pixel_mmx (__m64 vsrc, __m64 vdst) { vsrc = _mm_unpacklo_pi8 (vsrc, _mm_setzero_si64 ()); if (is_opaque (vsrc)) { return vsrc; } else if (!is_zero (vsrc)) { return over (vsrc, expand_alpha (vsrc), _mm_unpacklo_pi8 (vdst, _mm_setzero_si64 ())); } return _mm_unpacklo_pi8 (vdst, _mm_setzero_si64 ()); }
/* dest = src OVER dest, with opaque/zero source shortcuts. */
static void mmx_combine_over_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 vsrc = combine (src, mask); if (is_opaque (vsrc)) { store8888 (dest, vsrc); } else if (!is_zero (vsrc)) { __m64 sa = expand_alpha (vsrc); store8888 (dest, over (vsrc, sa, load8888 (dest))); } ++dest; ++src; if (mask) ++mask; } _mm_empty (); }
/* dest = dest OVER src. */
static void mmx_combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 d, da; __m64 s = combine (src, mask); d = load8888 (dest); da = expand_alpha (d); store8888 (dest, over (d, da, s)); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = src IN dest (source scaled by destination alpha). */
static void mmx_combine_in_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 a; __m64 x = combine (src, mask); a = load8888 (dest); a = expand_alpha (a); x = pix_multiply (x, a); store8888 (dest, x); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = dest IN src (destination scaled by source alpha). */
static void mmx_combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 a = combine (src, mask); __m64 x; x = load8888 (dest); a = expand_alpha (a); x = pix_multiply (x, a); store8888 (dest, x); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = src OUT dest (source scaled by inverse destination alpha). */
static void mmx_combine_out_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 a; __m64 x = combine (src, mask); a = load8888 (dest); a = expand_alpha (a); a = negate (a); x = pix_multiply (x, a); store8888 (dest, x); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = dest OUT src (destination scaled by inverse source alpha). */
static void mmx_combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 a = combine (src, mask); __m64 x; x = load8888 (dest); a = expand_alpha (a); a = negate (a); x = pix_multiply (x, a); store8888 (dest, x); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = src ATOP dest = src*da + dest*(1 - sa). */
static void mmx_combine_atop_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 da, d, sia; __m64 s = combine (src, mask); d = load8888 (dest); sia = expand_alpha (s); sia = negate (sia); da = expand_alpha (d); s = pix_add_mul (s, da, d, sia); store8888 (dest, s); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = dest ATOP src = src*(1 - da) + dest*sa. */
static void mmx_combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end; end = dest + width; while (dest < end) { __m64 dia, d, sa; __m64 s = combine (src, mask); d = load8888 (dest); sa = expand_alpha (s); dia = expand_alpha (d); dia = negate (dia); s = pix_add_mul (s, dia, d, sa); store8888 (dest, s); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = src XOR dest = src*(1 - da) + dest*(1 - sa). */
static void mmx_combine_xor_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 dia, d, sia; __m64 s = combine (src, mask); d = load8888 (dest); sia = expand_alpha (s); dia = expand_alpha (d); sia = negate (sia); dia = negate (dia); s = pix_add_mul (s, dia, d, sia); store8888 (dest, s); ++dest; ++src; if (mask) mask++; } _mm_empty (); }
/* dest = src ADD dest (saturating per-channel add via pix_add). */
static void mmx_combine_add_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { __m64 d; __m64 s =
combine (src, mask); d = load8888 (dest); s = pix_add (s, d); store8888 (dest, s); ++dest; ++src; if (mask) mask++; } _mm_empty (); } static void mmx_combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = dest + width; while (dest < end) { uint32_t s, sa, da; uint32_t d = *dest; __m64 ms = combine (src, mask); __m64 md = load8888 (dest); store8888(&s, ms); da = ~d >> 24; sa = s >> 24; if (sa > da) { uint32_t quot = DIV_UN8 (da, sa) << 24; __m64 msa = load8888 ("); msa = expand_alpha (msa); ms = pix_multiply (ms, msa); } md = pix_add (md, ms); store8888 (dest, md); ++src; ++dest; if (mask) mask++; } _mm_empty (); } static void mmx_combine_src_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); s = pix_multiply (s, a); store8888 (dest, s); ++src; ++mask; ++dest; } _mm_empty (); } static void mmx_combine_over_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 sa = expand_alpha (s); store8888 (dest, in_over (s, sa, a, d)); ++src; ++dest; ++mask; } _mm_empty (); } static void mmx_combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 da = expand_alpha (d); store8888 (dest, over (d, da, in (s, a))); ++src; ++dest; ++mask; } _mm_empty (); } static void mmx_combine_in_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * 
src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 da = expand_alpha (d); s = pix_multiply (s, a); s = pix_multiply (s, da); store8888 (dest, s); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha IN-reverse: dest = dest * (mask * src-alpha). */
static void mmx_combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 sa = expand_alpha (s); a = pix_multiply (a, sa); d = pix_multiply (d, a); store8888 (dest, d); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha OUT: dest = src * mask * (1 - dest-alpha). */
static void mmx_combine_out_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 da = expand_alpha (d); da = negate (da); s = pix_multiply (s, a); s = pix_multiply (s, da); store8888 (dest, s); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha OUT-reverse: dest = dest * (1 - mask * src-alpha). */
static void mmx_combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 sa = expand_alpha (s); a = pix_multiply (a, sa); a = negate (a); d = pix_multiply (d, a); store8888 (dest, d); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha ATOP: dest = src*mask*da + dest*(1 - mask*sa). */
static void mmx_combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 da = expand_alpha (d); __m64 sa = expand_alpha (s); s = pix_multiply (s, a); a = pix_multiply (a, sa); a = negate (a); d = pix_add_mul (d, a, s, da); store8888 (dest, d); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha ATOP-reverse: dest = src*mask*(1 - da) + dest*(mask*sa). */
static void mmx_combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 da = expand_alpha (d); __m64 sa = expand_alpha (s); s = pix_multiply (s, a); a = pix_multiply (a, sa); da = negate (da); d = pix_add_mul (d, a, s, da); store8888 (dest, d); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha XOR: dest = src*mask*(1 - da) + dest*(1 - mask*sa). */
static void mmx_combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); __m64 da = expand_alpha (d); __m64 sa = expand_alpha (s); s = pix_multiply (s, a); a = pix_multiply (a, sa); da = negate (da); a = negate (a); d = pix_add_mul (d, a, s, da); store8888 (dest, d); ++src; ++dest; ++mask; } _mm_empty (); }
/* Component-alpha ADD: dest = src*mask + dest (saturating). */
static void mmx_combine_add_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { const uint32_t *end = src + width; while (src < end) { __m64 a = load8888 (mask); __m64 s = load8888 (src); __m64 d = load8888 (dest); s = pix_multiply (s, a); d = pix_add (s, d); store8888 (dest, d); ++src; ++dest; ++mask; } _mm_empty (); }
/* ------------- MMX code paths called from fbpict.c -------------------- */
/* Fast path: solid source OVER a8r8g8b8 destination.  Aligns to 8 bytes,
 * then processes two pixels per iteration. */
static void mmx_composite_over_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint32_t *dst_line, *dst; int32_t w; int dst_stride; __m64 vsrc, vsrca; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { dst = dst_line; dst_line += dst_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { store8888 (dst, over (vsrc, vsrca, load8888 (dst))); w--; dst++; } while (w >= 2) { __m64 vdest; __m64 dest0, dest1; vdest = *(__m64 *)dst; dest0 = over (vsrc, vsrca, expand8888 (vdest, 0)); dest1 = over (vsrc, vsrca, expand8888 (vdest, 1)); *(__m64 *)dst = pack8888 (dest0, dest1); dst += 2; w -= 2; } CHECKPOINT (); if (w) { store8888 (dst, over (vsrc, vsrca, load8888 (dst))); } } _mm_empty (); }
/* Fast path: solid source OVER r5g6b5 destination, four pixels at a time. */
static void mmx_composite_over_n_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint16_t *dst_line, *dst; int32_t w; int dst_stride; __m64 vsrc, vsrca; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { dst = dst_line; dst_line += dst_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { uint64_t d = *dst; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0); *dst = to_uint64 (vdest); w--; dst++; } while (w >= 4) { __m64 vdest = *(__m64 *)dst; __m64 v0, v1, v2, v3; expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0); v0 = over (vsrc, vsrca, v0); v1 = over (vsrc, vsrca, v1); v2 = over (vsrc, vsrca, v2); v3 = over (vsrc, vsrca, v3); *(__m64 *)dst = pack_4x565 (v0, v1, v2, v3); dst += 4; w -= 4; } CHECKPOINT (); while (w) { uint64_t d = *dst; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0); *dst = to_uint64 (vdest); w--; dst++; } } _mm_empty (); }
/* Fast path: solid source, component-alpha a8r8g8b8 mask, OVER a8r8g8b8
 * destination.  Skips spans where the mask is zero. */
static void mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint32_t *dst_line; uint32_t *mask_line; int dst_stride, mask_stride; __m64 vsrc, vsrca; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { int twidth = width; uint32_t *p = (uint32_t *)mask_line; uint32_t *q = (uint32_t *)dst_line; while (twidth && (uintptr_t)q & 7) { uint32_t m = *(uint32_t *)p; if (m) { __m64 vdest = load8888 (q); vdest = in_over (vsrc, vsrca, load8888 (&m), vdest); store8888 (q, vdest); } twidth--; p++; q++; } while (twidth >= 2) { uint32_t m0, m1; m0 = *p; m1 = *(p + 1); if (m0 | m1) { __m64 dest0, dest1; __m64 vdest = *(__m64 *)q; dest0 = in_over (vsrc, vsrca, load8888 (&m0), expand8888 (vdest, 0)); dest1 = in_over (vsrc, vsrca, load8888 (&m1), expand8888 (vdest, 1)); *(__m64 *)q = pack8888 (dest0, dest1); } p += 2; q += 2; twidth -= 2; } if (twidth) { uint32_t m = *(uint32_t *)p; if (m) { __m64 vdest = load8888 (q); vdest = in_over (vsrc, vsrca, load8888 (&m), vdest); store8888 (q, vdest); } twidth--; p++; q++; } dst_line += dst_stride; mask_line += mask_stride; } _mm_empty (); }
/* Fast path: a8r8g8b8 source with a solid mask OVER a8r8g8b8 destination. */
static void mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; uint32_t mask; __m64 vmask; int dst_stride, src_stride; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); vmask = expand_alpha (load8888 (&mask)); while (height--) { dst = dst_line;
dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { __m64 s = load8888 (src); __m64 d = load8888 (dst); store8888 (dst, in_over (s, expand_alpha (s), vmask, d)); w--; dst++; src++; } while (w >= 2) { __m64 vs = ldq_u ((__m64 *)src); __m64 vd = *(__m64 *)dst; __m64 vsrc0 = expand8888 (vs, 0); __m64 vsrc1 = expand8888 (vs, 1); *(__m64 *)dst = pack8888 ( in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)), in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1))); w -= 2; dst += 2; src += 2; } if (w) { __m64 s = load8888 (src); __m64 d = load8888 (dst); store8888 (dst, in_over (s, expand_alpha (s), vmask, d)); } } _mm_empty (); }
/* Fast path: x8r8g8b8 source (alpha forced opaque) with a solid mask OVER
 * a8r8g8b8 destination.  Main loop handles 16 pixels per iteration to hide
 * load latency; ldq_u tolerates unaligned source reads. */
static void mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; uint32_t mask; __m64 vmask; int dst_stride, src_stride; int32_t w; __m64 srca; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); vmask = expand_alpha (load8888 (&mask)); srca = MC (4x00ff); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { uint32_t ssrc = *src | 0xff000000; __m64 s = load8888 (&ssrc); __m64 d = load8888 (dst); store8888 (dst, in_over (s, srca, vmask, d)); w--; dst++; src++; } while (w >= 16) { __m64 vd0 = *(__m64 *)(dst + 0); __m64 vd1 = *(__m64 *)(dst + 2); __m64 vd2 = *(__m64 *)(dst + 4); __m64 vd3 = *(__m64 *)(dst + 6); __m64 vd4 = *(__m64 *)(dst + 8); __m64 vd5 = *(__m64 *)(dst + 10); __m64 vd6 = *(__m64 *)(dst + 12); __m64 vd7 = *(__m64 *)(dst + 14); __m64 vs0 = ldq_u ((__m64 *)(src + 0)); __m64 vs1 = ldq_u ((__m64 *)(src + 2)); __m64 vs2 = ldq_u ((__m64 *)(src + 4)); __m64 vs3 = ldq_u ((__m64 *)(src + 6)); __m64 vs4 = ldq_u ((__m64 *)(src + 8)); __m64 vs5 = ldq_u ((__m64 *)(src + 10)); __m64 vs6 = ldq_u ((__m64 *)(src + 12)); __m64 vs7 = ldq_u ((__m64 *)(src + 14)); vd0 = pack8888 ( in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)), in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1))); vd1 = pack8888 ( in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)), in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1))); vd2 = pack8888 ( in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)), in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1))); vd3 = pack8888 ( in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)), in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1))); vd4 = pack8888 ( in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)), in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1))); vd5 = pack8888 ( in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)), in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1))); vd6 = pack8888 ( in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)), in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1))); vd7 = pack8888 ( in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)), in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1))); *(__m64 *)(dst + 0) = vd0; *(__m64 *)(dst + 2) = vd1; *(__m64 *)(dst + 4) = vd2; *(__m64 *)(dst + 6) = vd3; *(__m64 *)(dst + 8) = vd4; *(__m64 *)(dst + 10) = vd5; *(__m64 *)(dst + 12) = vd6; *(__m64 *)(dst + 14) = vd7; w -= 16; dst += 16; src += 16; } while (w) { uint32_t ssrc = *src | 0xff000000; __m64 s = load8888 (&ssrc); __m64 d = load8888 (dst); store8888 (dst, in_over (s, srca, vmask, d)); w--; dst++; src++; } } _mm_empty (); }
/* Fast path: a8r8g8b8 OVER a8r8g8b8, scalar loop with per-pixel
 * opaque/zero shortcuts. */
static void mmx_composite_over_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; uint32_t s; int dst_stride, src_stride; uint8_t a; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w--) { s = *src++; a = s >> 24; if (a == 0xff) { *dst = s; } else if (s) { __m64 ms, sa; ms = load8888 (&s); sa = expand_alpha (ms); store8888 (dst, over (ms, sa, load8888 (dst))); } dst++; } } _mm_empty (); }
/* Fast path: a8r8g8b8 OVER r5g6b5, four destination pixels at a time. */
static void mmx_composite_over_8888_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
#if 0
/* FIXME */ assert (src_image->drawable == mask_image->drawable);
#endif
while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { __m64 vsrc = load8888 (src); uint64_t d = *dst; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 ( over (vsrc, expand_alpha (vsrc), vdest), vdest, 0); *dst = to_uint64 (vdest); w--; dst++; src++; } CHECKPOINT (); while (w >= 4) { __m64 vdest = *(__m64 *)dst; __m64 v0, v1, v2, v3; __m64 vsrc0, vsrc1, vsrc2, vsrc3; expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0); vsrc0 = load8888 ((src + 0)); vsrc1 = load8888 ((src + 1)); vsrc2 = load8888 ((src + 2)); vsrc3 = load8888 ((src + 3)); v0 = over (vsrc0, expand_alpha (vsrc0), v0); v1 = over (vsrc1, expand_alpha (vsrc1), v1); v2 = over (vsrc2, expand_alpha (vsrc2), v2); v3 = over (vsrc3, expand_alpha (vsrc3), v3); *(__m64 *)dst = pack_4x565 (v0, v1, v2, v3); w -= 4; dst += 4; src += 4; } CHECKPOINT (); while (w) { __m64 vsrc = load8888 (src); uint64_t d = *dst; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (over (vsrc, expand_alpha (vsrc), vdest), vdest, 0); *dst = to_uint64 (vdest); w--; dst++; src++; } } _mm_empty (); }
/* Fast path: solid source with an a8 mask OVER a8r8g8b8 destination.
 * Fully-opaque source + mask pairs are stored directly as srcsrc. */
static void mmx_composite_over_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; __m64 vsrc, vsrca; uint64_t srcsrc; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; srcsrc = (uint64_t)src << 32 | src; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { uint64_t m = *mask; if (m) { __m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m)), load8888 (dst)); store8888 (dst, vdest); } w--; mask++; dst++; } CHECKPOINT (); while (w >= 2) { uint64_t m0, m1; m0 = *mask; m1 = *(mask + 1); if (srca == 0xff && (m0 & m1) == 0xff) { *(uint64_t *)dst = srcsrc; } else if (m0 | m1) { __m64 vdest; __m64 dest0, dest1; vdest = *(__m64 *)dst; dest0 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m0)), expand8888 (vdest, 0)); dest1 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m1)), expand8888 (vdest, 1)); *(__m64 *)dst = pack8888 (dest0, dest1); } mask += 2; dst += 2; w -= 2; } CHECKPOINT (); if (w) { uint64_t m = *mask; if (m) { __m64 vdest = load8888 (dst); vdest = in_over ( vsrc, vsrca, expand_alpha_rev (to_m64 (m)), vdest); store8888 (dst, vdest); } } } _mm_empty (); }
/* Solid fill for 8/16/32 bpp surfaces; aligns the edges, then stores
 * 64 bytes per iteration (eight MMX registers on GCC/x86). */
static pixman_bool_t mmx_fill (pixman_implementation_t *imp, uint32_t * bits,
int stride, int bpp, int x, int y, int width, int height, uint32_t filler) { uint64_t fill; __m64 vfill; uint32_t byte_width; uint8_t *byte_line;
#if defined __GNUC__ && defined USE_X86_MMX
__m64 v1, v2, v3, v4, v5, v6, v7;
#endif
/* Only 8, 16 and 32 bpp destinations are handled here. */
if (bpp != 16 && bpp != 32 && bpp != 8) return FALSE; if (bpp == 8) { stride = stride * (int) sizeof (uint32_t) / 1; byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x); byte_width = width; stride *= 1; filler = (filler & 0xff) * 0x01010101; } else if (bpp == 16) { stride = stride * (int) sizeof (uint32_t) / 2; byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x); byte_width = 2 * width; stride *= 2; filler = (filler & 0xffff) * 0x00010001; } else { stride = stride * (int) sizeof (uint32_t) / 4; byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x); byte_width = 4 * width; stride *= 4; } fill = ((uint64_t)filler << 32) | filler; vfill = to_m64 (fill);
#if defined __GNUC__ && defined USE_X86_MMX
/* Broadcast the fill value into seven more MMX registers so the store
 * loop below can issue eight movq stores back to back. */
__asm__ ( "movq %7, %0\n" "movq %7, %1\n" "movq %7, %2\n" "movq %7, %3\n" "movq %7, %4\n" "movq %7, %5\n" "movq %7, %6\n" : "=&y" (v1), "=&y" (v2), "=&y" (v3), "=&y" (v4), "=&y" (v5), "=&y" (v6), "=y" (v7) : "y" (vfill));
#endif
while (height--) { int w; uint8_t *d = byte_line; byte_line += stride; w = byte_width; if (w >= 1 && ((uintptr_t)d & 1)) { *(uint8_t *)d = (filler & 0xff); w--; d++; } if (w >= 2 && ((uintptr_t)d & 3)) { *(uint16_t *)d = filler; w -= 2; d += 2; } while (w >= 4 && ((uintptr_t)d & 7)) { *(uint32_t *)d = filler; w -= 4; d += 4; } while (w >= 64) {
#if defined __GNUC__ && defined USE_X86_MMX
__asm__ ( "movq %1, (%0)\n" "movq %2, 8(%0)\n" "movq %3, 16(%0)\n" "movq %4, 24(%0)\n" "movq %5, 32(%0)\n" "movq %6, 40(%0)\n" "movq %7, 48(%0)\n" "movq %8, 56(%0)\n" : : "r" (d), "y" (vfill), "y" (v1), "y" (v2), "y" (v3), "y" (v4), "y" (v5), "y" (v6), "y" (v7) : "memory");
#else
*(__m64*) (d + 0) = vfill; *(__m64*) (d + 8) = vfill; *(__m64*) (d + 16) = vfill; *(__m64*) (d + 24) = vfill; *(__m64*) (d + 32) = vfill; *(__m64*) (d + 40) = vfill; *(__m64*) (d + 48) = vfill; *(__m64*) (d + 56) = vfill;
#endif
w -= 64; d += 64; } while (w >= 4) { *(uint32_t *)d = filler; w -= 4; d += 4; } if (w >= 2) { *(uint16_t *)d = filler; w -= 2; d += 2; } if (w >= 1) { *(uint8_t *)d = (filler & 0xff); w--; d++; } } _mm_empty (); return TRUE; }
/* Fast path: SRC conversion x8r8g8b8 -> r5g6b5, four pixels per store. */
static void mmx_composite_src_x888_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t *src_line, *src, s; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { s = *src++; *dst = convert_8888_to_0565 (s); dst++; w--; } while (w >= 4) { __m64 vdest; __m64 vsrc0 = ldq_u ((__m64 *)(src + 0)); __m64 vsrc1 = ldq_u ((__m64 *)(src + 2)); vdest = pack_4xpacked565 (vsrc0, vsrc1); *(__m64 *)dst = vdest; w -= 4; src += 4; dst += 4; } while (w) { s = *src++; *dst = convert_8888_to_0565 (s); dst++; w--; } } _mm_empty (); }
/* Fast path: solid source IN a8 mask, SRC to a8r8g8b8 destination; a zero
 * source degenerates to a fill with zero. */
static void mmx_composite_src_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; __m64 vsrc; uint64_t srcsrc; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) { mmx_fill (imp, dest_image->bits.bits, dest_image->bits.rowstride, PIXMAN_FORMAT_BPP (dest_image->bits.format), dest_x, dest_y, width, height, 0); return; } srcsrc = (uint64_t)src << 32 | src; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); vsrc = load8888
(&src); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { uint64_t m = *mask; if (m) { __m64 vdest = in (vsrc, expand_alpha_rev (to_m64 (m))); store8888 (dst, vdest); } else { *dst = 0; } w--; mask++; dst++; } CHECKPOINT (); while (w >= 2) { uint64_t m0, m1; m0 = *mask; m1 = *(mask + 1); if (srca == 0xff && (m0 & m1) == 0xff) { *(uint64_t *)dst = srcsrc; } else if (m0 | m1) { __m64 dest0, dest1; dest0 = in (vsrc, expand_alpha_rev (to_m64 (m0))); dest1 = in (vsrc, expand_alpha_rev (to_m64 (m1))); *(__m64 *)dst = pack8888 (dest0, dest1); } else { *(uint64_t *)dst = 0; } mask += 2; dst += 2; w -= 2; } CHECKPOINT (); if (w) { uint64_t m = *mask; if (m) { __m64 vdest = load8888 (dst); vdest = in (vsrc, expand_alpha_rev (to_m64 (m))); store8888 (dst, vdest); } else { *dst = 0; } } } _mm_empty (); }
/* Fast path: solid source with an a8 mask OVER r5g6b5 destination; the
 * all-opaque four-pixel case stores a pre-packed 565 quad. */
static void mmx_composite_over_n_8_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint16_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; __m64 vsrc, vsrca, tmp; __m64 srcsrcsrcsrc; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0); srcsrcsrcsrc = expand_alpha_rev (tmp); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { uint64_t m = *mask; if (m) { uint64_t d = *dst; __m64 vd = to_m64 (d); __m64 vdest = in_over ( vsrc, vsrca, expand_alpha_rev (to_m64 (m)), expand565 (vd, 0)); vd = pack_565 (vdest, _mm_setzero_si64 (), 0); *dst = to_uint64 (vd); } w--; mask++; dst++; } CHECKPOINT (); while (w >= 4) { uint64_t m0, m1, m2, m3; m0 = *mask; m1 = *(mask + 1); m2 = *(mask + 2); m3 = *(mask + 3); if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff) { *(__m64 *)dst = srcsrcsrcsrc; } else if (m0 | m1 | m2 | m3) { __m64 vdest = *(__m64 *)dst; __m64 v0, v1, v2, v3; __m64 vm0, vm1, vm2, vm3; expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0); vm0 = to_m64 (m0); v0 = in_over (vsrc, vsrca, expand_alpha_rev (vm0), v0); vm1 = to_m64 (m1); v1 = in_over (vsrc, vsrca, expand_alpha_rev (vm1), v1); vm2 = to_m64 (m2); v2 = in_over (vsrc, vsrca, expand_alpha_rev (vm2), v2); vm3 = to_m64 (m3); v3 = in_over (vsrc, vsrca, expand_alpha_rev (vm3), v3); *(__m64 *)dst = pack_4x565 (v0, v1, v2, v3);; } w -= 4; mask += 4; dst += 4; } CHECKPOINT (); while (w) { uint64_t m = *mask; if (m) { uint64_t d = *dst; __m64 vd = to_m64 (d); __m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m)), expand565 (vd, 0)); vd = pack_565 (vdest, _mm_setzero_si64 (), 0); *dst = to_uint64 (vd); } w--; mask++; dst++; } } _mm_empty (); }
/* Fast path: non-premultiplied (pixbuf) a8b8g8r8 source OVER r5g6b5
 * destination via over_rev_non_pre; invert_colors handles the channel swap. */
static void mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
#if 0
/* FIXME */ assert (src_image->drawable == mask_image->drawable);
#endif
while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { __m64 vsrc = load8888 (src); uint64_t d = *dst; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0); *dst = to_uint64 (vdest); w--; dst++; src++; } CHECKPOINT (); while (w >= 4) { uint32_t s0, s1, s2, s3; unsigned char a0, a1, a2, a3; s0 = *src; s1 = *(src + 1); s2 = *(src + 2); s3 = *(src + 3); a0 = (s0 >> 24); a1 = (s1 >> 24); a2 = (s2 >> 24); a3 = (s3 >> 24); if ((a0 & a1 & a2 & a3) == 0xFF) { __m64 v0 = invert_colors (load8888 (&s0)); __m64 v1 = invert_colors (load8888 (&s1)); __m64 v2 = invert_colors (load8888 (&s2)); __m64 v3 = invert_colors (load8888 (&s3)); *(__m64 *)dst = pack_4x565 (v0, v1, v2, v3); } else if (s0 | s1 | s2 | s3) { __m64 vdest = *(__m64 *)dst; __m64 v0, v1, v2, v3; __m64 vsrc0 = load8888 (&s0); __m64 vsrc1 = load8888 (&s1); __m64 vsrc2 = load8888 (&s2); __m64 vsrc3 = load8888 (&s3); expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0); v0 = over_rev_non_pre (vsrc0, v0); v1 = over_rev_non_pre (vsrc1, v1); v2 = over_rev_non_pre (vsrc2, v2); v3 = over_rev_non_pre (vsrc3, v3); *(__m64 *)dst = pack_4x565 (v0, v1, v2, v3); } w -= 4; dst += 4; src += 4; } CHECKPOINT (); while (w) { __m64 vsrc = load8888 (src); uint64_t d = *dst; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0); *dst = to_uint64 (vdest); w--; dst++; src++; } } _mm_empty (); }
/* Fast path: non-premultiplied (pixbuf) source OVER a8r8g8b8 destination. */
static void mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
#if 0
/* FIXME */ assert (src_image->drawable == mask_image->drawable);
#endif
while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { __m64 s = load8888 (src); __m64 d = load8888 (dst); store8888 (dst, over_rev_non_pre (s, d)); w--; dst++; src++; } while (w >= 2) { uint32_t s0, s1; unsigned char a0, a1; __m64 d0, d1; s0 = *src; s1 = *(src + 1); a0 = (s0 >> 24); a1 = (s1 >> 24); if ((a0 & a1) == 0xFF) { d0 = invert_colors (load8888 (&s0)); d1 = invert_colors (load8888 (&s1)); *(__m64 *)dst = pack8888 (d0, d1); } else if (s0 | s1) { __m64 vdest = *(__m64 *)dst; d0 = over_rev_non_pre (load8888 (&s0), expand8888 (vdest, 0)); d1 = over_rev_non_pre (load8888 (&s1), expand8888 (vdest, 1)); *(__m64 *)dst = pack8888 (d0, d1); } w -= 2; dst += 2; src += 2; } if (w) { __m64 s = load8888 (src); __m64 d = load8888 (dst); store8888 (dst, over_rev_non_pre (s, d)); } } _mm_empty (); }
/* Fast path: solid source, component-alpha a8r8g8b8 mask, OVER r5g6b5
 * destination; processes four destination pixels per iteration. */
static void mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint16_t *dst_line; uint32_t *mask_line; int dst_stride, mask_stride; __m64 vsrc, vsrca; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { int twidth = width; uint32_t *p = (uint32_t *)mask_line; uint16_t *q = (uint16_t *)dst_line; while (twidth && ((uintptr_t)q & 7)) { uint32_t m = *(uint32_t *)p; if (m) { uint64_t d = *q; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m), vdest), vdest, 0); *q = to_uint64 (vdest); } twidth--; p++; q++; } while (twidth >= 4) { uint32_t m0, m1, m2, m3; m0 = *p; m1 = *(p + 1); m2 = *(p + 2); m3 = *(p + 3); if ((m0 | m1 | m2 | m3)) { __m64 vdest = *(__m64 *)q; __m64 v0, v1, v2, v3; expand_4x565 (vdest, &v0, &v1, &v2, &v3, 0); v0 = in_over (vsrc, vsrca, load8888 (&m0), v0); v1 = in_over (vsrc, vsrca, load8888 (&m1), v1); v2 = in_over (vsrc, vsrca, load8888 (&m2), v2); v3 = in_over (vsrc, vsrca, load8888 (&m3), v3); *(__m64 *)q = pack_4x565 (v0, v1, v2, v3); } twidth -= 4; p += 4; q += 4; } while (twidth) { uint32_t m; m = *(uint32_t *)p; if (m) { uint64_t d = *q; __m64 vdest = expand565 (to_m64 (d), 0); vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m), vdest), vdest, 0); *q = to_uint64 (vdest); } twidth--; p++; q++; } mask_line += mask_stride; dst_line += dst_stride; } _mm_empty (); }
/* Fast path: solid source IN a8 mask IN a8 destination (a8 IN operator);
 * four mask/dest bytes are handled per SIMD iteration. */
static void mmx_composite_in_n_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; uint32_t src; uint8_t sa; __m64 vsrc, vsrca; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); sa = src >> 24; vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 7) { uint16_t tmp; uint8_t a; uint32_t m, d; a = *mask++; d = *dst; m = MUL_UN8 (sa, a, tmp); d = MUL_UN8 (m, d, tmp); *dst++ = d; w--; } while (w >= 4) { __m64 vmask; __m64 vdest; vmask = load8888u ((uint32_t *)mask); vdest = load8888 ((uint32_t *)dst); store8888 ((uint32_t *)dst, in (in (vsrca, vmask), vdest)); dst += 4; mask += 4; w -= 4; } while (w--) { uint16_t tmp; uint8_t a; uint32_t m, d; a = *mask++; d = *dst; m = MUL_UN8 (sa, a, tmp); d = MUL_UN8 (m, d, tmp); *dst++ = d; } } _mm_empty (); }
/* Fast path: a8 source IN a8 destination (continues past this chunk). */
static void mmx_composite_in_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *src_line, *src; int src_stride, dst_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width;
while (w && (uintptr_t)dst & 3) { uint8_t s, d; uint16_t tmp; s = *src; d = *dst; *dst = MUL_UN8 (s, d, tmp); src++; dst++; w--; } while (w >= 4) { uint32_t *s = (uint32_t *)src; uint32_t *d = (uint32_t *)dst; store8888 (d, in (load8888u (s), load8888 (d))); w -= 4; dst += 4; src += 4; } while (w--) { uint8_t s, d; uint16_t tmp; s = *src; d = *dst; *dst = MUL_UN8 (s, d, tmp); src++; dst++; } } _mm_empty (); } static void mmx_composite_add_n_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; uint32_t src; uint8_t sa; __m64 vsrc, vsrca; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); sa = src >> 24; if (src == 0) return; vsrc = load8888 (&src); vsrca = expand_alpha (vsrc); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 3) { uint16_t tmp; uint16_t a; uint32_t m, d; uint32_t r; a = *mask++; d = *dst; m = MUL_UN8 (sa, a, tmp); r = ADD_UN8 (m, d, tmp); *dst++ = r; w--; } while (w >= 4) { __m64 vmask; __m64 vdest; vmask = load8888u ((uint32_t *)mask); vdest = load8888 ((uint32_t *)dst); store8888 ((uint32_t *)dst, _mm_adds_pu8 (in (vsrca, vmask), vdest)); dst += 4; mask += 4; w -= 4; } while (w--) { uint16_t tmp; uint16_t a; uint32_t m, d; uint32_t r; a = *mask++; d = *dst; m = MUL_UN8 (sa, a, tmp); r = ADD_UN8 (m, d, tmp); *dst++ = r; } } _mm_empty (); } static void mmx_composite_add_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; int32_t w; uint8_t s, d; uint16_t t; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (src_image, 
src_x, src_y, uint8_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { s = *src; d = *dst; t = d + s; s = t | (0 - (t >> 8)); *dst = s; dst++; src++; w--; } while (w >= 8) { *(__m64*)dst = _mm_adds_pu8 (ldq_u ((__m64 *)src), *(__m64*)dst); dst += 8; src += 8; w -= 8; } while (w) { s = *src; d = *dst; t = d + s; s = t | (0 - (t >> 8)); *dst = s; dst++; src++; w--; } } _mm_empty (); } static void mmx_composite_add_0565_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t d; uint16_t *src_line, *src; uint32_t s; int dst_stride, src_stride; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { s = *src++; if (s) { d = *dst; s = convert_0565_to_8888 (s); if (d) { d = convert_0565_to_8888 (d); UN8x4_ADD_UN8x4 (s, d); } *dst = convert_8888_to_0565 (s); } dst++; w--; } while (w >= 4) { __m64 vdest = *(__m64 *)dst; __m64 vsrc = ldq_u ((__m64 *)src); __m64 vd0, vd1; __m64 vs0, vs1; expand_4xpacked565 (vdest, &vd0, &vd1, 0); expand_4xpacked565 (vsrc, &vs0, &vs1, 0); vd0 = _mm_adds_pu8 (vd0, vs0); vd1 = _mm_adds_pu8 (vd1, vs1); *(__m64 *)dst = pack_4xpacked565 (vd0, vd1); dst += 4; src += 4; w -= 4; } while (w--) { s = *src++; if (s) { d = *dst; s = convert_0565_to_8888 (s); if (d) { d = convert_0565_to_8888 (d); UN8x4_ADD_UN8x4 (s, d); } *dst = convert_8888_to_0565 (s); } dst++; } } _mm_empty (); } static void mmx_composite_add_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { 
PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; int32_t w; CHECKPOINT (); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 7) { store (dst, _mm_adds_pu8 (load ((const uint32_t *)src), load ((const uint32_t *)dst))); dst++; src++; w--; } while (w >= 2) { *(__m64 *)dst = _mm_adds_pu8 (ldq_u ((__m64 *)src), *(__m64*)dst); dst += 2; src += 2; w -= 2; } if (w) { store (dst, _mm_adds_pu8 (load ((const uint32_t *)src), load ((const uint32_t *)dst))); } } _mm_empty (); } static pixman_bool_t mmx_blt (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height) { uint8_t * src_bytes; uint8_t * dst_bytes; int byte_width; if (src_bpp != dst_bpp) return FALSE; if (src_bpp == 16) { src_stride = src_stride * (int) sizeof (uint32_t) / 2; dst_stride = dst_stride * (int) sizeof (uint32_t) / 2; src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x)); dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dest_y) + (dest_x)); byte_width = 2 * width; src_stride *= 2; dst_stride *= 2; } else if (src_bpp == 32) { src_stride = src_stride * (int) sizeof (uint32_t) / 4; dst_stride = dst_stride * (int) sizeof (uint32_t) / 4; src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x)); dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dest_y) + (dest_x)); byte_width = 4 * width; src_stride *= 4; dst_stride *= 4; } else { return FALSE; } while (height--) { int w; uint8_t *s = src_bytes; uint8_t *d = dst_bytes; src_bytes += src_stride; dst_bytes += dst_stride; w = byte_width; 
if (w >= 1 && ((uintptr_t)d & 1)) { *(uint8_t *)d = *(uint8_t *)s; w -= 1; s += 1; d += 1; } if (w >= 2 && ((uintptr_t)d & 3)) { *(uint16_t *)d = *(uint16_t *)s; w -= 2; s += 2; d += 2; } while (w >= 4 && ((uintptr_t)d & 7)) { *(uint32_t *)d = ldl_u ((uint32_t *)s); w -= 4; s += 4; d += 4; } while (w >= 64) { #if (defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))) && defined USE_X86_MMX __asm__ ( "movq (%1), %%mm0\n" "movq 8(%1), %%mm1\n" "movq 16(%1), %%mm2\n" "movq 24(%1), %%mm3\n" "movq 32(%1), %%mm4\n" "movq 40(%1), %%mm5\n" "movq 48(%1), %%mm6\n" "movq 56(%1), %%mm7\n" "movq %%mm0, (%0)\n" "movq %%mm1, 8(%0)\n" "movq %%mm2, 16(%0)\n" "movq %%mm3, 24(%0)\n" "movq %%mm4, 32(%0)\n" "movq %%mm5, 40(%0)\n" "movq %%mm6, 48(%0)\n" "movq %%mm7, 56(%0)\n" : : "r" (d), "r" (s) : "memory", "%mm0", "%mm1", "%mm2", "%mm3", "%mm4", "%mm5", "%mm6", "%mm7"); #else __m64 v0 = ldq_u ((__m64 *)(s + 0)); __m64 v1 = ldq_u ((__m64 *)(s + 8)); __m64 v2 = ldq_u ((__m64 *)(s + 16)); __m64 v3 = ldq_u ((__m64 *)(s + 24)); __m64 v4 = ldq_u ((__m64 *)(s + 32)); __m64 v5 = ldq_u ((__m64 *)(s + 40)); __m64 v6 = ldq_u ((__m64 *)(s + 48)); __m64 v7 = ldq_u ((__m64 *)(s + 56)); *(__m64 *)(d + 0) = v0; *(__m64 *)(d + 8) = v1; *(__m64 *)(d + 16) = v2; *(__m64 *)(d + 24) = v3; *(__m64 *)(d + 32) = v4; *(__m64 *)(d + 40) = v5; *(__m64 *)(d + 48) = v6; *(__m64 *)(d + 56) = v7; #endif w -= 64; s += 64; d += 64; } while (w >= 4) { *(uint32_t *)d = ldl_u ((uint32_t *)s); w -= 4; s += 4; d += 4; } if (w >= 2) { *(uint16_t *)d = *(uint16_t *)s; w -= 2; s += 2; d += 2; } } _mm_empty (); return TRUE; } static void mmx_composite_copy_area (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); mmx_blt (imp, src_image->bits.bits, dest_image->bits.bits, src_image->bits.rowstride, dest_image->bits.rowstride, PIXMAN_FORMAT_BPP (src_image->bits.format), PIXMAN_FORMAT_BPP (dest_image->bits.format), src_x, src_y, dest_x, dest_y, width, height); } static 
void mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *src, *src_line; uint32_t *dst, *dst_line; uint8_t *mask, *mask_line; int src_stride, mask_stride, dst_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { src = src_line; src_line += src_stride; dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w--) { uint64_t m = *mask; if (m) { uint32_t ssrc = *src | 0xff000000; __m64 s = load8888 (&ssrc); if (m == 0xff) { store8888 (dst, s); } else { __m64 sa = expand_alpha (s); __m64 vm = expand_alpha_rev (to_m64 (m)); __m64 vdest = in_over (s, sa, vm, load8888 (dst)); store8888 (dst, vdest); } } mask++; dst++; src++; } } _mm_empty (); } static void mmx_composite_over_reverse_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint32_t *dst_line, *dst; int32_t w; int dst_stride; __m64 vsrc; CHECKPOINT (); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); vsrc = load8888 (&src); while (height--) { dst = dst_line; dst_line += dst_stride; w = width; CHECKPOINT (); while (w && (uintptr_t)dst & 7) { __m64 vdest = load8888 (dst); store8888 (dst, over (vdest, expand_alpha (vdest), vsrc)); w--; dst++; } while (w >= 2) { __m64 vdest = *(__m64 *)dst; __m64 dest0 = expand8888 (vdest, 0); __m64 dest1 = expand8888 (vdest, 1); dest0 = over (dest0, expand_alpha (dest0), vsrc); dest1 = over (dest1, expand_alpha (dest1), vsrc); *(__m64 *)dst = pack8888 (dest0, dest1); dst += 2; w -= 2; } CHECKPOINT (); if (w) { __m64 vdest = 
load8888 (dst); store8888 (dst, over (vdest, expand_alpha (vdest), vsrc)); } } _mm_empty (); } static force_inline void scaled_nearest_scanline_mmx_8888_8888_OVER (uint32_t* pd, const uint32_t* ps, int32_t w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t src_width_fixed, pixman_bool_t fully_transparent_src) { if (fully_transparent_src) return; while (w) { __m64 d = load (pd); __m64 s = load (ps + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; store8888 (pd, core_combine_over_u_pixel_mmx (s, d)); pd++; w--; } _mm_empty (); } FAST_NEAREST_MAINLOOP (mmx_8888_8888_cover_OVER, scaled_nearest_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, COVER) FAST_NEAREST_MAINLOOP (mmx_8888_8888_none_OVER, scaled_nearest_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, NONE) FAST_NEAREST_MAINLOOP (mmx_8888_8888_pad_OVER, scaled_nearest_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, PAD) FAST_NEAREST_MAINLOOP (mmx_8888_8888_normal_OVER, scaled_nearest_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, NORMAL) static force_inline void scaled_nearest_scanline_mmx_8888_n_8888_OVER (const uint32_t * mask, uint32_t * dst, const uint32_t * src, int32_t w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t src_width_fixed, pixman_bool_t zero_src) { __m64 mm_mask; if (zero_src || (*mask >> 24) == 0) { /* A workaround for https://gcc.gnu.org/PR47759 */ _mm_empty (); return; } mm_mask = expand_alpha (load8888 (mask)); while (w) { uint32_t s = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; if (s) { __m64 ms = load8888 (&s); __m64 alpha = expand_alpha (ms); __m64 dest = load8888 (dst); store8888 (dst, (in_over (ms, alpha, mm_mask, dest))); } dst++; w--; } _mm_empty (); } FAST_NEAREST_MAINLOOP_COMMON (mmx_8888_n_8888_cover_OVER, scaled_nearest_scanline_mmx_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, COVER, TRUE, TRUE) FAST_NEAREST_MAINLOOP_COMMON (mmx_8888_n_8888_pad_OVER, 
scaled_nearest_scanline_mmx_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, PAD, TRUE, TRUE) FAST_NEAREST_MAINLOOP_COMMON (mmx_8888_n_8888_none_OVER, scaled_nearest_scanline_mmx_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, NONE, TRUE, TRUE) FAST_NEAREST_MAINLOOP_COMMON (mmx_8888_n_8888_normal_OVER, scaled_nearest_scanline_mmx_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, NORMAL, TRUE, TRUE) #define BSHIFT ((1 << BILINEAR_INTERPOLATION_BITS)) #define BMSK (BSHIFT - 1) #define BILINEAR_DECLARE_VARIABLES \ const __m64 mm_wt = _mm_set_pi16 (wt, wt, wt, wt); \ const __m64 mm_wb = _mm_set_pi16 (wb, wb, wb, wb); \ const __m64 mm_addc7 = _mm_set_pi16 (0, 1, 0, 1); \ const __m64 mm_xorc7 = _mm_set_pi16 (0, BMSK, 0, BMSK); \ const __m64 mm_ux = _mm_set_pi16 (unit_x, unit_x, unit_x, unit_x); \ const __m64 mm_zero = _mm_setzero_si64 (); \ __m64 mm_x = _mm_set_pi16 (vx, vx, vx, vx) #define BILINEAR_INTERPOLATE_ONE_PIXEL(pix) \ do { \ /* fetch 2x2 pixel block into 2 mmx registers */ \ __m64 t = ldq_u ((__m64 *)&src_top [pixman_fixed_to_int (vx)]); \ __m64 b = ldq_u ((__m64 *)&src_bottom [pixman_fixed_to_int (vx)]); \ /* vertical interpolation */ \ __m64 t_hi = _mm_mullo_pi16 (_mm_unpackhi_pi8 (t, mm_zero), mm_wt); \ __m64 t_lo = _mm_mullo_pi16 (_mm_unpacklo_pi8 (t, mm_zero), mm_wt); \ __m64 b_hi = _mm_mullo_pi16 (_mm_unpackhi_pi8 (b, mm_zero), mm_wb); \ __m64 b_lo = _mm_mullo_pi16 (_mm_unpacklo_pi8 (b, mm_zero), mm_wb); \ __m64 hi = _mm_add_pi16 (t_hi, b_hi); \ __m64 lo = _mm_add_pi16 (t_lo, b_lo); \ /* calculate horizontal weights */ \ __m64 mm_wh = _mm_add_pi16 (mm_addc7, _mm_xor_si64 (mm_xorc7, \ _mm_srli_pi16 (mm_x, \ 16 - BILINEAR_INTERPOLATION_BITS))); \ /* horizontal interpolation */ \ __m64 p = _mm_unpacklo_pi16 (lo, hi); \ __m64 q = _mm_unpackhi_pi16 (lo, hi); \ vx += unit_x; \ lo = _mm_madd_pi16 (p, mm_wh); \ hi = _mm_madd_pi16 (q, mm_wh); \ mm_x = _mm_add_pi16 (mm_x, mm_ux); \ /* shift and pack the result */ \ hi = _mm_srli_pi32 (hi, BILINEAR_INTERPOLATION_BITS * 
2); \ lo = _mm_srli_pi32 (lo, BILINEAR_INTERPOLATION_BITS * 2); \ lo = _mm_packs_pi32 (lo, hi); \ lo = _mm_packs_pu16 (lo, lo); \ pix = lo; \ } while (0) #define BILINEAR_SKIP_ONE_PIXEL() \ do { \ vx += unit_x; \ mm_x = _mm_add_pi16 (mm_x, mm_ux); \ } while(0) static force_inline void scaled_bilinear_scanline_mmx_8888_8888_SRC (uint32_t * dst, const uint32_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t zero_src) { BILINEAR_DECLARE_VARIABLES; __m64 pix; while (w--) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix); store (dst, pix); dst++; } _mm_empty (); } FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_cover_SRC, scaled_bilinear_scanline_mmx_8888_8888_SRC, uint32_t, uint32_t, uint32_t, COVER, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_pad_SRC, scaled_bilinear_scanline_mmx_8888_8888_SRC, uint32_t, uint32_t, uint32_t, PAD, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_none_SRC, scaled_bilinear_scanline_mmx_8888_8888_SRC, uint32_t, uint32_t, uint32_t, NONE, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_normal_SRC, scaled_bilinear_scanline_mmx_8888_8888_SRC, uint32_t, uint32_t, uint32_t, NORMAL, FLAG_NONE) static force_inline void scaled_bilinear_scanline_mmx_8888_8888_OVER (uint32_t * dst, const uint32_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t zero_src) { BILINEAR_DECLARE_VARIABLES; __m64 pix1, pix2; while (w) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); if (!is_zero (pix1)) { pix2 = load (dst); store8888 (dst, core_combine_over_u_pixel_mmx (pix1, pix2)); } w--; dst++; } _mm_empty (); } FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_cover_OVER, scaled_bilinear_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, uint32_t, COVER, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_pad_OVER, 
scaled_bilinear_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, uint32_t, PAD, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_none_OVER, scaled_bilinear_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, uint32_t, NONE, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8888_normal_OVER, scaled_bilinear_scanline_mmx_8888_8888_OVER, uint32_t, uint32_t, uint32_t, NORMAL, FLAG_NONE) static force_inline void scaled_bilinear_scanline_mmx_8888_8_8888_OVER (uint32_t * dst, const uint8_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t max_vx, pixman_bool_t zero_src) { BILINEAR_DECLARE_VARIABLES; __m64 pix1, pix2; uint32_t m; while (w) { m = (uint32_t) *mask++; if (m) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); if (m == 0xff && is_opaque (pix1)) { store (dst, pix1); } else { __m64 ms, md, ma, msa; pix2 = load (dst); ma = expand_alpha_rev (to_m64 (m)); ms = _mm_unpacklo_pi8 (pix1, _mm_setzero_si64 ()); md = _mm_unpacklo_pi8 (pix2, _mm_setzero_si64 ()); msa = expand_alpha (ms); store8888 (dst, (in_over (ms, msa, ma, md))); } } else { BILINEAR_SKIP_ONE_PIXEL (); } w--; dst++; } _mm_empty (); } FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8_8888_cover_OVER, scaled_bilinear_scanline_mmx_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, COVER, FLAG_HAVE_NON_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8_8888_pad_OVER, scaled_bilinear_scanline_mmx_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, PAD, FLAG_HAVE_NON_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8_8888_none_OVER, scaled_bilinear_scanline_mmx_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, NONE, FLAG_HAVE_NON_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (mmx_8888_8_8888_normal_OVER, scaled_bilinear_scanline_mmx_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, NORMAL, FLAG_HAVE_NON_SOLID_MASK) static uint32_t * mmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; uint32_t *dst = 
iter->buffer; uint32_t *src = (uint32_t *)iter->bits; iter->bits += iter->stride; while (w && ((uintptr_t)dst) & 7) { *dst++ = (*src++) | 0xff000000; w--; } while (w >= 8) { __m64 vsrc1 = ldq_u ((__m64 *)(src + 0)); __m64 vsrc2 = ldq_u ((__m64 *)(src + 2)); __m64 vsrc3 = ldq_u ((__m64 *)(src + 4)); __m64 vsrc4 = ldq_u ((__m64 *)(src + 6)); *(__m64 *)(dst + 0) = _mm_or_si64 (vsrc1, MC (ff000000)); *(__m64 *)(dst + 2) = _mm_or_si64 (vsrc2, MC (ff000000)); *(__m64 *)(dst + 4) = _mm_or_si64 (vsrc3, MC (ff000000)); *(__m64 *)(dst + 6) = _mm_or_si64 (vsrc4, MC (ff000000)); dst += 8; src += 8; w -= 8; } while (w) { *dst++ = (*src++) | 0xff000000; w--; } _mm_empty (); return iter->buffer; } static uint32_t * mmx_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; uint32_t *dst = iter->buffer; uint16_t *src = (uint16_t *)iter->bits; iter->bits += iter->stride; while (w && ((uintptr_t)dst) & 0x0f) { uint16_t s = *src++; *dst++ = convert_0565_to_8888 (s); w--; } while (w >= 4) { __m64 vsrc = ldq_u ((__m64 *)src); __m64 mm0, mm1; expand_4xpacked565 (vsrc, &mm0, &mm1, 1); *(__m64 *)(dst + 0) = mm0; *(__m64 *)(dst + 2) = mm1; dst += 4; src += 4; w -= 4; } while (w) { uint16_t s = *src++; *dst++ = convert_0565_to_8888 (s); w--; } _mm_empty (); return iter->buffer; } static uint32_t * mmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; uint32_t *dst = iter->buffer; uint8_t *src = iter->bits; iter->bits += iter->stride; while (w && (((uintptr_t)dst) & 15)) { *dst++ = (uint32_t)*(src++) << 24; w--; } while (w >= 8) { __m64 mm0 = ldq_u ((__m64 *)src); __m64 mm1 = _mm_unpacklo_pi8 (_mm_setzero_si64(), mm0); __m64 mm2 = _mm_unpackhi_pi8 (_mm_setzero_si64(), mm0); __m64 mm3 = _mm_unpacklo_pi16 (_mm_setzero_si64(), mm1); __m64 mm4 = _mm_unpackhi_pi16 (_mm_setzero_si64(), mm1); __m64 mm5 = _mm_unpacklo_pi16 (_mm_setzero_si64(), mm2); __m64 mm6 = _mm_unpackhi_pi16 (_mm_setzero_si64(), mm2); *(__m64 *)(dst + 0) = mm3; *(__m64 *)(dst + 
2) = mm4; *(__m64 *)(dst + 4) = mm5; *(__m64 *)(dst + 6) = mm6; dst += 8; src += 8; w -= 8; } while (w) { *dst++ = (uint32_t)*(src++) << 24; w--; } _mm_empty (); return iter->buffer; } #define IMAGE_FLAGS \ (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \ FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST) static const pixman_iter_info_t mmx_iters[] = { { PIXMAN_x8r8g8b8, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, mmx_fetch_x8r8g8b8, NULL }, { PIXMAN_r5g6b5, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, mmx_fetch_r5g6b5, NULL }, { PIXMAN_a8, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, mmx_fetch_a8, NULL }, { PIXMAN_null }, }; static const pixman_fast_path_t mmx_fast_paths[] = { PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, mmx_composite_over_n_8_0565 ), PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, mmx_composite_over_n_8_0565 ), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, mmx_composite_over_n_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, mmx_composite_over_n_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, mmx_composite_over_n_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, mmx_composite_over_n_8_8888 ), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, mmx_composite_over_n_8888_8888_ca ), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, mmx_composite_over_n_8888_8888_ca ), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, mmx_composite_over_n_8888_0565_ca ), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, mmx_composite_over_n_8888_8888_ca ), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, mmx_composite_over_n_8888_8888_ca ), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, mmx_composite_over_n_8888_0565_ca ), PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, mmx_composite_over_pixbuf_8888 ), PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, mmx_composite_over_pixbuf_8888 ), PIXMAN_STD_FAST_PATH (OVER, pixbuf, 
pixbuf, r5g6b5, mmx_composite_over_pixbuf_0565 ), PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, mmx_composite_over_pixbuf_8888 ), PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, mmx_composite_over_pixbuf_8888 ), PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, mmx_composite_over_pixbuf_0565 ), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, mmx_composite_over_x888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, mmx_composite_over_x888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, a8b8g8r8, mmx_composite_over_x888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, mmx_composite_over_x888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, mmx_composite_over_8888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, mmx_composite_over_8888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, mmx_composite_over_8888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, mmx_composite_over_8888_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, mmx_composite_over_x888_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, mmx_composite_over_x888_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, mmx_composite_over_x888_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, mmx_composite_over_x888_8_8888 ), PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, mmx_composite_over_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, mmx_composite_over_n_8888 ), PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, mmx_composite_over_n_0565 ), PIXMAN_STD_FAST_PATH (OVER, solid, null, b5g6r5, mmx_composite_over_n_0565 ), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, mmx_composite_over_8888_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, 
mmx_composite_over_8888_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, mmx_composite_over_8888_0565 ), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, mmx_composite_over_8888_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, mmx_composite_over_8888_8888 ), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, mmx_composite_over_8888_0565 ), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, mmx_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, mmx_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, mmx_composite_add_0565_0565 ), PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, mmx_composite_add_0565_0565 ), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, mmx_composite_add_8888_8888 ), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, mmx_composite_add_8888_8888 ), PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, mmx_composite_add_8_8 ), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, mmx_composite_add_n_8_8 ), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, mmx_composite_src_x888_0565 ), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, mmx_composite_src_x888_0565 ), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, mmx_composite_src_x888_0565 ), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, mmx_composite_src_x888_0565 ), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, mmx_composite_src_n_8_8888 ), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, mmx_composite_src_n_8_8888 ), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, mmx_composite_src_n_8_8888 ), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, mmx_composite_src_n_8_8888 ), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ), 
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, mmx_composite_copy_area ), PIXMAN_STD_FAST_PATH (IN, a8, null, a8, mmx_composite_in_8_8 ), PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, mmx_composite_in_n_8_8 ), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mmx_8888_8888 ), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, mmx_8888_8888 ), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mmx_8888_8888 ), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, mmx_8888_8888 ), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mmx_8888_n_8888 ), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, mmx_8888_n_8888 ), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mmx_8888_n_8888 ), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, mmx_8888_n_8888 ), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mmx_8888_8888 ), SIMPLE_BILINEAR_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, mmx_8888_8888 ), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mmx_8888_8_8888 ), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, mmx_8888_8_8888 ), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mmx_8888_8_8888 ), 
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, mmx_8888_8_8888 ), { PIXMAN_OP_NONE }, }; pixman_implementation_t * _pixman_implementation_create_mmx (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, mmx_fast_paths); imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u; imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u; imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u; imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u; imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u; imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u; imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u; imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u; imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u; imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u; imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u; imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca; imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca; imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca; imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca; imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca; imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca; imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca; imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca; imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca; imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca; imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca; imp->blt = mmx_blt; imp->fill = mmx_fill; imp->iter_info = mmx_iters; return imp; } #endif /* USE_X86_MMX || USE_LOONGSON_MMI */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-noop.c0000664000175000017500000001067214712446423017031 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; 
tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2011 Red Hat, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" #include "pixman-combine32.h" #include "pixman-inlines.h" static void noop_composite (pixman_implementation_t *imp, pixman_composite_info_t *info) { return; } static uint32_t * noop_get_scanline (pixman_iter_t *iter, const uint32_t *mask) { uint32_t *result = iter->buffer; iter->buffer += iter->image->bits.rowstride; return result; } static void noop_init_solid_narrow (pixman_iter_t *iter, const pixman_iter_info_t *info) { pixman_image_t *image = iter->image; uint32_t *buffer = iter->buffer; uint32_t *end = buffer + iter->width; uint32_t color; if (iter->image->type == SOLID) color = image->solid.color_32; else color = image->bits.fetch_pixel_32 (&image->bits, 0, 0); while (buffer < end) *(buffer++) = color; } static void noop_init_solid_wide (pixman_iter_t *iter, const pixman_iter_info_t *info) { pixman_image_t *image = iter->image; argb_t *buffer = (argb_t *)iter->buffer; argb_t *end = buffer + iter->width; argb_t color; if (iter->image->type == SOLID) color = image->solid.color_float; else color = image->bits.fetch_pixel_float (&image->bits, 0, 0); while (buffer < end) *(buffer++) = color; } static void noop_init_direct_buffer (pixman_iter_t *iter, const pixman_iter_info_t *info) { pixman_image_t *image = iter->image; iter->buffer = image->bits.bits + iter->y * image->bits.rowstride + iter->x; } static void dest_write_back_direct (pixman_iter_t *iter) { iter->buffer += iter->image->bits.rowstride; } static const pixman_iter_info_t noop_iters[] = { /* Source iters */ { PIXMAN_any, 0, ITER_IGNORE_ALPHA | ITER_IGNORE_RGB | ITER_SRC, NULL, _pixman_iter_get_scanline_noop, NULL }, { PIXMAN_solid, FAST_PATH_NO_ALPHA_MAP, ITER_NARROW | ITER_SRC, noop_init_solid_narrow, _pixman_iter_get_scanline_noop, NULL, }, { PIXMAN_solid, FAST_PATH_NO_ALPHA_MAP, ITER_WIDE | ITER_SRC, noop_init_solid_wide, _pixman_iter_get_scanline_noop, NULL }, { PIXMAN_a8r8g8b8, FAST_PATH_STANDARD_FLAGS | 
FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, ITER_NARROW | ITER_SRC, noop_init_direct_buffer, noop_get_scanline, NULL }, /* Dest iters */ { PIXMAN_a8r8g8b8, FAST_PATH_STD_DEST_FLAGS, ITER_NARROW | ITER_DEST, noop_init_direct_buffer, _pixman_iter_get_scanline_noop, dest_write_back_direct }, { PIXMAN_x8r8g8b8, FAST_PATH_STD_DEST_FLAGS, ITER_NARROW | ITER_DEST | ITER_LOCALIZED_ALPHA, noop_init_direct_buffer, _pixman_iter_get_scanline_noop, dest_write_back_direct }, { PIXMAN_null }, }; static const pixman_fast_path_t noop_fast_paths[] = { { PIXMAN_OP_DST, PIXMAN_any, 0, PIXMAN_any, 0, PIXMAN_any, 0, noop_composite }, { PIXMAN_OP_NONE }, }; pixman_implementation_t * _pixman_implementation_create_noop (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, noop_fast_paths); imp->iter_info = noop_iters; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-ppc.c0000664000175000017500000000764414712446423016645 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. 
* * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #ifdef USE_VMX /* The CPU detection code needs to be in a file not compiled with * "-maltivec -mabi=altivec", as gcc would try to save vector register * across function calls causing SIGILL on cpus without Altivec/vmx. */ #ifdef __APPLE__ #include static pixman_bool_t pixman_have_vmx (void) { int error, have_vmx; size_t length = sizeof(have_vmx); error = sysctlbyname ("hw.optional.altivec", &have_vmx, &length, NULL, 0); if (error) return FALSE; return have_vmx; } #elif defined (__OpenBSD__) #include #include #include static pixman_bool_t pixman_have_vmx (void) { int error, have_vmx; int mib[2] = { CTL_MACHDEP, CPU_ALTIVEC }; size_t length = sizeof(have_vmx); error = sysctl (mib, 2, &have_vmx, &length, NULL, 0); if (error != 0) return FALSE; return have_vmx; } #elif defined (__FreeBSD__) #include #include static pixman_bool_t pixman_have_vmx (void) { unsigned long cpufeatures; int have_vmx; if (elf_aux_info(AT_HWCAP, &cpufeatures, sizeof(cpufeatures))) return FALSE; have_vmx = cpufeatures & PPC_FEATURE_HAS_ALTIVEC; return have_vmx; } #elif defined (__linux__) #include #include #include #include #include #include #include static pixman_bool_t pixman_have_vmx (void) { int have_vmx = FALSE; int fd; struct { unsigned long type; unsigned long value; } aux; fd = open ("/proc/self/auxv", O_RDONLY); if (fd >= 0) { while (read (fd, &aux, sizeof (aux)) == sizeof (aux)) { if (aux.type == AT_HWCAP && (aux.value & PPC_FEATURE_HAS_ALTIVEC)) { have_vmx = TRUE; break; } } close 
(fd); } return have_vmx; } #else /* !__APPLE__ && !__OpenBSD__ && !__linux__ */ #include #include static jmp_buf jump_env; static void vmx_test (int sig, siginfo_t *si, void * unused) { longjmp (jump_env, 1); } static pixman_bool_t pixman_have_vmx (void) { struct sigaction sa, osa; int jmp_result; sa.sa_flags = SA_SIGINFO; sigemptyset (&sa.sa_mask); sa.sa_sigaction = vmx_test; sigaction (SIGILL, &sa, &osa); jmp_result = setjmp (jump_env); if (jmp_result == 0) { asm volatile ( "vor 0, 0, 0" ); } sigaction (SIGILL, &osa, NULL); return (jmp_result == 0); } #endif /* __APPLE__ */ #endif /* USE_VMX */ pixman_implementation_t * _pixman_ppc_get_implementations (pixman_implementation_t *imp) { #ifdef USE_VMX if (!_pixman_disabled ("vmx") && pixman_have_vmx ()) imp = _pixman_implementation_create_vmx (imp); #endif return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-private.h0000664000175000017500000010661114712446423017534 0ustar00mattst88mattst88#ifndef PIXMAN_PRIVATE_H #define PIXMAN_PRIVATE_H /* * The defines which are shared between C and assembly code */ /* bilinear interpolation precision (must be < 8) */ #define BILINEAR_INTERPOLATION_BITS 7 #define BILINEAR_INTERPOLATION_RANGE (1 << BILINEAR_INTERPOLATION_BITS) /* * C specific part */ #ifndef __ASSEMBLER__ #ifndef PACKAGE # error config.h must be included before pixman-private.h #endif #define PIXMAN_DISABLE_DEPRECATED #define PIXMAN_USE_INTERNAL_API #include "pixman.h" #include #include #include #include #include #include #include "pixman-compiler.h" /* * Images */ typedef struct image_common image_common_t; typedef struct solid_fill solid_fill_t; typedef struct gradient gradient_t; typedef struct linear_gradient linear_gradient_t; typedef struct horizontal_gradient horizontal_gradient_t; typedef struct vertical_gradient vertical_gradient_t; typedef struct conical_gradient conical_gradient_t; typedef struct radial_gradient 
radial_gradient_t; typedef struct bits_image bits_image_t; typedef struct circle circle_t; typedef struct argb_t argb_t; struct argb_t { float a; float r; float g; float b; }; typedef void (*fetch_scanline_t) (bits_image_t *image, int x, int y, int width, uint32_t *buffer, const uint32_t *mask); typedef uint32_t (*fetch_pixel_32_t) (bits_image_t *image, int x, int y); typedef argb_t (*fetch_pixel_float_t) (bits_image_t *image, int x, int y); typedef void (*store_scanline_t) (bits_image_t * image, int x, int y, int width, const uint32_t *values); typedef enum { BITS, LINEAR, CONICAL, RADIAL, SOLID } image_type_t; typedef void (*property_changed_func_t) (pixman_image_t *image); struct image_common { image_type_t type; int32_t ref_count; pixman_region32_t clip_region; int32_t alpha_count; /* How many times this image is being used as an alpha map */ pixman_bool_t have_clip_region; /* FALSE if there is no clip */ pixman_bool_t client_clip; /* Whether the source clip was set by a client */ pixman_bool_t clip_sources; /* Whether the clip applies when * the image is used as a source */ pixman_bool_t dirty; pixman_transform_t * transform; pixman_repeat_t repeat; pixman_filter_t filter; pixman_fixed_t * filter_params; int n_filter_params; bits_image_t * alpha_map; int alpha_origin_x; int alpha_origin_y; pixman_bool_t component_alpha; property_changed_func_t property_changed; pixman_image_destroy_func_t destroy_func; void * destroy_data; uint32_t flags; pixman_format_code_t extended_format_code; }; struct solid_fill { image_common_t common; pixman_color_t color; uint32_t color_32; argb_t color_float; }; struct gradient { image_common_t common; int n_stops; pixman_gradient_stop_t *stops; }; struct linear_gradient { gradient_t common; pixman_point_fixed_t p1; pixman_point_fixed_t p2; }; struct circle { pixman_fixed_t x; pixman_fixed_t y; pixman_fixed_t radius; }; struct radial_gradient { gradient_t common; circle_t c1; circle_t c2; circle_t delta; double a; double inva; double 
mindr; }; struct conical_gradient { gradient_t common; pixman_point_fixed_t center; double angle; }; struct bits_image { image_common_t common; pixman_format_code_t format; const pixman_indexed_t * indexed; int width; int height; uint32_t * bits; uint32_t * free_me; int rowstride; /* in number of uint32_t's */ pixman_dither_t dither; uint32_t dither_offset_y; uint32_t dither_offset_x; fetch_scanline_t fetch_scanline_32; fetch_pixel_32_t fetch_pixel_32; store_scanline_t store_scanline_32; fetch_scanline_t fetch_scanline_float; fetch_pixel_float_t fetch_pixel_float; store_scanline_t store_scanline_float; /* Used for indirect access to the bits */ pixman_read_memory_func_t read_func; pixman_write_memory_func_t write_func; }; union pixman_image { image_type_t type; image_common_t common; bits_image_t bits; gradient_t gradient; linear_gradient_t linear; conical_gradient_t conical; radial_gradient_t radial; solid_fill_t solid; }; typedef struct pixman_iter_t pixman_iter_t; typedef uint32_t *(* pixman_iter_get_scanline_t) (pixman_iter_t *iter, const uint32_t *mask); typedef void (* pixman_iter_write_back_t) (pixman_iter_t *iter); typedef void (* pixman_iter_fini_t) (pixman_iter_t *iter); typedef enum { ITER_NARROW = (1 << 0), ITER_WIDE = (1 << 1), /* "Localized alpha" is when the alpha channel is used only to compute * the alpha value of the destination. This means that the computation * of the RGB values of the result is independent of the alpha value. * * For example, the OVER operator has localized alpha for the * destination, because the RGB values of the result can be computed * without knowing the destination alpha. Similarly, ADD has localized * alpha for both source and destination because the RGB values of the * result can be computed without knowing the alpha value of source or * destination. 
* * When he destination is xRGB, this is useful knowledge, because then * we can treat it as if it were ARGB, which means in some cases we can * avoid copying it to a temporary buffer. */ ITER_LOCALIZED_ALPHA = (1 << 2), ITER_IGNORE_ALPHA = (1 << 3), ITER_IGNORE_RGB = (1 << 4), /* These indicate whether the iterator is for a source * or a destination image */ ITER_SRC = (1 << 5), ITER_DEST = (1 << 6) } iter_flags_t; struct pixman_iter_t { /* These are initialized by _pixman_implementation_{src,dest}_init */ pixman_image_t * image; uint32_t * buffer; int x, y; int width; int height; iter_flags_t iter_flags; uint32_t image_flags; /* These function pointers are initialized by the implementation */ pixman_iter_get_scanline_t get_scanline; pixman_iter_write_back_t write_back; pixman_iter_fini_t fini; /* These fields are scratch data that implementations can use */ void * data; uint8_t * bits; int stride; }; typedef struct pixman_iter_info_t pixman_iter_info_t; typedef void (* pixman_iter_initializer_t) (pixman_iter_t *iter, const pixman_iter_info_t *info); struct pixman_iter_info_t { pixman_format_code_t format; uint32_t image_flags; iter_flags_t iter_flags; pixman_iter_initializer_t initializer; pixman_iter_get_scanline_t get_scanline; pixman_iter_write_back_t write_back; }; void _pixman_bits_image_setup_accessors (bits_image_t *image); void _pixman_bits_image_src_iter_init (pixman_image_t *image, pixman_iter_t *iter); void _pixman_bits_image_dest_iter_init (pixman_image_t *image, pixman_iter_t *iter); void _pixman_linear_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter); void _pixman_radial_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter); void _pixman_conical_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter); void _pixman_image_init (pixman_image_t *image); pixman_bool_t _pixman_bits_image_init (pixman_image_t * image, pixman_format_code_t format, int width, int height, uint32_t * bits, int rowstride, pixman_bool_t clear); 
pixman_bool_t _pixman_image_fini (pixman_image_t *image); pixman_image_t * _pixman_image_allocate (void); pixman_bool_t _pixman_init_gradient (gradient_t * gradient, const pixman_gradient_stop_t *stops, int n_stops); void _pixman_image_reset_clip_region (pixman_image_t *image); void _pixman_image_validate (pixman_image_t *image); #define PIXMAN_IMAGE_GET_LINE(image, x, y, type, out_stride, line, mul) \ do \ { \ uint32_t *__bits__; \ int __stride__; \ \ __bits__ = image->bits.bits; \ __stride__ = image->bits.rowstride; \ (out_stride) = \ __stride__ * (int) sizeof (uint32_t) / (int) sizeof (type); \ (line) = \ ((type *) __bits__) + (out_stride) * (y) + (mul) * (x); \ } while (0) /* * Gradient walker */ typedef struct { float a_s, a_b; float r_s, r_b; float g_s, g_b; float b_s, b_b; pixman_fixed_48_16_t left_x; pixman_fixed_48_16_t right_x; pixman_gradient_stop_t *stops; int num_stops; pixman_repeat_t repeat; pixman_bool_t need_reset; } pixman_gradient_walker_t; void _pixman_gradient_walker_init (pixman_gradient_walker_t *walker, gradient_t * gradient, pixman_repeat_t repeat); void _pixman_gradient_walker_reset (pixman_gradient_walker_t *walker, pixman_fixed_48_16_t pos); typedef void (*pixman_gradient_walker_write_t) ( pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer); void _pixman_gradient_walker_write_narrow(pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer); void _pixman_gradient_walker_write_wide(pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer); typedef void (*pixman_gradient_walker_fill_t) ( pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer, uint32_t *end); void _pixman_gradient_walker_fill_narrow(pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer, uint32_t *end); void _pixman_gradient_walker_fill_wide(pixman_gradient_walker_t *walker, pixman_fixed_48_16_t x, uint32_t *buffer, uint32_t *end); /* * Edges */ #define MAX_ALPHA(n) ((1 << 
(n)) - 1) #define N_Y_FRAC(n) ((n) == 1 ? 1 : (1 << ((n) / 2)) - 1) #define N_X_FRAC(n) ((n) == 1 ? 1 : (1 << ((n) / 2)) + 1) #define STEP_Y_SMALL(n) (pixman_fixed_1 / N_Y_FRAC (n)) #define STEP_Y_BIG(n) (pixman_fixed_1 - (N_Y_FRAC (n) - 1) * STEP_Y_SMALL (n)) #define Y_FRAC_FIRST(n) (STEP_Y_BIG (n) / 2) #define Y_FRAC_LAST(n) (Y_FRAC_FIRST (n) + (N_Y_FRAC (n) - 1) * STEP_Y_SMALL (n)) #define STEP_X_SMALL(n) (pixman_fixed_1 / N_X_FRAC (n)) #define STEP_X_BIG(n) (pixman_fixed_1 - (N_X_FRAC (n) - 1) * STEP_X_SMALL (n)) #define X_FRAC_FIRST(n) (STEP_X_BIG (n) / 2) #define X_FRAC_LAST(n) (X_FRAC_FIRST (n) + (N_X_FRAC (n) - 1) * STEP_X_SMALL (n)) #define RENDER_SAMPLES_X(x, n) \ ((n) == 1? 0 : (pixman_fixed_frac (x) + \ X_FRAC_FIRST (n)) / STEP_X_SMALL (n)) void pixman_rasterize_edges_accessors (pixman_image_t *image, pixman_edge_t * l, pixman_edge_t * r, pixman_fixed_t t, pixman_fixed_t b); /* * Implementations */ typedef struct pixman_implementation_t pixman_implementation_t; typedef struct { pixman_op_t op; pixman_image_t * src_image; pixman_image_t * mask_image; pixman_image_t * dest_image; int32_t src_x; int32_t src_y; int32_t mask_x; int32_t mask_y; int32_t dest_x; int32_t dest_y; int32_t width; int32_t height; uint32_t src_flags; uint32_t mask_flags; uint32_t dest_flags; } pixman_composite_info_t; #define PIXMAN_COMPOSITE_ARGS(info) \ MAYBE_UNUSED pixman_op_t op = info->op; \ MAYBE_UNUSED pixman_image_t * src_image = info->src_image; \ MAYBE_UNUSED pixman_image_t * mask_image = info->mask_image; \ MAYBE_UNUSED pixman_image_t * dest_image = info->dest_image; \ MAYBE_UNUSED int32_t src_x = info->src_x; \ MAYBE_UNUSED int32_t src_y = info->src_y; \ MAYBE_UNUSED int32_t mask_x = info->mask_x; \ MAYBE_UNUSED int32_t mask_y = info->mask_y; \ MAYBE_UNUSED int32_t dest_x = info->dest_x; \ MAYBE_UNUSED int32_t dest_y = info->dest_y; \ MAYBE_UNUSED int32_t width = info->width; \ MAYBE_UNUSED int32_t height = info->height typedef void (*pixman_combine_32_func_t) 
(pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width); typedef void (*pixman_combine_float_func_t) (pixman_implementation_t *imp, pixman_op_t op, float * dest, const float * src, const float * mask, int n_pixels); typedef void (*pixman_composite_func_t) (pixman_implementation_t *imp, pixman_composite_info_t *info); typedef pixman_bool_t (*pixman_blt_func_t) (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height); typedef pixman_bool_t (*pixman_fill_func_t) (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t filler); void _pixman_setup_combiner_functions_32 (pixman_implementation_t *imp); void _pixman_setup_combiner_functions_float (pixman_implementation_t *imp); typedef struct { pixman_op_t op; pixman_format_code_t src_format; uint32_t src_flags; pixman_format_code_t mask_format; uint32_t mask_flags; pixman_format_code_t dest_format; uint32_t dest_flags; pixman_composite_func_t func; } pixman_fast_path_t; struct pixman_implementation_t { pixman_implementation_t * toplevel; pixman_implementation_t * fallback; const pixman_fast_path_t * fast_paths; const pixman_iter_info_t * iter_info; pixman_blt_func_t blt; pixman_fill_func_t fill; pixman_combine_32_func_t combine_32[PIXMAN_N_OPERATORS]; pixman_combine_32_func_t combine_32_ca[PIXMAN_N_OPERATORS]; pixman_combine_float_func_t combine_float[PIXMAN_N_OPERATORS]; pixman_combine_float_func_t combine_float_ca[PIXMAN_N_OPERATORS]; }; uint32_t _pixman_image_get_solid (pixman_implementation_t *imp, pixman_image_t * image, pixman_format_code_t format); pixman_implementation_t * _pixman_implementation_create (pixman_implementation_t *fallback, const pixman_fast_path_t *fast_paths); void _pixman_implementation_lookup_composite (pixman_implementation_t 
*toplevel, pixman_op_t op, pixman_format_code_t src_format, uint32_t src_flags, pixman_format_code_t mask_format, uint32_t mask_flags, pixman_format_code_t dest_format, uint32_t dest_flags, pixman_implementation_t **out_imp, pixman_composite_func_t *out_func); pixman_combine_32_func_t _pixman_implementation_lookup_combiner (pixman_implementation_t *imp, pixman_op_t op, pixman_bool_t component_alpha, pixman_bool_t wide); pixman_bool_t _pixman_implementation_blt (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height); pixman_bool_t _pixman_implementation_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t filler); void _pixman_implementation_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter, pixman_image_t *image, int x, int y, int width, int height, uint8_t *buffer, iter_flags_t flags, uint32_t image_flags); /* Specific implementations */ pixman_implementation_t * _pixman_implementation_create_general (void); pixman_implementation_t * _pixman_implementation_create_fast_path (pixman_implementation_t *fallback); pixman_implementation_t * _pixman_implementation_create_noop (pixman_implementation_t *fallback); #if defined USE_X86_MMX || defined USE_LOONGSON_MMI pixman_implementation_t * _pixman_implementation_create_mmx (pixman_implementation_t *fallback); #endif #ifdef USE_SSE2 pixman_implementation_t * _pixman_implementation_create_sse2 (pixman_implementation_t *fallback); #endif #ifdef USE_SSSE3 pixman_implementation_t * _pixman_implementation_create_ssse3 (pixman_implementation_t *fallback); #endif #ifdef USE_ARM_SIMD pixman_implementation_t * _pixman_implementation_create_arm_simd (pixman_implementation_t *fallback); #endif #ifdef USE_ARM_NEON pixman_implementation_t * _pixman_implementation_create_arm_neon (pixman_implementation_t *fallback); 
#endif #ifdef USE_ARM_A64_NEON pixman_implementation_t * _pixman_implementation_create_arm_neon (pixman_implementation_t *fallback); #endif #ifdef USE_MIPS_DSPR2 pixman_implementation_t * _pixman_implementation_create_mips_dspr2 (pixman_implementation_t *fallback); #endif #ifdef USE_VMX pixman_implementation_t * _pixman_implementation_create_vmx (pixman_implementation_t *fallback); #endif #ifdef USE_RVV pixman_implementation_t * _pixman_implementation_create_rvv (pixman_implementation_t *fallback); #endif pixman_bool_t _pixman_implementation_disabled (const char *name); pixman_implementation_t * _pixman_x86_get_implementations (pixman_implementation_t *imp); pixman_implementation_t * _pixman_arm_get_implementations (pixman_implementation_t *imp); pixman_implementation_t * _pixman_ppc_get_implementations (pixman_implementation_t *imp); pixman_implementation_t * _pixman_mips_get_implementations (pixman_implementation_t *imp); pixman_implementation_t * _pixman_riscv_get_implementations (pixman_implementation_t *imp); pixman_implementation_t * _pixman_choose_implementation (void); pixman_bool_t _pixman_disabled (const char *name); /* * Utilities */ pixman_bool_t _pixman_compute_composite_region32 (pixman_region32_t * region, pixman_image_t * src_image, pixman_image_t * mask_image, pixman_image_t * dest_image, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y, int32_t dest_x, int32_t dest_y, int32_t width, int32_t height); uint32_t * _pixman_iter_get_scanline_noop (pixman_iter_t *iter, const uint32_t *mask); void _pixman_iter_init_bits_stride (pixman_iter_t *iter, const pixman_iter_info_t *info); /* These "formats" all have depth 0, so they * will never clash with any real ones */ #define PIXMAN_null PIXMAN_FORMAT (0, 0, 0, 0, 0, 0) #define PIXMAN_solid PIXMAN_FORMAT (0, 1, 0, 0, 0, 0) #define PIXMAN_pixbuf PIXMAN_FORMAT (0, 2, 0, 0, 0, 0) #define PIXMAN_rpixbuf PIXMAN_FORMAT (0, 3, 0, 0, 0, 0) #define PIXMAN_unknown PIXMAN_FORMAT (0, 4, 0, 0, 0, 0) #define 
PIXMAN_any PIXMAN_FORMAT (0, 5, 0, 0, 0, 0) #define PIXMAN_OP_any (PIXMAN_N_OPERATORS + 1) #define FAST_PATH_ID_TRANSFORM (1 << 0) #define FAST_PATH_NO_ALPHA_MAP (1 << 1) #define FAST_PATH_NO_CONVOLUTION_FILTER (1 << 2) #define FAST_PATH_NO_PAD_REPEAT (1 << 3) #define FAST_PATH_NO_REFLECT_REPEAT (1 << 4) #define FAST_PATH_NO_ACCESSORS (1 << 5) #define FAST_PATH_NARROW_FORMAT (1 << 6) #define FAST_PATH_COMPONENT_ALPHA (1 << 8) #define FAST_PATH_SAMPLES_OPAQUE (1 << 7) #define FAST_PATH_UNIFIED_ALPHA (1 << 9) #define FAST_PATH_SCALE_TRANSFORM (1 << 10) #define FAST_PATH_NEAREST_FILTER (1 << 11) #define FAST_PATH_HAS_TRANSFORM (1 << 12) #define FAST_PATH_IS_OPAQUE (1 << 13) #define FAST_PATH_NO_NORMAL_REPEAT (1 << 14) #define FAST_PATH_NO_NONE_REPEAT (1 << 15) #define FAST_PATH_X_UNIT_POSITIVE (1 << 16) #define FAST_PATH_AFFINE_TRANSFORM (1 << 17) #define FAST_PATH_Y_UNIT_ZERO (1 << 18) #define FAST_PATH_BILINEAR_FILTER (1 << 19) #define FAST_PATH_ROTATE_90_TRANSFORM (1 << 20) #define FAST_PATH_ROTATE_180_TRANSFORM (1 << 21) #define FAST_PATH_ROTATE_270_TRANSFORM (1 << 22) #define FAST_PATH_SAMPLES_COVER_CLIP_NEAREST (1 << 23) #define FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR (1 << 24) #define FAST_PATH_BITS_IMAGE (1 << 25) #define FAST_PATH_SEPARABLE_CONVOLUTION_FILTER (1 << 26) #define FAST_PATH_PAD_REPEAT \ (FAST_PATH_NO_NONE_REPEAT | \ FAST_PATH_NO_NORMAL_REPEAT | \ FAST_PATH_NO_REFLECT_REPEAT) #define FAST_PATH_NORMAL_REPEAT \ (FAST_PATH_NO_NONE_REPEAT | \ FAST_PATH_NO_PAD_REPEAT | \ FAST_PATH_NO_REFLECT_REPEAT) #define FAST_PATH_NONE_REPEAT \ (FAST_PATH_NO_NORMAL_REPEAT | \ FAST_PATH_NO_PAD_REPEAT | \ FAST_PATH_NO_REFLECT_REPEAT) #define FAST_PATH_REFLECT_REPEAT \ (FAST_PATH_NO_NONE_REPEAT | \ FAST_PATH_NO_NORMAL_REPEAT | \ FAST_PATH_NO_PAD_REPEAT) #define FAST_PATH_STANDARD_FLAGS \ (FAST_PATH_NO_CONVOLUTION_FILTER | \ FAST_PATH_NO_ACCESSORS | \ FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NARROW_FORMAT) #define FAST_PATH_STD_DEST_FLAGS \ (FAST_PATH_NO_ACCESSORS | \ 
FAST_PATH_NO_ALPHA_MAP | \ FAST_PATH_NARROW_FORMAT) #define SOURCE_FLAGS(format) \ (FAST_PATH_STANDARD_FLAGS | \ ((PIXMAN_ ## format == PIXMAN_solid) ? \ 0 : (FAST_PATH_SAMPLES_COVER_CLIP_NEAREST | FAST_PATH_NEAREST_FILTER | FAST_PATH_ID_TRANSFORM))) #define MASK_FLAGS(format, extra) \ ((PIXMAN_ ## format == PIXMAN_null) ? 0 : (SOURCE_FLAGS (format) | extra)) #define FAST_PATH(op, src, src_flags, mask, mask_flags, dest, dest_flags, func) \ PIXMAN_OP_ ## op, \ PIXMAN_ ## src, \ src_flags, \ PIXMAN_ ## mask, \ mask_flags, \ PIXMAN_ ## dest, \ dest_flags, \ func #define PIXMAN_STD_FAST_PATH(op, src, mask, dest, func) \ { FAST_PATH ( \ op, \ src, SOURCE_FLAGS (src), \ mask, MASK_FLAGS (mask, FAST_PATH_UNIFIED_ALPHA), \ dest, FAST_PATH_STD_DEST_FLAGS, \ func) } #define PIXMAN_STD_FAST_PATH_CA(op, src, mask, dest, func) \ { FAST_PATH ( \ op, \ src, SOURCE_FLAGS (src), \ mask, MASK_FLAGS (mask, FAST_PATH_COMPONENT_ALPHA), \ dest, FAST_PATH_STD_DEST_FLAGS, \ func) } extern pixman_implementation_t *global_implementation; static force_inline pixman_implementation_t * get_implementation (void) { #ifndef TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR if (!global_implementation) global_implementation = _pixman_choose_implementation (); #endif return global_implementation; } /* This function is exported for the sake of the test suite and not part * of the ABI. */ PIXMAN_EXPORT pixman_implementation_t * _pixman_internal_only_get_reference_implementation (void); /* This function is exported for the sake of the test suite and not part * of the ABI. */ PIXMAN_EXPORT pixman_implementation_t * _pixman_internal_only_get_implementation (void); /* This function is exported for the sake of the test suite and not part * of the ABI. */ PIXMAN_EXPORT pixman_fast_path_t * _pixman_implementation_get_reference_fast_path (void); /* This function is exported for the sake of the test suite and not part * of the ABI. 
*/ PIXMAN_EXPORT int _pixman_implementation_get_reference_fast_path_size (); /* Memory allocation helpers */ void * pixman_malloc_ab (unsigned int n, unsigned int b); void * pixman_malloc_abc (unsigned int a, unsigned int b, unsigned int c); void * pixman_malloc_ab_plus_c (unsigned int a, unsigned int b, unsigned int c); pixman_bool_t _pixman_multiply_overflows_size (size_t a, size_t b); pixman_bool_t _pixman_multiply_overflows_int (unsigned int a, unsigned int b); pixman_bool_t _pixman_addition_overflows_int (unsigned int a, unsigned int b); /* Compositing utilities */ void pixman_expand_to_float (argb_t *dst, const uint32_t *src, pixman_format_code_t format, int width); void pixman_contract_from_float (uint32_t *dst, const argb_t *src, int width); /* Region Helpers */ pixman_bool_t pixman_region32_copy_from_region16 (pixman_region32_t *dst, const pixman_region16_t *src); pixman_bool_t pixman_region16_copy_from_region32 (pixman_region16_t *dst, const pixman_region32_t *src); /* Doubly linked lists */ typedef struct pixman_link_t pixman_link_t; struct pixman_link_t { pixman_link_t *next; pixman_link_t *prev; }; typedef struct pixman_list_t pixman_list_t; struct pixman_list_t { pixman_link_t *head; pixman_link_t *tail; }; static force_inline void pixman_list_init (pixman_list_t *list) { list->head = (pixman_link_t *)list; list->tail = (pixman_link_t *)list; } static force_inline void pixman_list_prepend (pixman_list_t *list, pixman_link_t *link) { link->next = list->head; link->prev = (pixman_link_t *)list; list->head->prev = link; list->head = link; } static force_inline void pixman_list_unlink (pixman_link_t *link) { link->prev->next = link->next; link->next->prev = link->prev; } static force_inline void pixman_list_move_to_front (pixman_list_t *list, pixman_link_t *link) { pixman_list_unlink (link); pixman_list_prepend (list, link); } /* Misc macros */ #ifndef FALSE # define FALSE 0 #endif #ifndef TRUE # define TRUE 1 #endif #ifndef MIN # define MIN(a, b) ((a < 
b) ? a : b) #endif #ifndef MAX # define MAX(a, b) ((a > b) ? a : b) #endif /* Integer division that rounds towards -infinity */ #define DIV(a, b) \ ((((a) < 0) == ((b) < 0)) ? (a) / (b) : \ ((a) - (b) + 1 - (((b) < 0) << 1)) / (b)) /* Modulus that produces the remainder wrt. DIV */ #define MOD(a, b) ((a) < 0 ? ((b) - ((-(a) - 1) % (b))) - 1 : (a) % (b)) #define CLIP(v, low, high) ((v) < (low) ? (low) : ((v) > (high) ? (high) : (v))) #define FLOAT_IS_ZERO(f) (-FLT_MIN < (f) && (f) < FLT_MIN) /* Conversion between 8888 and 0565 */ static force_inline uint16_t convert_8888_to_0565 (uint32_t s) { /* The following code can be compiled into just 4 instructions on ARM */ uint32_t a, b; a = (s >> 3) & 0x1F001F; b = s & 0xFC00; a |= a >> 5; a |= b >> 5; return (uint16_t)a; } static force_inline uint32_t convert_0565_to_0888 (uint16_t s) { return (((((s) << 3) & 0xf8) | (((s) >> 2) & 0x7)) | ((((s) << 5) & 0xfc00) | (((s) >> 1) & 0x300)) | ((((s) << 8) & 0xf80000) | (((s) << 3) & 0x70000))); } static force_inline uint32_t convert_0565_to_8888 (uint16_t s) { return convert_0565_to_0888 (s) | 0xff000000; } /* Trivial versions that are useful in macros */ static force_inline uint32_t convert_8888_to_8888 (uint32_t s) { return s; } static force_inline uint32_t convert_x888_to_8888 (uint32_t s) { return s | 0xff000000; } static force_inline uint16_t convert_0565_to_0565 (uint16_t s) { return s; } #define PIXMAN_FORMAT_IS_WIDE(f) \ (PIXMAN_FORMAT_A (f) > 8 || \ PIXMAN_FORMAT_R (f) > 8 || \ PIXMAN_FORMAT_G (f) > 8 || \ PIXMAN_FORMAT_B (f) > 8 || \ PIXMAN_FORMAT_TYPE (f) == PIXMAN_TYPE_ARGB_SRGB) #ifdef WORDS_BIGENDIAN # define SCREEN_SHIFT_LEFT(x,n) ((x) << (n)) # define SCREEN_SHIFT_RIGHT(x,n) ((x) >> (n)) #else # define SCREEN_SHIFT_LEFT(x,n) ((x) >> (n)) # define SCREEN_SHIFT_RIGHT(x,n) ((x) << (n)) #endif static force_inline uint32_t unorm_to_unorm (uint32_t val, int from_bits, int to_bits) { uint32_t result; if (from_bits == 0) return 0; /* Delete any extra bits */ val &= ((1 
<< from_bits) - 1); if (from_bits >= to_bits) return val >> (from_bits - to_bits); /* Start out with the high bit of val in the high bit of result. */ result = val << (to_bits - from_bits); /* Copy the bits in result, doubling the number of bits each time, until * we fill all to_bits. Unrolled manually because from_bits and to_bits * are usually known statically, so the compiler can turn all of this * into a few shifts. */ #define REPLICATE() \ do \ { \ if (from_bits < to_bits) \ { \ result |= result >> from_bits; \ \ from_bits *= 2; \ } \ } \ while (0) REPLICATE(); REPLICATE(); REPLICATE(); REPLICATE(); REPLICATE(); return result; } uint16_t pixman_float_to_unorm (float f, int n_bits); float pixman_unorm_to_float (uint16_t u, int n_bits); /* * Various debugging code */ #define COMPILE_TIME_ASSERT(x) \ do { typedef int compile_time_assertion [(x)?1:-1]; } while (0) void _pixman_log_error (const char *function, const char *message); #define return_if_fail(expr) \ do \ { \ if (unlikely (!(expr))) \ { \ _pixman_log_error (FUNC, "The expression " # expr " was false"); \ return; \ } \ } \ while (0) #define return_val_if_fail(expr, retval) \ do \ { \ if (unlikely (!(expr))) \ { \ _pixman_log_error (FUNC, "The expression " # expr " was false"); \ return (retval); \ } \ } \ while (0) #define critical_if_fail(expr) \ do \ { \ if (unlikely (!(expr))) \ _pixman_log_error (FUNC, "The expression " # expr " was false"); \ } \ while (0) /* * Matrix */ typedef struct { pixman_fixed_48_16_t v[3]; } pixman_vector_48_16_t; PIXMAN_EXPORT pixman_bool_t pixman_transform_point_31_16 (const pixman_transform_t *t, const pixman_vector_48_16_t *v, pixman_vector_48_16_t *result); PIXMAN_EXPORT void pixman_transform_point_31_16_3d (const pixman_transform_t *t, const pixman_vector_48_16_t *v, pixman_vector_48_16_t *result); PIXMAN_EXPORT void pixman_transform_point_31_16_affine (const pixman_transform_t *t, const pixman_vector_48_16_t *v, pixman_vector_48_16_t *result); /* * Timers */ #ifdef 
PIXMAN_TIMERS static inline uint64_t oil_profile_stamp_rdtsc (void) { uint32_t hi, lo; __asm__ __volatile__ ("rdtsc\n" : "=a" (lo), "=d" (hi)); return lo | (((uint64_t)hi) << 32); } #define OIL_STAMP oil_profile_stamp_rdtsc typedef struct pixman_timer_t pixman_timer_t; struct pixman_timer_t { int initialized; const char * name; uint64_t n_times; uint64_t total; pixman_timer_t *next; }; extern int timer_defined; void pixman_timer_register (pixman_timer_t *timer); #define TIMER_BEGIN(tname) \ { \ static pixman_timer_t timer ## tname; \ uint64_t begin ## tname; \ \ if (!timer ## tname.initialized) \ { \ timer ## tname.initialized = 1; \ timer ## tname.name = # tname; \ pixman_timer_register (&timer ## tname); \ } \ \ timer ## tname.n_times++; \ begin ## tname = OIL_STAMP (); #define TIMER_END(tname) \ timer ## tname.total += OIL_STAMP () - begin ## tname; \ } #else #define TIMER_BEGIN(tname) #define TIMER_END(tname) #endif /* PIXMAN_TIMERS */ #endif /* __ASSEMBLER__ */ #endif /* PIXMAN_PRIVATE_H */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-radial-gradient.c0000664000175000017500000003526514712446423021112 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. * Copyright Âİ 2000 SuSE, Inc. * 2005 Lars Knoll & Zack Rusin, Trolltech * Copyright Âİ 2007 Red Hat, Inc. * * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. 
Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" static inline pixman_fixed_32_32_t dot (pixman_fixed_48_16_t x1, pixman_fixed_48_16_t y1, pixman_fixed_48_16_t z1, pixman_fixed_48_16_t x2, pixman_fixed_48_16_t y2, pixman_fixed_48_16_t z2) { /* * Exact computation, assuming that the input values can * be represented as pixman_fixed_16_16_t */ return x1 * x2 + y1 * y2 + z1 * z2; } static inline double fdot (double x1, double y1, double z1, double x2, double y2, double z2) { /* * Error can be unbound in some special cases. 
* Using clever dot product algorithms (for example compensated * dot product) would improve this but make the code much less * obvious */ return x1 * x2 + y1 * y2 + z1 * z2; } static void radial_write_color (double a, double b, double c, double inva, double dr, double mindr, pixman_gradient_walker_t *walker, pixman_repeat_t repeat, int Bpp, pixman_gradient_walker_write_t write_pixel, uint32_t *buffer) { /* * In this function error propagation can lead to bad results: * - discr can have an unbound error (if b*b-a*c is very small), * potentially making it the opposite sign of what it should have been * (thus clearing a pixel that would have been colored or vice-versa) * or propagating the error to sqrtdiscr; * if discr has the wrong sign or b is very small, this can lead to bad * results * * - the algorithm used to compute the solutions of the quadratic * equation is not numerically stable (but saves one division compared * to the numerically stable one); * this can be a problem if a*c is much smaller than b*b * * - the above problems are worse if a is small (as inva becomes bigger) */ double discr; if (a == 0) { double t; if (b == 0) { memset (buffer, 0, Bpp); return; } t = pixman_fixed_1 / 2 * c / b; if (repeat == PIXMAN_REPEAT_NONE) { if (0 <= t && t <= pixman_fixed_1) { write_pixel (walker, t, buffer); return; } } else { if (t * dr >= mindr) { write_pixel (walker, t, buffer); return; } } memset (buffer, 0, Bpp); return; } discr = fdot (b, a, 0, b, -c, 0); if (discr >= 0) { double sqrtdiscr, t0, t1; sqrtdiscr = sqrt (discr); t0 = (b + sqrtdiscr) * inva; t1 = (b - sqrtdiscr) * inva; /* * The root that must be used is the biggest one that belongs * to the valid range ([0,1] for PIXMAN_REPEAT_NONE, any * solution that results in a positive radius otherwise). * * If a > 0, t0 is the biggest solution, so if it is valid, it * is the correct result. * * If a < 0, only one of the solutions can be valid, so the * order in which they are tested is not important. 
*/ if (repeat == PIXMAN_REPEAT_NONE) { if (0 <= t0 && t0 <= pixman_fixed_1) { write_pixel (walker, t0, buffer); return; } else if (0 <= t1 && t1 <= pixman_fixed_1) { write_pixel (walker, t1, buffer); return; } } else { if (t0 * dr >= mindr) { write_pixel (walker, t0, buffer); return; } else if (t1 * dr >= mindr) { write_pixel (walker, t1, buffer); return; } } } memset (buffer, 0, Bpp); return; } static uint32_t * radial_get_scanline (pixman_iter_t *iter, const uint32_t *mask, int Bpp, pixman_gradient_walker_write_t write_pixel) { /* * Implementation of radial gradients following the PDF specification. * See section 8.7.4.5.4 Type 3 (Radial) Shadings of the PDF Reference * Manual (PDF 32000-1:2008 at the time of this writing). * * In the radial gradient problem we are given two circles (c₁,r₁) and * (c₂,r₂) that define the gradient itself. * * Mathematically the gradient can be defined as the family of circles * * ((1-t)·c₁ + t·(c₂), (1-t)·r₁ + t·r₂) * * excluding those circles whose radius would be < 0. When a point * belongs to more than one circle, the one with a bigger t is the only * one that contributes to its color. When a point does not belong * to any of the circles, it is transparent black, i.e. RGBA (0, 0, 0, 0). * Further limitations on the range of values for t are imposed when * the gradient is not repeated, namely t must belong to [0,1]. * * The graphical result is the same as drawing the valid (radius > 0) * circles with increasing t in [-inf, +inf] (or in [0,1] if the gradient * is not repeated) using SOURCE operator composition. * * It looks like a cone pointing towards the viewer if the ending circle * is smaller than the starting one, a cone pointing inside the page if * the starting circle is the smaller one and like a cylinder if they * have the same radius. 
* * What we actually do is, given the point whose color we are interested * in, compute the t values for that point, solving for t in: * * length((1-t)·c₁ + t·(c₂) - p) = (1-t)·r₁ + t·r₂ * * Let's rewrite it in a simpler way, by defining some auxiliary * variables: * * cd = c₂ - c₁ * pd = p - c₁ * dr = r₂ - r₁ * length(t·cd - pd) = r₁ + t·dr * * which actually means * * hypot(t·cdx - pdx, t·cdy - pdy) = r₁ + t·dr * * or * * ⎷((t·cdx - pdx)² + (t·cdy - pdy)²) = r₁ + t·dr. * * If we impose (as stated earlier) that r₁ + t·dr >= 0, it becomes: * * (t·cdx - pdx)² + (t·cdy - pdy)² = (r₁ + t·dr)² * * where we can actually expand the squares and solve for t: * * t²cdx² - 2t·cdx·pdx + pdx² + t²cdy² - 2t·cdy·pdy + pdy² = * = r₁² + 2·r₁·t·dr + t²·dr² * * (cdx² + cdy² - dr²)t² - 2(cdx·pdx + cdy·pdy + r₁·dr)t + * (pdx² + pdy² - r₁²) = 0 * * A = cdx² + cdy² - dr² * B = pdx·cdx + pdy·cdy + r₁·dr * C = pdx² + pdy² - r₁² * At² - 2Bt + C = 0 * * The solutions (unless the equation degenerates because of A = 0) are: * * t = (B Âħ ⎷(B² - A·C)) / A * * The solution we are going to prefer is the bigger one, unless the * radius associated to it is negative (or it falls outside the valid t * range). 
* * Additional observations (useful for optimizations): * A does not depend on p * * A < 0 <=> one of the two circles completely contains the other one * <=> for every p, the radiuses associated with the two t solutions * have opposite sign */ pixman_image_t *image = iter->image; int x = iter->x; int y = iter->y; int width = iter->width; uint32_t *buffer = iter->buffer; gradient_t *gradient = (gradient_t *)image; radial_gradient_t *radial = (radial_gradient_t *)image; uint32_t *end = buffer + width * (Bpp / 4); pixman_gradient_walker_t walker; pixman_vector_t v, unit; /* reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; _pixman_gradient_walker_init (&walker, gradient, image->common.repeat); if (image->common.transform) { if (!pixman_transform_point_3d (image->common.transform, &v)) return iter->buffer; unit.vector[0] = image->common.transform->matrix[0][0]; unit.vector[1] = image->common.transform->matrix[1][0]; unit.vector[2] = image->common.transform->matrix[2][0]; } else { unit.vector[0] = pixman_fixed_1; unit.vector[1] = 0; unit.vector[2] = 0; } if (unit.vector[2] == 0 && v.vector[2] == pixman_fixed_1) { /* * Given: * * t = (B Âħ ⎷(B² - A·C)) / A * * where * * A = cdx² + cdy² - dr² * B = pdx·cdx + pdy·cdy + r₁·dr * C = pdx² + pdy² - r₁² * det = B² - A·C * * Since we have an affine transformation, we know that (pdx, pdy) * increase linearly with each pixel, * * pdx = pdx₀ + n·ux, * pdy = pdy₀ + n·uy, * * we can then express B, C and det through multiple differentiation. */ pixman_fixed_32_32_t b, db, c, dc, ddc; /* warning: this computation may overflow */ v.vector[0] -= radial->c1.x; v.vector[1] -= radial->c1.y; /* * B and C are computed and updated exactly. 
* If fdot was used instead of dot, in the worst case it would * lose 11 bits of precision in each of the multiplication and * summing up would zero out all the bit that were preserved, * thus making the result 0 instead of the correct one. * This would mean a worst case of unbound relative error or * about 2^10 absolute error */ b = dot (v.vector[0], v.vector[1], radial->c1.radius, radial->delta.x, radial->delta.y, radial->delta.radius); db = dot (unit.vector[0], unit.vector[1], 0, radial->delta.x, radial->delta.y, 0); c = dot (v.vector[0], v.vector[1], -((pixman_fixed_48_16_t) radial->c1.radius), v.vector[0], v.vector[1], radial->c1.radius); dc = dot (2 * (pixman_fixed_48_16_t) v.vector[0] + unit.vector[0], 2 * (pixman_fixed_48_16_t) v.vector[1] + unit.vector[1], 0, unit.vector[0], unit.vector[1], 0); ddc = 2 * dot (unit.vector[0], unit.vector[1], 0, unit.vector[0], unit.vector[1], 0); while (buffer < end) { if (!mask || *mask++) { radial_write_color (radial->a, b, c, radial->inva, radial->delta.radius, radial->mindr, &walker, image->common.repeat, Bpp, write_pixel, buffer); } b += db; c += dc; dc += ddc; buffer += (Bpp / 4); } } else { /* projective */ /* Warning: * error propagation guarantees are much looser than in the affine case */ while (buffer < end) { if (!mask || *mask++) { if (v.vector[2] != 0) { double pdx, pdy, invv2, b, c; invv2 = 1. 
* pixman_fixed_1 / v.vector[2]; pdx = v.vector[0] * invv2 - radial->c1.x; /* / pixman_fixed_1 */ pdy = v.vector[1] * invv2 - radial->c1.y; /* / pixman_fixed_1 */ b = fdot (pdx, pdy, radial->c1.radius, radial->delta.x, radial->delta.y, radial->delta.radius); /* / pixman_fixed_1 / pixman_fixed_1 */ c = fdot (pdx, pdy, -radial->c1.radius, pdx, pdy, radial->c1.radius); /* / pixman_fixed_1 / pixman_fixed_1 */ radial_write_color (radial->a, b, c, radial->inva, radial->delta.radius, radial->mindr, &walker, image->common.repeat, Bpp, write_pixel, buffer); } else { memset (buffer, 0, Bpp); } } buffer += (Bpp / 4); v.vector[0] += unit.vector[0]; v.vector[1] += unit.vector[1]; v.vector[2] += unit.vector[2]; } } iter->y++; return iter->buffer; } static uint32_t * radial_get_scanline_narrow (pixman_iter_t *iter, const uint32_t *mask) { return radial_get_scanline (iter, mask, 4, _pixman_gradient_walker_write_narrow); } static uint32_t * radial_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask) { return radial_get_scanline (iter, NULL, 16, _pixman_gradient_walker_write_wide); } void _pixman_radial_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter) { if (iter->iter_flags & ITER_NARROW) iter->get_scanline = radial_get_scanline_narrow; else iter->get_scanline = radial_get_scanline_wide; } PIXMAN_EXPORT pixman_image_t * pixman_image_create_radial_gradient (const pixman_point_fixed_t * inner, const pixman_point_fixed_t * outer, pixman_fixed_t inner_radius, pixman_fixed_t outer_radius, const pixman_gradient_stop_t *stops, int n_stops) { pixman_image_t *image; radial_gradient_t *radial; image = _pixman_image_allocate (); if (!image) return NULL; radial = &image->radial; if (!_pixman_init_gradient (&radial->common, stops, n_stops)) { free (image); return NULL; } image->type = RADIAL; radial->c1.x = inner->x; radial->c1.y = inner->y; radial->c1.radius = inner_radius; radial->c2.x = outer->x; radial->c2.y = outer->y; radial->c2.radius = outer_radius; /* warning: 
this computations may overflow */ radial->delta.x = radial->c2.x - radial->c1.x; radial->delta.y = radial->c2.y - radial->c1.y; radial->delta.radius = radial->c2.radius - radial->c1.radius; /* computed exactly, then cast to double -> every bit of the double representation is correct (53 bits) */ radial->a = dot (radial->delta.x, radial->delta.y, -radial->delta.radius, radial->delta.x, radial->delta.y, radial->delta.radius); if (radial->a != 0) radial->inva = 1. * pixman_fixed_1 / radial->a; radial->mindr = -1. * pixman_fixed_1 * radial->c1.radius; return image; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-region.c0000664000175000017500000023030514712446423017336 0ustar00mattst88mattst88/* * Copyright 1987, 1988, 1989, 1998 The Open Group * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation. * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Except as contained in this notice, the name of The Open Group shall not be * used in advertising or otherwise to promote the sale, use or other dealings * in this Software without prior written authorization from The Open Group. 
* * Copyright 1987, 1988, 1989 by * Digital Equipment Corporation, Maynard, Massachusetts. * * All Rights Reserved * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose and without fee is hereby granted, * provided that the above copyright notice appear in all copies and that * both that copyright notice and this permission notice appear in * supporting documentation, and that the name of Digital not be * used in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. * * DIGITAL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING * ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL * DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, * ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Copyright Âİ 1998 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. 
* * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ #include #include #include #include #include "pixman-private.h" #define PIXREGION_NIL(reg) ((reg)->data && !(reg)->data->numRects) /* not a region */ #define PIXREGION_NAR(reg) ((reg)->data == pixman_broken_data) #define PIXREGION_NUMRECTS(reg) ((reg)->data ? (reg)->data->numRects : 1) #define PIXREGION_SIZE(reg) ((reg)->data ? (reg)->data->size : 0) #define PIXREGION_RECTS(reg) \ ((reg)->data ? (box_type_t *)((reg)->data + 1) \ : (box_type_t *)&(reg)->extents) #define PIXREGION_BOXPTR(reg) ((box_type_t *)((reg)->data + 1)) #define PIXREGION_BOX(reg, i) (&PIXREGION_BOXPTR (reg)[i]) #define PIXREGION_TOP(reg) PIXREGION_BOX (reg, (reg)->data->numRects) #define PIXREGION_END(reg) PIXREGION_BOX (reg, (reg)->data->numRects - 1) #define GOOD_RECT(rect) ((rect)->x1 < (rect)->x2 && (rect)->y1 < (rect)->y2) #define BAD_RECT(rect) ((rect)->x1 > (rect)->x2 || (rect)->y1 > (rect)->y2) #ifdef DEBUG #define GOOD(reg) \ do \ { \ if (!PREFIX (_selfcheck (reg))) \ _pixman_log_error (FUNC, "Malformed region " # reg); \ } while (0) #else #define GOOD(reg) #endif static const box_type_t PREFIX (_empty_box_) = { 0, 0, 0, 0 }; static const region_data_type_t PREFIX (_empty_data_) = { 0, 0 }; #if defined (__llvm__) && !defined (__clang__) static const volatile region_data_type_t PREFIX (_broken_data_) = { 0, 0 }; #else static const region_data_type_t PREFIX (_broken_data_) = { 0, 0 }; #endif static box_type_t *pixman_region_empty_box = (box_type_t *)&PREFIX (_empty_box_); static region_data_type_t *pixman_region_empty_data = 
(region_data_type_t *)&PREFIX (_empty_data_); static region_data_type_t *pixman_broken_data = (region_data_type_t *)&PREFIX (_broken_data_); static pixman_bool_t pixman_break (region_type_t *region); /* * The functions in this file implement the Region abstraction used extensively * throughout the X11 sample server. A Region is simply a set of disjoint * (non-overlapping) rectangles, plus an "extent" rectangle which is the * smallest single rectangle that contains all the non-overlapping rectangles. * * A Region is implemented as a "y-x-banded" array of rectangles. This array * imposes two degrees of order. First, all rectangles are sorted by top side * y coordinate first (y1), and then by left side x coordinate (x1). * * Furthermore, the rectangles are grouped into "bands". Each rectangle in a * band has the same top y coordinate (y1), and each has the same bottom y * coordinate (y2). Thus all rectangles in a band differ only in their left * and right side (x1 and x2). Bands are implicit in the array of rectangles: * there is no separate list of band start pointers. * * The y-x band representation does not minimize rectangles. In particular, * if a rectangle vertically crosses a band (the rectangle has scanlines in * the y1 to y2 area spanned by the band), then the rectangle may be broken * down into two or more smaller rectangles stacked one atop the other. * * ----------- ----------- * | | | | band 0 * | | -------- ----------- -------- * | | | | in y-x banded | | | | band 1 * | | | | form is | | | | * ----------- | | ----------- -------- * | | | | band 2 * -------- -------- * * An added constraint on the rectangles is that they must cover as much * horizontal area as possible: no two rectangles within a band are allowed * to touch. * * Whenever possible, bands will be merged together to cover a greater vertical * distance (and thus reduce the number of rectangles). 
Two bands can be merged * only if the bottom of one touches the top of the other and they have * rectangles in the same places (of the same width, of course). * * Adam de Boor wrote most of the original region code. Joel McCormack * substantially modified or rewrote most of the core arithmetic routines, and * added pixman_region_validate in order to support several speed improvements * to pixman_region_validate_tree. Bob Scheifler changed the representation * to be more compact when empty or a single rectangle, and did a bunch of * gratuitous reformatting. Carl Worth did further gratuitous reformatting * while re-merging the server and client region code into libpixregion. * Soren Sandmann did even more gratuitous reformatting. */ /* true iff two Boxes overlap */ #define EXTENTCHECK(r1, r2) \ (!( ((r1)->x2 <= (r2)->x1) || \ ((r1)->x1 >= (r2)->x2) || \ ((r1)->y2 <= (r2)->y1) || \ ((r1)->y1 >= (r2)->y2) ) ) /* true iff (x,y) is in Box */ #define INBOX(r, x, y) \ ( ((r)->x2 > x) && \ ((r)->x1 <= x) && \ ((r)->y2 > y) && \ ((r)->y1 <= y) ) /* true iff Box r1 contains Box r2 */ #define SUBSUMES(r1, r2) \ ( ((r1)->x1 <= (r2)->x1) && \ ((r1)->x2 >= (r2)->x2) && \ ((r1)->y1 <= (r2)->y1) && \ ((r1)->y2 >= (r2)->y2) ) static size_t PIXREGION_SZOF (size_t n) { size_t size = n * sizeof(box_type_t); if (n > UINT32_MAX / sizeof(box_type_t)) return 0; if (sizeof(region_data_type_t) > UINT32_MAX - size) return 0; return size + sizeof(region_data_type_t); } static region_data_type_t * alloc_data (size_t n) { size_t sz = PIXREGION_SZOF (n); if (!sz) return NULL; return malloc (sz); } #define FREE_DATA(reg) if ((reg)->data && (reg)->data->size) free ((reg)->data) #define RECTALLOC_BAIL(region, n, bail) \ do \ { \ if (!(region)->data || \ (((region)->data->numRects + (n)) > (region)->data->size)) \ { \ if (!pixman_rect_alloc (region, n)) \ goto bail; \ } \ } while (0) #define RECTALLOC(region, n) \ do \ { \ if (!(region)->data || \ (((region)->data->numRects + (n)) > 
(region)->data->size)) \ { \ if (!pixman_rect_alloc (region, n)) { \ return FALSE; \ } \ } \ } while (0) #define ADDRECT(next_rect, nx1, ny1, nx2, ny2) \ do \ { \ next_rect->x1 = nx1; \ next_rect->y1 = ny1; \ next_rect->x2 = nx2; \ next_rect->y2 = ny2; \ next_rect++; \ } \ while (0) #define NEWRECT(region, next_rect, nx1, ny1, nx2, ny2) \ do \ { \ if (!(region)->data || \ ((region)->data->numRects == (region)->data->size)) \ { \ if (!pixman_rect_alloc (region, 1)) \ return FALSE; \ next_rect = PIXREGION_TOP (region); \ } \ ADDRECT (next_rect, nx1, ny1, nx2, ny2); \ region->data->numRects++; \ critical_if_fail (region->data->numRects <= region->data->size); \ } while (0) #define DOWNSIZE(reg, numRects) \ do \ { \ if (((numRects) < ((reg)->data->size >> 1)) && \ ((reg)->data->size > 50)) \ { \ region_data_type_t * new_data; \ size_t data_size = PIXREGION_SZOF (numRects); \ \ if (!data_size) \ { \ new_data = NULL; \ } \ else \ { \ new_data = (region_data_type_t *) \ realloc ((reg)->data, data_size); \ } \ \ if (new_data) \ { \ new_data->size = (numRects); \ (reg)->data = new_data; \ } \ } \ } while (0) PIXMAN_EXPORT pixman_bool_t PREFIX (_equal) (const region_type_t *reg1, const region_type_t *reg2) { int i; box_type_t *rects1; box_type_t *rects2; if (reg1->extents.x1 != reg2->extents.x1) return FALSE; if (reg1->extents.x2 != reg2->extents.x2) return FALSE; if (reg1->extents.y1 != reg2->extents.y1) return FALSE; if (reg1->extents.y2 != reg2->extents.y2) return FALSE; if (PIXREGION_NUMRECTS (reg1) != PIXREGION_NUMRECTS (reg2)) return FALSE; rects1 = PIXREGION_RECTS (reg1); rects2 = PIXREGION_RECTS (reg2); for (i = 0; i != PIXREGION_NUMRECTS (reg1); i++) { if (rects1[i].x1 != rects2[i].x1) return FALSE; if (rects1[i].x2 != rects2[i].x2) return FALSE; if (rects1[i].y1 != rects2[i].y1) return FALSE; if (rects1[i].y2 != rects2[i].y2) return FALSE; } return TRUE; } int PREFIX (_print) (region_type_t *rgn) { int num, size; int i; box_type_t * rects; num = PIXREGION_NUMRECTS 
(rgn); size = PIXREGION_SIZE (rgn); rects = PIXREGION_RECTS (rgn); fprintf (stderr, "num: %d size: %d\n", num, size); fprintf (stderr, "extents: %d %d %d %d\n", rgn->extents.x1, rgn->extents.y1, rgn->extents.x2, rgn->extents.y2); for (i = 0; i < num; i++) { fprintf (stderr, "%d %d %d %d \n", rects[i].x1, rects[i].y1, rects[i].x2, rects[i].y2); } fprintf (stderr, "\n"); return(num); } PIXMAN_EXPORT void PREFIX (_init) (region_type_t *region) { region->extents = *pixman_region_empty_box; region->data = pixman_region_empty_data; } PIXMAN_EXPORT void PREFIX (_init_rect) (region_type_t * region, int x, int y, unsigned int width, unsigned int height) { region->extents.x1 = x; region->extents.y1 = y; region->extents.x2 = x + width; region->extents.y2 = y + height; if (!GOOD_RECT (®ion->extents)) { if (BAD_RECT (®ion->extents)) _pixman_log_error (FUNC, "Invalid rectangle passed"); PREFIX (_init) (region); return; } region->data = NULL; } PIXMAN_EXPORT void PREFIX (_init_with_extents) (region_type_t *region, const box_type_t *extents) { if (!GOOD_RECT (extents)) { if (BAD_RECT (extents)) _pixman_log_error (FUNC, "Invalid rectangle passed"); PREFIX (_init) (region); return; } region->extents = *extents; region->data = NULL; } PIXMAN_EXPORT void PREFIX (_fini) (region_type_t *region) { GOOD (region); FREE_DATA (region); } PIXMAN_EXPORT int PREFIX (_n_rects) (const region_type_t *region) { return PIXREGION_NUMRECTS (region); } PIXMAN_EXPORT box_type_t * PREFIX (_rectangles) (const region_type_t *region, int *n_rects) { if (n_rects) *n_rects = PIXREGION_NUMRECTS (region); return PIXREGION_RECTS (region); } static pixman_bool_t pixman_break (region_type_t *region) { FREE_DATA (region); region->extents = *pixman_region_empty_box; region->data = pixman_broken_data; return FALSE; } static pixman_bool_t pixman_rect_alloc (region_type_t * region, int n) { region_data_type_t *data; if (!region->data) { n++; region->data = alloc_data (n); if (!region->data) return pixman_break 
(region); region->data->numRects = 1; *PIXREGION_BOXPTR (region) = region->extents; } else if (!region->data->size) { region->data = alloc_data (n); if (!region->data) return pixman_break (region); region->data->numRects = 0; } else { size_t data_size; if (n == 1) { n = region->data->numRects; if (n > 500) /* XXX pick numbers out of a hat */ n = 250; } n += region->data->numRects; data_size = PIXREGION_SZOF (n); if (!data_size) { data = NULL; } else { data = (region_data_type_t *) realloc (region->data, PIXREGION_SZOF (n)); } if (!data) return pixman_break (region); region->data = data; } region->data->size = n; return TRUE; } PIXMAN_EXPORT pixman_bool_t PREFIX (_copy) (region_type_t *dst, const region_type_t *src) { GOOD (dst); GOOD (src); if (dst == src) return TRUE; dst->extents = src->extents; if (!src->data || !src->data->size) { FREE_DATA (dst); dst->data = src->data; return TRUE; } if (!dst->data || (dst->data->size < src->data->numRects)) { FREE_DATA (dst); dst->data = alloc_data (src->data->numRects); if (!dst->data) return pixman_break (dst); dst->data->size = src->data->numRects; } dst->data->numRects = src->data->numRects; memmove ((char *)PIXREGION_BOXPTR (dst), (char *)PIXREGION_BOXPTR (src), dst->data->numRects * sizeof(box_type_t)); return TRUE; } /*====================================================================== * Generic Region Operator *====================================================================*/ /*- *----------------------------------------------------------------------- * pixman_coalesce -- * Attempt to merge the boxes in the current band with those in the * previous one. We are guaranteed that the current band extends to * the end of the rects array. Used only by pixman_op. * * Results: * The new index for the previous band. * * Side Effects: * If coalescing takes place: * - rectangles in the previous band will have their y2 fields * altered. * - region->data->numRects will be decreased. 
 *
 *-----------------------------------------------------------------------
 */
/* Attempt to merge the band starting at cur_start with the previous band
 * (starting at prev_start).  Merging succeeds only when both bands contain
 * the same number of boxes, at identical x-spans, and the bands touch
 * vertically (prev y2 == cur y1).  Returns the index of the start of the
 * band that now ends the region: prev_start on a merge, cur_start otherwise.
 */
static inline int
pixman_coalesce (region_type_t * region,      /* Region to coalesce             */
		 int            prev_start,   /* Index of start of previous band */
		 int            cur_start)    /* Index of start of current band  */
{
    box_type_t *prev_box;       /* Current box in previous band     */
    box_type_t *cur_box;        /* Current box in current band      */
    int numRects;               /* Number rectangles in both bands  */
    int y2;                     /* Bottom of current band           */

    /*
     * Figure out how many rectangles are in the band.
     */
    numRects = cur_start - prev_start;
    /* The current band must run to the end of the region's rect array. */
    critical_if_fail (numRects == region->data->numRects - cur_start);

    if (!numRects) return cur_start;

    /*
     * The bands may only be coalesced if the bottom of the previous
     * matches the top scanline of the current.
     */
    prev_box = PIXREGION_BOX (region, prev_start);
    cur_box = PIXREGION_BOX (region, cur_start);
    if (prev_box->y2 != cur_box->y1) return cur_start;

    /*
     * Make sure the bands have boxes in the same places. This
     * assumes that boxes have been added in such a way that they
     * cover the most area possible. I.e. two boxes in a band must
     * have some horizontal space between them.
     */
    y2 = cur_box->y2;

    do
    {
	if ((prev_box->x1 != cur_box->x1) || (prev_box->x2 != cur_box->x2))
	    return (cur_start);

	prev_box++;
	cur_box++;
	numRects--;
    }
    while (numRects);

    /*
     * The bands may be merged, so set the bottom y of each box
     * in the previous band to the bottom y of the current band.
     */
    numRects = cur_start - prev_start;
    region->data->numRects -= numRects;

    do
    {
	prev_box--;
	prev_box->y2 = y2;
	numRects--;
    }
    while (numRects);

    return prev_start;
}

/* Quicky macro to avoid trivial reject procedure calls to pixman_coalesce.
 * Only calls pixman_coalesce when the previous and current bands have the
 * same number of boxes (the cheap precondition for a merge); otherwise the
 * current band simply becomes the new "previous" band.
 */
#define COALESCE(new_reg, prev_band, cur_band)                          \
    do                                                                  \
    {                                                                   \
	if (cur_band - prev_band == new_reg->data->numRects - cur_band) \
	    prev_band = pixman_coalesce (new_reg, prev_band, cur_band); \
	else                                                            \
	    prev_band = cur_band;                                       \
    } while (0)

/*-
 *-----------------------------------------------------------------------
 * pixman_region_append_non_o --
 *	Handle a non-overlapping band for the union and subtract operations.
 *      Just adds the (top/bottom-clipped) rectangles into the region.
 *      Doesn't have to check for subsumption or anything.
 *
 * Results:
 *	None.
 *
 * Side Effects:
 *	region->data->numRects is incremented and the rectangles overwritten
 *	with the rectangles we're passed.
 *
 *-----------------------------------------------------------------------
 */
static inline pixman_bool_t
pixman_region_append_non_o (region_type_t * region,
			    box_type_t *    r,
			    box_type_t *    r_end,
			    int             y1,
			    int             y2)
{
    box_type_t *next_rect;
    int new_rects;

    new_rects = r_end - r;

    critical_if_fail (y1 < y2);
    critical_if_fail (new_rects != 0);

    /* Make sure we have enough space for all rectangles to be added */
    RECTALLOC (region, new_rects);
    next_rect = PIXREGION_TOP (region);
    region->data->numRects += new_rects;

    do
    {
	critical_if_fail (r->x1 < r->x2);
	ADDRECT (next_rect, r->x1, y1, r->x2, y2);
	r++;
    }
    while (r != r_end);

    return TRUE;
}

/* Advance r_band_end past the band that starts at r: a band is a maximal
 * run of boxes sharing the same y1.  ry1 receives that common y1.
 */
#define FIND_BAND(r, r_band_end, r_end, ry1)                         \
    do                                                               \
    {                                                                \
	ry1 = r->y1;                                                 \
	r_band_end = r + 1;                                          \
	while ((r_band_end != r_end) && (r_band_end->y1 == ry1)) {   \
	    r_band_end++;                                            \
	}                                                            \
    } while (0)

/* Bulk-append the boxes [r, r_end) to new_reg, growing its rect array as
 * needed.  Jumps to the enclosing function's `bail` label on allocation
 * failure (via RECTALLOC_BAIL).
 */
#define APPEND_REGIONS(new_reg, r, r_end)                               \
    do                                                                  \
    {                                                                   \
	int new_rects;                                                  \
	if ((new_rects = r_end - r)) {                                  \
	    RECTALLOC_BAIL (new_reg, new_rects, bail);                  \
	    memmove ((char *)PIXREGION_TOP (new_reg), (char *)r,        \
		     new_rects * sizeof(box_type_t));                   \
	    new_reg->data->numRects += new_rects;                       \
	}                                                               \
    } while (0)

/*-
 *-----------------------------------------------------------------------
 * pixman_op --
 *	Apply an operation to two regions. Called by pixman_region_union, pixman_region_inverse,
 *	pixman_region_subtract, pixman_region_intersect....  Both regions MUST have at least one
 *      rectangle, and cannot be the same object.
 *
 * Results:
 *	TRUE if successful.
 *
 * Side Effects:
 *	The new region is overwritten.
 *	overlap set to TRUE if overlap_func ever returns TRUE.
 *
 * Notes:
 *	The idea behind this function is to view the two regions as sets.
 *	Together they cover a rectangle of area that this function divides
 *	into horizontal bands where points are covered only by one region
 *	or by both. For the first case, the non_overlap_func is called with
 *	each the band and the band's upper and lower extents. For the
 *	second, the overlap_func is called to process the entire band. It
 *	is responsible for clipping the rectangles in the band, though
 *	this function provides the boundaries.
 *	At the end of each band, the new region is coalesced, if possible,
 *	to reduce the number of rectangles in the region.
 *
 *-----------------------------------------------------------------------
 */

/* Band handler signature used for the overlapping bands: clips and emits
 * boxes from both source bands [r1, r1_end) and [r2, r2_end) into region,
 * bounded vertically by [y1, y2).
 */
typedef pixman_bool_t (*overlap_proc_ptr) (region_type_t *region,
					   box_type_t *   r1,
					   box_type_t *   r1_end,
					   box_type_t *   r2,
					   box_type_t *   r2_end,
					   int            y1,
					   int            y2);

static pixman_bool_t
pixman_op (region_type_t *  new_reg,               /* Place to store result       */
	   const region_type_t * reg1,             /* First region in operation   */
	   const region_type_t * reg2,             /* 2d region in operation      */
	   overlap_proc_ptr overlap_func,          /* Function to call for over-
						    * lapping bands               */
	   int append_non1,                        /* Append non-overlapping bands
						    * in region 1 ?               */
	   int append_non2                         /* Append non-overlapping bands
						    * in region 2 ?               */
    )
{
    box_type_t *r1;                 /* Pointer into first region      */
    box_type_t *r2;                 /* Pointer into 2d region         */
    box_type_t *r1_end;             /* End of 1st region              */
    box_type_t *r2_end;             /* End of 2d region               */
    int ybot;                       /* Bottom of intersection         */
    int ytop;                       /* Top of intersection            */
    region_data_type_t *old_data;   /* Old data for new_reg           */
    int prev_band;                  /* Index of start of
				     * previous band in new_reg       */
    int cur_band;                   /* Index of start of current
				     * band in new_reg                */
    box_type_t * r1_band_end;       /* End of current band in r1      */
    box_type_t * r2_band_end;       /* End of current band in r2      */
    int top;                        /* Top of non-overlapping band    */
    int bot;                        /* Bottom of non-overlapping band */
    int r1y1;                       /* Temps for r1->y1 and r2->y1    */
    int r2y1;
    int new_size;
    int numRects;

    /*
     * Break any region computed from a broken region
     */
    if (PIXREGION_NAR (reg1) || PIXREGION_NAR (reg2))
	return pixman_break (new_reg);

    /*
     * Initialization:
     *	set r1, r2, r1_end and r2_end appropriately, save the rectangles
     * of the destination region until the end in case it's one of
     * the two source regions, then mark the "new" region empty, allocating
     * another array of rectangles for it to use.
     */

    r1 = PIXREGION_RECTS (reg1);
    new_size = PIXREGION_NUMRECTS (reg1);
    r1_end = r1 + new_size;

    numRects = PIXREGION_NUMRECTS (reg2);
    r2 = PIXREGION_RECTS (reg2);
    r2_end = r2 + numRects;

    critical_if_fail (r1 != r1_end);
    critical_if_fail (r2 != r2_end);

    old_data = (region_data_type_t *)NULL;

    /* If the destination aliases a multi-rect source, park the source's
     * box array in old_data so it survives until we're done reading it.
     */
    if (((new_reg == reg1) && (new_size > 1)) ||
        ((new_reg == reg2) && (numRects > 1)))
    {
	old_data = new_reg->data;
	new_reg->data = pixman_region_empty_data;
    }

    /* guess at new size */
    if (numRects > new_size)
	new_size = numRects;

    new_size <<= 1;

    if (!new_reg->data)
	new_reg->data = pixman_region_empty_data;
    else if (new_reg->data->size)
	new_reg->data->numRects = 0;

    if (new_size > new_reg->data->size)
    {
	if (!pixman_rect_alloc (new_reg, new_size))
	{
	    free (old_data);
	    return FALSE;
	}
    }

    /*
     * Initialize ybot.
     * In the upcoming loop, ybot and ytop serve different functions depending
     * on whether the band being handled is an overlapping or non-overlapping
     * band.
     *	In the case of a non-overlapping band (only one of the regions
     * has points in the band), ybot is the bottom of the most recent
     * intersection and thus clips the top of the rectangles in that band.
     * ytop is the top of the next intersection between the two regions and
     * serves to clip the bottom of the rectangles in the current band.
     *	For an overlapping band (where the two regions intersect), ytop clips
     * the top of the rectangles of both regions and ybot clips the bottoms.
     */

    ybot = MIN (r1->y1, r2->y1);

    /*
     * prev_band serves to mark the start of the previous band so rectangles
     * can be coalesced into larger rectangles. qv. pixman_coalesce, above.
     * In the beginning, there is no previous band, so prev_band == cur_band
     * (cur_band is set later on, of course, but the first band will always
     * start at index 0). prev_band and cur_band must be indices because of
     * the possible expansion, and resultant moving, of the new region's
     * array of rectangles.
     */
    prev_band = 0;

    do
    {
	/*
	 * This algorithm proceeds one source-band (as opposed to a
	 * destination band, which is determined by where the two regions
	 * intersect) at a time. r1_band_end and r2_band_end serve to mark the
	 * rectangle after the last one in the current band for their
	 * respective regions.
	 */
	critical_if_fail (r1 != r1_end);
	critical_if_fail (r2 != r2_end);

	FIND_BAND (r1, r1_band_end, r1_end, r1y1);
	FIND_BAND (r2, r2_band_end, r2_end, r2y1);

	/*
	 * First handle the band that doesn't intersect, if any.
	 *
	 * Note that attention is restricted to one band in the
	 * non-intersecting region at once, so if a region has n
	 * bands between the current position and the next place it overlaps
	 * the other, this entire loop will be passed through n times.
	 */
	if (r1y1 < r2y1)
	{
	    if (append_non1)
	    {
		top = MAX (r1y1, ybot);
		bot = MIN (r1->y2, r2y1);
		if (top != bot)
		{
		    cur_band = new_reg->data->numRects;
		    if (!pixman_region_append_non_o (new_reg, r1, r1_band_end, top, bot))
			goto bail;
		    COALESCE (new_reg, prev_band, cur_band);
		}
	    }
	    ytop = r2y1;
	}
	else if (r2y1 < r1y1)
	{
	    if (append_non2)
	    {
		top = MAX (r2y1, ybot);
		bot = MIN (r2->y2, r1y1);

		if (top != bot)
		{
		    cur_band = new_reg->data->numRects;

		    if (!pixman_region_append_non_o (new_reg, r2, r2_band_end, top, bot))
			goto bail;

		    COALESCE (new_reg, prev_band, cur_band);
		}
	    }
	    ytop = r1y1;
	}
	else
	{
	    ytop = r1y1;
	}

	/*
	 * Now see if we've hit an intersecting band. The two bands only
	 * intersect if ybot > ytop
	 */
	ybot = MIN (r1->y2, r2->y2);
	if (ybot > ytop)
	{
	    cur_band = new_reg->data->numRects;

	    if (!(*overlap_func)(new_reg,
				 r1, r1_band_end,
				 r2, r2_band_end,
				 ytop, ybot))
	    {
		goto bail;
	    }

	    COALESCE (new_reg, prev_band, cur_band);
	}

	/*
	 * If we've finished with a band (y2 == ybot) we skip forward
	 * in the region to the next band.
	 */
	if (r1->y2 == ybot)
	    r1 = r1_band_end;

	if (r2->y2 == ybot)
	    r2 = r2_band_end;

    }
    while (r1 != r1_end && r2 != r2_end);

    /*
     * Deal with whichever region (if any) still has rectangles left.
     *
     * We only need to worry about banding and coalescing for the very first
     * band left.  After that, we can just group all remaining boxes,
     * regardless of how many bands, into one final append to the list.
     */

    if ((r1 != r1_end) && append_non1)
    {
	/* Do first non_overlap1Func call, which may be able to coalesce */
	FIND_BAND (r1, r1_band_end, r1_end, r1y1);

	cur_band = new_reg->data->numRects;

	if (!pixman_region_append_non_o (new_reg,
					 r1, r1_band_end,
					 MAX (r1y1, ybot), r1->y2))
	{
	    goto bail;
	}

	COALESCE (new_reg, prev_band, cur_band);

	/* Just append the rest of the boxes  */
	APPEND_REGIONS (new_reg, r1_band_end, r1_end);
    }
    else if ((r2 != r2_end) && append_non2)
    {
	/* Do first non_overlap2Func call, which may be able to coalesce */
	FIND_BAND (r2, r2_band_end, r2_end, r2y1);

	cur_band = new_reg->data->numRects;

	if (!pixman_region_append_non_o (new_reg,
					 r2, r2_band_end,
					 MAX (r2y1, ybot), r2->y2))
	{
	    goto bail;
	}

	COALESCE (new_reg, prev_band, cur_band);

	/* Append rest of boxes */
	APPEND_REGIONS (new_reg, r2_band_end, r2_end);
    }

    free (old_data);

    /* Normalize the result: empty regions share the static empty data,
     * single-rect regions store the box in extents with no data array.
     */
    if (!(numRects = new_reg->data->numRects))
    {
	FREE_DATA (new_reg);
	new_reg->data = pixman_region_empty_data;
    }
    else if (numRects == 1)
    {
	new_reg->extents = *PIXREGION_BOXPTR (new_reg);
	FREE_DATA (new_reg);
	new_reg->data = (region_data_type_t *)NULL;
    }
    else
    {
	DOWNSIZE (new_reg, numRects);
    }

    return TRUE;

bail:
    free (old_data);

    return pixman_break (new_reg);
}

/*-
 *-----------------------------------------------------------------------
 * pixman_set_extents --
 *	Reset the extents of a region to what they should be. Called by
 *	pixman_region_subtract and pixman_region_intersect as they can't
 *      figure it out along the way or do so easily, as pixman_region_union can.
 *
 * Results:
 *	None.
 *
 * Side Effects:
 *	The region's 'extents' structure is overwritten.
* *----------------------------------------------------------------------- */ static void pixman_set_extents (region_type_t *region) { box_type_t *box, *box_end; if (!region->data) return; if (!region->data->size) { region->extents.x2 = region->extents.x1; region->extents.y2 = region->extents.y1; return; } box = PIXREGION_BOXPTR (region); box_end = PIXREGION_END (region); /* * Since box is the first rectangle in the region, it must have the * smallest y1 and since box_end is the last rectangle in the region, * it must have the largest y2, because of banding. Initialize x1 and * x2 from box and box_end, resp., as good things to initialize them * to... */ region->extents.x1 = box->x1; region->extents.y1 = box->y1; region->extents.x2 = box_end->x2; region->extents.y2 = box_end->y2; critical_if_fail (region->extents.y1 < region->extents.y2); while (box <= box_end) { if (box->x1 < region->extents.x1) region->extents.x1 = box->x1; if (box->x2 > region->extents.x2) region->extents.x2 = box->x2; box++; } critical_if_fail (region->extents.x1 < region->extents.x2); } /*====================================================================== * Region Intersection *====================================================================*/ /*- *----------------------------------------------------------------------- * pixman_region_intersect_o -- * Handle an overlapping band for pixman_region_intersect. * * Results: * TRUE if successful. * * Side Effects: * Rectangles may be added to the region. 
* *----------------------------------------------------------------------- */ /*ARGSUSED*/ static pixman_bool_t pixman_region_intersect_o (region_type_t *region, box_type_t * r1, box_type_t * r1_end, box_type_t * r2, box_type_t * r2_end, int y1, int y2) { int x1; int x2; box_type_t * next_rect; next_rect = PIXREGION_TOP (region); critical_if_fail (y1 < y2); critical_if_fail (r1 != r1_end && r2 != r2_end); do { x1 = MAX (r1->x1, r2->x1); x2 = MIN (r1->x2, r2->x2); /* * If there's any overlap between the two rectangles, add that * overlap to the new region. */ if (x1 < x2) NEWRECT (region, next_rect, x1, y1, x2, y2); /* * Advance the pointer(s) with the leftmost right side, since the next * rectangle on that list may still overlap the other region's * current rectangle. */ if (r1->x2 == x2) { r1++; } if (r2->x2 == x2) { r2++; } } while ((r1 != r1_end) && (r2 != r2_end)); return TRUE; } PIXMAN_EXPORT pixman_bool_t PREFIX (_intersect) (region_type_t * new_reg, const region_type_t * reg1, const region_type_t * reg2) { GOOD (reg1); GOOD (reg2); GOOD (new_reg); /* check for trivial reject */ if (PIXREGION_NIL (reg1) || PIXREGION_NIL (reg2) || !EXTENTCHECK (®1->extents, ®2->extents)) { /* Covers about 20% of all cases */ FREE_DATA (new_reg); new_reg->extents.x2 = new_reg->extents.x1; new_reg->extents.y2 = new_reg->extents.y1; if (PIXREGION_NAR (reg1) || PIXREGION_NAR (reg2)) { new_reg->data = pixman_broken_data; return FALSE; } else { new_reg->data = pixman_region_empty_data; } } else if (!reg1->data && !reg2->data) { /* Covers about 80% of cases that aren't trivially rejected */ new_reg->extents.x1 = MAX (reg1->extents.x1, reg2->extents.x1); new_reg->extents.y1 = MAX (reg1->extents.y1, reg2->extents.y1); new_reg->extents.x2 = MIN (reg1->extents.x2, reg2->extents.x2); new_reg->extents.y2 = MIN (reg1->extents.y2, reg2->extents.y2); FREE_DATA (new_reg); new_reg->data = (region_data_type_t *)NULL; } else if (!reg2->data && SUBSUMES (®2->extents, ®1->extents)) { return PREFIX 
(_copy) (new_reg, reg1); } else if (!reg1->data && SUBSUMES (®1->extents, ®2->extents)) { return PREFIX (_copy) (new_reg, reg2); } else if (reg1 == reg2) { return PREFIX (_copy) (new_reg, reg1); } else { /* General purpose intersection */ if (!pixman_op (new_reg, reg1, reg2, pixman_region_intersect_o, FALSE, FALSE)) return FALSE; pixman_set_extents (new_reg); } GOOD (new_reg); return(TRUE); } #define MERGERECT(r) \ do \ { \ if (r->x1 <= x2) \ { \ /* Merge with current rectangle */ \ if (x2 < r->x2) \ x2 = r->x2; \ } \ else \ { \ /* Add current rectangle, start new one */ \ NEWRECT (region, next_rect, x1, y1, x2, y2); \ x1 = r->x1; \ x2 = r->x2; \ } \ r++; \ } while (0) /*====================================================================== * Region Union *====================================================================*/ /*- *----------------------------------------------------------------------- * pixman_region_union_o -- * Handle an overlapping band for the union operation. Picks the * left-most rectangle each time and merges it into the region. * * Results: * TRUE if successful. * * Side Effects: * region is overwritten. * overlap is set to TRUE if any boxes overlap. 
* *----------------------------------------------------------------------- */ static pixman_bool_t pixman_region_union_o (region_type_t *region, box_type_t * r1, box_type_t * r1_end, box_type_t * r2, box_type_t * r2_end, int y1, int y2) { box_type_t *next_rect; int x1; /* left and right side of current union */ int x2; critical_if_fail (y1 < y2); critical_if_fail (r1 != r1_end && r2 != r2_end); next_rect = PIXREGION_TOP (region); /* Start off current rectangle */ if (r1->x1 < r2->x1) { x1 = r1->x1; x2 = r1->x2; r1++; } else { x1 = r2->x1; x2 = r2->x2; r2++; } while (r1 != r1_end && r2 != r2_end) { if (r1->x1 < r2->x1) MERGERECT (r1); else MERGERECT (r2); } /* Finish off whoever (if any) is left */ if (r1 != r1_end) { do { MERGERECT (r1); } while (r1 != r1_end); } else if (r2 != r2_end) { do { MERGERECT (r2); } while (r2 != r2_end); } /* Add current rectangle */ NEWRECT (region, next_rect, x1, y1, x2, y2); return TRUE; } PIXMAN_EXPORT pixman_bool_t PREFIX(_intersect_rect) (region_type_t *dest, const region_type_t *source, int x, int y, unsigned int width, unsigned int height) { region_type_t region; region.data = NULL; region.extents.x1 = x; region.extents.y1 = y; region.extents.x2 = x + width; region.extents.y2 = y + height; return PREFIX(_intersect) (dest, source, ®ion); } /* Convenience function for performing union of region with a * single rectangle */ PIXMAN_EXPORT pixman_bool_t PREFIX (_union_rect) (region_type_t *dest, const region_type_t *source, int x, int y, unsigned int width, unsigned int height) { region_type_t region; region.extents.x1 = x; region.extents.y1 = y; region.extents.x2 = x + width; region.extents.y2 = y + height; if (!GOOD_RECT (®ion.extents)) { if (BAD_RECT (®ion.extents)) _pixman_log_error (FUNC, "Invalid rectangle passed"); return PREFIX (_copy) (dest, source); } region.data = NULL; return PREFIX (_union) (dest, source, ®ion); } PIXMAN_EXPORT pixman_bool_t PREFIX (_union) (region_type_t * new_reg, const region_type_t *reg1, const 
region_type_t *reg2) { /* Return TRUE if some overlap * between reg1, reg2 */ GOOD (reg1); GOOD (reg2); GOOD (new_reg); /* checks all the simple cases */ /* * Region 1 and 2 are the same */ if (reg1 == reg2) return PREFIX (_copy) (new_reg, reg1); /* * Region 1 is empty */ if (PIXREGION_NIL (reg1)) { if (PIXREGION_NAR (reg1)) return pixman_break (new_reg); if (new_reg != reg2) return PREFIX (_copy) (new_reg, reg2); return TRUE; } /* * Region 2 is empty */ if (PIXREGION_NIL (reg2)) { if (PIXREGION_NAR (reg2)) return pixman_break (new_reg); if (new_reg != reg1) return PREFIX (_copy) (new_reg, reg1); return TRUE; } /* * Region 1 completely subsumes region 2 */ if (!reg1->data && SUBSUMES (®1->extents, ®2->extents)) { if (new_reg != reg1) return PREFIX (_copy) (new_reg, reg1); return TRUE; } /* * Region 2 completely subsumes region 1 */ if (!reg2->data && SUBSUMES (®2->extents, ®1->extents)) { if (new_reg != reg2) return PREFIX (_copy) (new_reg, reg2); return TRUE; } if (!pixman_op (new_reg, reg1, reg2, pixman_region_union_o, TRUE, TRUE)) return FALSE; new_reg->extents.x1 = MIN (reg1->extents.x1, reg2->extents.x1); new_reg->extents.y1 = MIN (reg1->extents.y1, reg2->extents.y1); new_reg->extents.x2 = MAX (reg1->extents.x2, reg2->extents.x2); new_reg->extents.y2 = MAX (reg1->extents.y2, reg2->extents.y2); GOOD (new_reg); return TRUE; } /*====================================================================== * Batch Rectangle Union *====================================================================*/ #define EXCHANGE_RECTS(a, b) \ { \ box_type_t t; \ t = rects[a]; \ rects[a] = rects[b]; \ rects[b] = t; \ } static void quick_sort_rects ( box_type_t rects[], int numRects) { int y1; int x1; int i, j; box_type_t *r; /* Always called with numRects > 1 */ do { if (numRects == 2) { if (rects[0].y1 > rects[1].y1 || (rects[0].y1 == rects[1].y1 && rects[0].x1 > rects[1].x1)) { EXCHANGE_RECTS (0, 1); } return; } /* Choose partition element, stick in location 0 */ EXCHANGE_RECTS (0, 
numRects >> 1); y1 = rects[0].y1; x1 = rects[0].x1; /* Partition array */ i = 0; j = numRects; do { r = &(rects[i]); do { r++; i++; } while (i != numRects && (r->y1 < y1 || (r->y1 == y1 && r->x1 < x1))); r = &(rects[j]); do { r--; j--; } while (y1 < r->y1 || (y1 == r->y1 && x1 < r->x1)); if (i < j) EXCHANGE_RECTS (i, j); } while (i < j); /* Move partition element back to middle */ EXCHANGE_RECTS (0, j); /* Recurse */ if (numRects - j - 1 > 1) quick_sort_rects (&rects[j + 1], numRects - j - 1); numRects = j; } while (numRects > 1); } /*- *----------------------------------------------------------------------- * pixman_region_validate -- * * Take a ``region'' which is a non-y-x-banded random collection of * rectangles, and compute a nice region which is the union of all the * rectangles. * * Results: * TRUE if successful. * * Side Effects: * The passed-in ``region'' may be modified. * overlap set to TRUE if any retangles overlapped, * else FALSE; * * Strategy: * Step 1. Sort the rectangles into ascending order with primary key y1 * and secondary key x1. * * Step 2. Split the rectangles into the minimum number of proper y-x * banded regions. This may require horizontally merging * rectangles, and vertically coalescing bands. With any luck, * this step in an identity transformation (ala the Box widget), * or a coalescing into 1 box (ala Menus). * * Step 3. Merge the separate regions down to a single region by calling * pixman_region_union. Maximize the work each pixman_region_union call does by using * a binary merge. * *----------------------------------------------------------------------- */ static pixman_bool_t validate (region_type_t * badreg) { /* Descriptor for regions under construction in Step 2. 
     */
    typedef struct
    {
	region_type_t reg;
	int prev_band;
	int cur_band;
    } region_info_t;

    region_info_t stack_regions[64];

    int numRects;               /* Original numRects for badreg         */
    region_info_t *ri;          /* Array of current regions             */
    int num_ri;                 /* Number of entries used in ri         */
    int size_ri;                /* Number of entries available in ri    */
    int i;                      /* Index into rects                     */
    int j;                      /* Index into ri                        */
    region_info_t *rit;         /* &ri[j]                               */
    region_type_t *reg;         /* ri[j].reg                            */
    box_type_t *box;            /* Current box in rects                 */
    box_type_t *ri_box;         /* Last box in ri[j].reg                */
    region_type_t *hreg;        /* ri[j_half].reg                       */
    pixman_bool_t ret = TRUE;

    if (!badreg->data)
    {
	GOOD (badreg);
	return TRUE;
    }

    numRects = badreg->data->numRects;
    if (!numRects)
    {
	if (PIXREGION_NAR (badreg))
	    return FALSE;
	GOOD (badreg);
	return TRUE;
    }

    /* Valid extents imply the region is already well-formed; only trim
     * the data allocation to size.
     */
    if (badreg->extents.x1 < badreg->extents.x2)
    {
	if ((numRects) == 1)
	{
	    FREE_DATA (badreg);
	    badreg->data = (region_data_type_t *) NULL;
	}
	else
	{
	    DOWNSIZE (badreg, numRects);
	}

	GOOD (badreg);

	return TRUE;
    }

    /* Step 1: Sort the rects array into ascending (y1, x1) order */
    quick_sort_rects (PIXREGION_BOXPTR (badreg), numRects);

    /* Step 2: Scatter the sorted array into the minimum number of regions */

    /* Set up the first region to be the first rectangle in badreg */
    /* Note that step 2 code will never overflow the ri[0].reg rects array */
    ri = stack_regions;
    size_ri = sizeof (stack_regions) / sizeof (stack_regions[0]);
    num_ri = 1;
    ri[0].prev_band = 0;
    ri[0].cur_band = 0;
    ri[0].reg = *badreg;
    box = PIXREGION_BOXPTR (&ri[0].reg);
    ri[0].reg.extents = *box;
    ri[0].reg.data->numRects = 1;
    badreg->extents = *pixman_region_empty_box;
    badreg->data = pixman_region_empty_data;

    /* Now scatter rectangles into the minimum set of valid regions.  If the
     * next rectangle to be added to a region would force an existing rectangle
     * in the region to be split up in order to maintain y-x banding, just
     * forget it.  Try the next region.  If it doesn't fit cleanly into any
     * region, make a new one.
     */
    for (i = numRects; --i > 0;)
    {
	box++;

	/* Look for a region to append box to */
	for (j = num_ri, rit = ri; --j >= 0; rit++)
	{
	    reg = &rit->reg;
	    ri_box = PIXREGION_END (reg);

	    if (box->y1 == ri_box->y1 && box->y2 == ri_box->y2)
	    {
		/* box is in same band as ri_box.  Merge or append it */
		if (box->x1 <= ri_box->x2)
		{
		    /* Merge it with ri_box */
		    if (box->x2 > ri_box->x2)
			ri_box->x2 = box->x2;
		}
		else
		{
		    RECTALLOC_BAIL (reg, 1, bail);
		    *PIXREGION_TOP (reg) = *box;
		    reg->data->numRects++;
		}

		goto next_rect;   /* So sue me */
	    }
	    else if (box->y1 >= ri_box->y2)
	    {
		/* Put box into new band */
		if (reg->extents.x2 < ri_box->x2)
		    reg->extents.x2 = ri_box->x2;

		if (reg->extents.x1 > box->x1)
		    reg->extents.x1 = box->x1;

		COALESCE (reg, rit->prev_band, rit->cur_band);
		rit->cur_band = reg->data->numRects;

		RECTALLOC_BAIL (reg, 1, bail);
		*PIXREGION_TOP (reg) = *box;
		reg->data->numRects++;

		goto next_rect;
	    }
	    /* Well, this region was inappropriate.  Try the next one. */
	} /* for j */

	/* Uh-oh.  No regions were appropriate.  Create a new one. */
	if (size_ri == num_ri)
	{
	    size_t data_size;

	    /* Oops, allocate space for new region information */
	    size_ri <<= 1;

	    /* Guard against integer overflow in the allocation size. */
	    data_size = size_ri * sizeof(region_info_t);
	    if (data_size / size_ri != sizeof(region_info_t))
		goto bail;

	    if (ri == stack_regions)
	    {
		/* First spill from the stack array to the heap. */
		rit = malloc (data_size);
		if (!rit)
		    goto bail;

		memcpy (rit, ri, num_ri * sizeof (region_info_t));
	    }
	    else
	    {
		rit = (region_info_t *) realloc (ri, data_size);
		if (!rit)
		    goto bail;
	    }
	    ri = rit;
	    rit = &ri[num_ri];
	}
	num_ri++;
	rit->prev_band = 0;
	rit->cur_band = 0;
	rit->reg.extents = *box;
	rit->reg.data = (region_data_type_t *)NULL;

	/* MUST force allocation */
	if (!pixman_rect_alloc (&rit->reg, (i + num_ri) / num_ri))
	    goto bail;

    next_rect: ;
    } /* for i */

    /* Make a final pass over each region in order to COALESCE and set
     * extents.x2 and extents.y2
     */
    for (j = num_ri, rit = ri; --j >= 0; rit++)
    {
	reg = &rit->reg;
	ri_box = PIXREGION_END (reg);
	reg->extents.y2 = ri_box->y2;

	if (reg->extents.x2 < ri_box->x2)
	    reg->extents.x2 = ri_box->x2;

	COALESCE (reg, rit->prev_band, rit->cur_band);

	if (reg->data->numRects == 1) /* keep unions happy below */
	{
	    FREE_DATA (reg);
	    reg->data = (region_data_type_t *)NULL;
	}
    }

    /* Step 3: Union all regions into a single region */
    while (num_ri > 1)
    {
	int half = num_ri / 2;
	for (j = num_ri & 1; j < (half + (num_ri & 1)); j++)
	{
	    reg = &ri[j].reg;
	    hreg = &ri[j + half].reg;

	    if (!pixman_op (reg, reg, hreg, pixman_region_union_o, TRUE, TRUE))
		ret = FALSE;

	    if (hreg->extents.x1 < reg->extents.x1)
		reg->extents.x1 = hreg->extents.x1;

	    if (hreg->extents.y1 < reg->extents.y1)
		reg->extents.y1 = hreg->extents.y1;

	    if (hreg->extents.x2 > reg->extents.x2)
		reg->extents.x2 = hreg->extents.x2;

	    if (hreg->extents.y2 > reg->extents.y2)
		reg->extents.y2 = hreg->extents.y2;

	    FREE_DATA (hreg);
	}

	num_ri -= half;

	if (!ret)
	    goto bail;
    }

    *badreg = ri[0].reg;

    if (ri != stack_regions)
	free (ri);

    GOOD (badreg);
    return ret;

bail:
    for (i = 0; i < num_ri; i++)
	FREE_DATA (&ri[i].reg);

    if (ri != stack_regions)
	free (ri);

    return pixman_break (badreg);
}

/*======================================================================
 *	          Region Subtraction
 *====================================================================*/

/*-
 *-----------------------------------------------------------------------
 * pixman_region_subtract_o --
 *	Overlapping band subtraction. x1 is the left-most point not yet
 *	checked.
 *
 * Results:
 *	TRUE if successful.
 *
 * Side Effects:
 *	region may have rectangles added to it.
 *
 *-----------------------------------------------------------------------
 */
/*ARGSUSED*/
static pixman_bool_t
pixman_region_subtract_o (region_type_t * region,
			  box_type_t *    r1,
			  box_type_t *    r1_end,
			  box_type_t *    r2,
			  box_type_t *    r2_end,
			  int             y1,
			  int             y2)
{
    box_type_t * next_rect;
    int x1;

    x1 = r1->x1;

    critical_if_fail (y1 < y2);
    critical_if_fail (r1 != r1_end && r2 != r2_end);

    next_rect = PIXREGION_TOP (region);

    do
    {
	if (r2->x2 <= x1)
	{
	    /*
	     * Subtrahend entirely to left of minuend: go to next subtrahend.
	     */
	    r2++;
	}
	else if (r2->x1 <= x1)
	{
	    /*
	     * Subtrahend precedes minuend: nuke left edge of minuend.
	     */
	    x1 = r2->x2;
	    if (x1 >= r1->x2)
	    {
		/*
		 * Minuend completely covered: advance to next minuend and
		 * reset left fence to edge of new minuend.
		 */
		r1++;
		if (r1 != r1_end)
		    x1 = r1->x1;
	    }
	    else
	    {
		/*
		 * Subtrahend now used up since it doesn't extend beyond
		 * minuend
		 */
		r2++;
	    }
	}
	else if (r2->x1 < r1->x2)
	{
	    /*
	     * Left part of subtrahend covers part of minuend: add uncovered
	     * part of minuend to region and skip to next subtrahend.
	     */
	    critical_if_fail (x1 < r2->x1);
	    NEWRECT (region, next_rect, x1, y1, r2->x1, y2);

	    x1 = r2->x2;
	    if (x1 >= r1->x2)
	    {
		/*
		 * Minuend used up: advance to new...
		 */
		r1++;
		if (r1 != r1_end)
		    x1 = r1->x1;
	    }
	    else
	    {
		/*
		 * Subtrahend used up
		 */
		r2++;
	    }
	}
	else
	{
	    /*
	     * Minuend used up: add any remaining piece before advancing.
	     */
	    if (r1->x2 > x1)
		NEWRECT (region, next_rect, x1, y1, r1->x2, y2);

	    r1++;

	    if (r1 != r1_end)
		x1 = r1->x1;
	}
    }
    while ((r1 != r1_end) && (r2 != r2_end));

    /*
     * Add remaining minuend rectangles to region.
*/ while (r1 != r1_end) { critical_if_fail (x1 < r1->x2); NEWRECT (region, next_rect, x1, y1, r1->x2, y2); r1++; if (r1 != r1_end) x1 = r1->x1; } return TRUE; } /*- *----------------------------------------------------------------------- * pixman_region_subtract -- * Subtract reg_s from reg_m and leave the result in reg_d. * S stands for subtrahend, M for minuend and D for difference. * * Results: * TRUE if successful. * * Side Effects: * reg_d is overwritten. * *----------------------------------------------------------------------- */ PIXMAN_EXPORT pixman_bool_t PREFIX (_subtract) (region_type_t * reg_d, const region_type_t *reg_m, const region_type_t *reg_s) { GOOD (reg_m); GOOD (reg_s); GOOD (reg_d); /* check for trivial rejects */ if (PIXREGION_NIL (reg_m) || PIXREGION_NIL (reg_s) || !EXTENTCHECK (®_m->extents, ®_s->extents)) { if (PIXREGION_NAR (reg_s)) return pixman_break (reg_d); return PREFIX (_copy) (reg_d, reg_m); } else if (reg_m == reg_s) { FREE_DATA (reg_d); reg_d->extents.x2 = reg_d->extents.x1; reg_d->extents.y2 = reg_d->extents.y1; reg_d->data = pixman_region_empty_data; return TRUE; } /* Add those rectangles in region 1 that aren't in region 2, do yucky subtraction for overlaps, and just throw away rectangles in region 2 that aren't in region 1 */ if (!pixman_op (reg_d, reg_m, reg_s, pixman_region_subtract_o, TRUE, FALSE)) return FALSE; /* * Can't alter reg_d's extents before we call pixman_op because * it might be one of the source regions and pixman_op depends * on the extents of those regions being unaltered. Besides, this * way there's no checking against rectangles that will be nuked * due to coalescing, so we have to examine fewer rectangles. 
*/ pixman_set_extents (reg_d); GOOD (reg_d); return TRUE; } /*====================================================================== * Region Inversion *====================================================================*/ /*- *----------------------------------------------------------------------- * pixman_region_inverse -- * Take a region and a box and return a region that is everything * in the box but not in the region. The careful reader will note * that this is the same as subtracting the region from the box... * * Results: * TRUE. * * Side Effects: * new_reg is overwritten. * *----------------------------------------------------------------------- */ PIXMAN_EXPORT pixman_bool_t PREFIX (_inverse) (region_type_t * new_reg, /* Destination region */ const region_type_t *reg1, /* Region to invert */ const box_type_t * inv_rect) /* Bounding box for inversion */ { region_type_t inv_reg; /* Quick and dirty region made from the * bounding box */ GOOD (reg1); GOOD (new_reg); /* check for trivial rejects */ if (PIXREGION_NIL (reg1) || !EXTENTCHECK (inv_rect, ®1->extents)) { if (PIXREGION_NAR (reg1)) return pixman_break (new_reg); new_reg->extents = *inv_rect; FREE_DATA (new_reg); new_reg->data = (region_data_type_t *)NULL; return TRUE; } /* Add those rectangles in region 1 that aren't in region 2, * do yucky subtraction for overlaps, and * just throw away rectangles in region 2 that aren't in region 1 */ inv_reg.extents = *inv_rect; inv_reg.data = (region_data_type_t *)NULL; if (!pixman_op (new_reg, &inv_reg, reg1, pixman_region_subtract_o, TRUE, FALSE)) return FALSE; /* * Can't alter new_reg's extents before we call pixman_op because * it might be one of the source regions and pixman_op depends * on the extents of those regions being unaltered. Besides, this * way there's no checking against rectangles that will be nuked * due to coalescing, so we have to examine fewer rectangles. 
*/ pixman_set_extents (new_reg); GOOD (new_reg); return TRUE; } /* In time O(log n), locate the first box whose y2 is greater than y. * Return @end if no such box exists. */ static box_type_t * find_box_for_y (box_type_t *begin, box_type_t *end, int y) { box_type_t *mid; if (end == begin) return end; if (end - begin == 1) { if (begin->y2 > y) return begin; else return end; } mid = begin + (end - begin) / 2; if (mid->y2 > y) { /* If no box is found in [begin, mid], the function * will return @mid, which is then known to be the * correct answer. */ return find_box_for_y (begin, mid, y); } else { return find_box_for_y (mid, end, y); } } /* * rect_in(region, rect) * This routine takes a pointer to a region and a pointer to a box * and determines if the box is outside/inside/partly inside the region. * * The idea is to travel through the list of rectangles trying to cover the * passed box with them. Anytime a piece of the rectangle isn't covered * by a band of rectangles, part_out is set TRUE. Any time a rectangle in * the region covers part of the box, part_in is set TRUE. 
The process ends * when either the box has been completely covered (we reached a band that * doesn't overlap the box, part_in is TRUE and part_out is false), the * box has been partially covered (part_in == part_out == TRUE -- because of * the banding, the first time this is true we know the box is only * partially in the region) or is outside the region (we reached a band * that doesn't overlap the box at all and part_in is false) */ PIXMAN_EXPORT pixman_region_overlap_t PREFIX (_contains_rectangle) (const region_type_t * region, const box_type_t * prect) { box_type_t * pbox; box_type_t * pbox_end; int part_in, part_out; int numRects; int x, y; GOOD (region); numRects = PIXREGION_NUMRECTS (region); /* useful optimization */ if (!numRects || !EXTENTCHECK (®ion->extents, prect)) return(PIXMAN_REGION_OUT); if (numRects == 1) { /* We know that it must be PIXMAN_REGION_IN or PIXMAN_REGION_PART */ if (SUBSUMES (®ion->extents, prect)) return(PIXMAN_REGION_IN); else return(PIXMAN_REGION_PART); } part_out = FALSE; part_in = FALSE; /* (x,y) starts at upper left of rect, moving to the right and down */ x = prect->x1; y = prect->y1; /* can stop when both part_out and part_in are TRUE, or we reach prect->y2 */ for (pbox = PIXREGION_BOXPTR (region), pbox_end = pbox + numRects; pbox != pbox_end; pbox++) { /* getting up to speed or skipping remainder of band */ if (pbox->y2 <= y) { if ((pbox = find_box_for_y (pbox, pbox_end, y)) == pbox_end) break; } if (pbox->y1 > y) { part_out = TRUE; /* missed part of rectangle above */ if (part_in || (pbox->y1 >= prect->y2)) break; y = pbox->y1; /* x guaranteed to be == prect->x1 */ } if (pbox->x2 <= x) continue; /* not far enough over yet */ if (pbox->x1 > x) { part_out = TRUE; /* missed part of rectangle to left */ if (part_in) break; } if (pbox->x1 < prect->x2) { part_in = TRUE; /* definitely overlap */ if (part_out) break; } if (pbox->x2 >= prect->x2) { y = pbox->y2; /* finished with this band */ if (y >= prect->y2) break; x = prect->x1; 
/* reset x out to left again */ } else { /* * Because boxes in a band are maximal width, if the first box * to overlap the rectangle doesn't completely cover it in that * band, the rectangle must be partially out, since some of it * will be uncovered in that band. part_in will have been set true * by now... */ part_out = TRUE; break; } } if (part_in) { if (y < prect->y2) return PIXMAN_REGION_PART; else return PIXMAN_REGION_IN; } else { return PIXMAN_REGION_OUT; } } /* PREFIX(_translate) (region, x, y) * translates in place */ PIXMAN_EXPORT void PREFIX (_translate) (region_type_t *region, int x, int y) { overflow_int_t x1, x2, y1, y2; int nbox; box_type_t * pbox; GOOD (region); if (x == 0 && y == 0) return; region->extents.x1 = x1 = region->extents.x1 + x; region->extents.y1 = y1 = region->extents.y1 + y; region->extents.x2 = x2 = region->extents.x2 + x; region->extents.y2 = y2 = region->extents.y2 + y; if (((x1 - PIXMAN_REGION_MIN) | (y1 - PIXMAN_REGION_MIN) | (PIXMAN_REGION_MAX - x2) | (PIXMAN_REGION_MAX - y2)) >= 0) { if (region->data && (nbox = region->data->numRects)) { for (pbox = PIXREGION_BOXPTR (region); nbox--; pbox++) { pbox->x1 += x; pbox->y1 += y; pbox->x2 += x; pbox->y2 += y; } } return; } if (((x2 - PIXMAN_REGION_MIN) | (y2 - PIXMAN_REGION_MIN) | (PIXMAN_REGION_MAX - x1) | (PIXMAN_REGION_MAX - y1)) <= 0) { region->extents.x2 = region->extents.x1; region->extents.y2 = region->extents.y1; FREE_DATA (region); region->data = pixman_region_empty_data; return; } if (x1 < PIXMAN_REGION_MIN) region->extents.x1 = PIXMAN_REGION_MIN; else if (x2 > PIXMAN_REGION_MAX) region->extents.x2 = PIXMAN_REGION_MAX; if (y1 < PIXMAN_REGION_MIN) region->extents.y1 = PIXMAN_REGION_MIN; else if (y2 > PIXMAN_REGION_MAX) region->extents.y2 = PIXMAN_REGION_MAX; if (region->data && (nbox = region->data->numRects)) { box_type_t * pbox_out; for (pbox_out = pbox = PIXREGION_BOXPTR (region); nbox--; pbox++) { pbox_out->x1 = x1 = pbox->x1 + x; pbox_out->y1 = y1 = pbox->y1 + y; 
pbox_out->x2 = x2 = pbox->x2 + x; pbox_out->y2 = y2 = pbox->y2 + y; if (((x2 - PIXMAN_REGION_MIN) | (y2 - PIXMAN_REGION_MIN) | (PIXMAN_REGION_MAX - x1) | (PIXMAN_REGION_MAX - y1)) <= 0) { region->data->numRects--; continue; } if (x1 < PIXMAN_REGION_MIN) pbox_out->x1 = PIXMAN_REGION_MIN; else if (x2 > PIXMAN_REGION_MAX) pbox_out->x2 = PIXMAN_REGION_MAX; if (y1 < PIXMAN_REGION_MIN) pbox_out->y1 = PIXMAN_REGION_MIN; else if (y2 > PIXMAN_REGION_MAX) pbox_out->y2 = PIXMAN_REGION_MAX; pbox_out++; } if (pbox_out != pbox) { if (region->data->numRects == 1) { region->extents = *PIXREGION_BOXPTR (region); FREE_DATA (region); region->data = (region_data_type_t *)NULL; } else { pixman_set_extents (region); } } } GOOD (region); } PIXMAN_EXPORT void PREFIX (_reset) (region_type_t *region, const box_type_t *box) { GOOD (region); critical_if_fail (GOOD_RECT (box)); region->extents = *box; FREE_DATA (region); region->data = NULL; } PIXMAN_EXPORT void PREFIX (_clear) (region_type_t *region) { GOOD (region); FREE_DATA (region); region->extents = *pixman_region_empty_box; region->data = pixman_region_empty_data; } /* box is "return" value */ PIXMAN_EXPORT int PREFIX (_contains_point) (const region_type_t * region, int x, int y, box_type_t * box) { box_type_t *pbox, *pbox_end; int numRects; GOOD (region); numRects = PIXREGION_NUMRECTS (region); if (!numRects || !INBOX (®ion->extents, x, y)) return(FALSE); if (numRects == 1) { if (box) *box = region->extents; return(TRUE); } pbox = PIXREGION_BOXPTR (region); pbox_end = pbox + numRects; pbox = find_box_for_y (pbox, pbox_end, y); for (;pbox != pbox_end; pbox++) { if ((y < pbox->y1) || (x < pbox->x1)) break; /* missed it */ if (x >= pbox->x2) continue; /* not there yet */ if (box) *box = *pbox; return(TRUE); } return(FALSE); } PIXMAN_EXPORT int PREFIX (_empty) (const region_type_t * region) { GOOD (region); return(PIXREGION_NIL (region)); } PIXMAN_EXPORT int PREFIX (_not_empty) (const region_type_t * region) { GOOD (region); 
return(!PIXREGION_NIL (region)); } PIXMAN_EXPORT box_type_t * PREFIX (_extents) (const region_type_t * region) { GOOD (region); return(box_type_t *)(®ion->extents); } /* * Clip a list of scanlines to a region. The caller has allocated the * space. FSorted is non-zero if the scanline origins are in ascending order. * * returns the number of new, clipped scanlines. */ PIXMAN_EXPORT pixman_bool_t PREFIX (_selfcheck) (region_type_t *reg) { int i, numRects; if ((reg->extents.x1 > reg->extents.x2) || (reg->extents.y1 > reg->extents.y2)) { return FALSE; } numRects = PIXREGION_NUMRECTS (reg); if (!numRects) { return ((reg->extents.x1 == reg->extents.x2) && (reg->extents.y1 == reg->extents.y2) && (reg->data->size || (reg->data == pixman_region_empty_data))); } else if (numRects == 1) { return (!reg->data); } else { box_type_t * pbox_p, * pbox_n; box_type_t box; pbox_p = PIXREGION_RECTS (reg); box = *pbox_p; box.y2 = pbox_p[numRects - 1].y2; pbox_n = pbox_p + 1; for (i = numRects; --i > 0; pbox_p++, pbox_n++) { if ((pbox_n->x1 >= pbox_n->x2) || (pbox_n->y1 >= pbox_n->y2)) { return FALSE; } if (pbox_n->x1 < box.x1) box.x1 = pbox_n->x1; if (pbox_n->x2 > box.x2) box.x2 = pbox_n->x2; if ((pbox_n->y1 < pbox_p->y1) || ((pbox_n->y1 == pbox_p->y1) && ((pbox_n->x1 < pbox_p->x2) || (pbox_n->y2 != pbox_p->y2)))) { return FALSE; } } return ((box.x1 == reg->extents.x1) && (box.x2 == reg->extents.x2) && (box.y1 == reg->extents.y1) && (box.y2 == reg->extents.y2)); } } PIXMAN_EXPORT pixman_bool_t PREFIX (_init_rects) (region_type_t *region, const box_type_t *boxes, int count) { box_type_t *rects; int displacement; int i; /* if it's 1, then we just want to set the extents, so call * the existing method. 
*/ if (count == 1) { PREFIX (_init_rect) (region, boxes[0].x1, boxes[0].y1, boxes[0].x2 - boxes[0].x1, boxes[0].y2 - boxes[0].y1); return TRUE; } PREFIX (_init) (region); /* if it's 0, don't call pixman_rect_alloc -- 0 rectangles is * a special case, and causing pixman_rect_alloc would cause * us to leak memory (because the 0-rect case should be the * static pixman_region_empty_data data). */ if (count == 0) return TRUE; if (!pixman_rect_alloc (region, count)) return FALSE; rects = PIXREGION_RECTS (region); /* Copy in the rects */ memcpy (rects, boxes, sizeof(box_type_t) * count); region->data->numRects = count; /* Eliminate empty and malformed rectangles */ displacement = 0; for (i = 0; i < count; ++i) { box_type_t *box = &rects[i]; if (box->x1 >= box->x2 || box->y1 >= box->y2) displacement++; else if (displacement) rects[i - displacement] = rects[i]; } region->data->numRects -= displacement; /* If eliminating empty rectangles caused there * to be only 0 or 1 rectangles, deal with that. 
*/ if (region->data->numRects == 0) { FREE_DATA (region); PREFIX (_init) (region); return TRUE; } if (region->data->numRects == 1) { region->extents = rects[0]; FREE_DATA (region); region->data = NULL; GOOD (region); return TRUE; } /* Validate */ region->extents.x1 = region->extents.x2 = 0; return validate (region); } #define READ(_ptr) (*(_ptr)) static inline box_type_t * bitmap_addrect (region_type_t *reg, box_type_t *r, box_type_t **first_rect, int rx1, int ry1, int rx2, int ry2) { if ((rx1 < rx2) && (ry1 < ry2) && (!(reg->data->numRects && ((r-1)->y1 == ry1) && ((r-1)->y2 == ry2) && ((r-1)->x1 <= rx1) && ((r-1)->x2 >= rx2)))) { if (reg->data->numRects == reg->data->size) { if (!pixman_rect_alloc (reg, 1)) return NULL; *first_rect = PIXREGION_BOXPTR(reg); r = *first_rect + reg->data->numRects; } r->x1 = rx1; r->y1 = ry1; r->x2 = rx2; r->y2 = ry2; reg->data->numRects++; if (r->x1 < reg->extents.x1) reg->extents.x1 = r->x1; if (r->x2 > reg->extents.x2) reg->extents.x2 = r->x2; r++; } return r; } /* Convert bitmap clip mask into clipping region. * First, goes through each line and makes boxes by noting the transitions * from 0 to 1 and 1 to 0. * Then it coalesces the current line with the previous if they have boxes * at the same X coordinates. * Stride is in number of uint32_t per line. 
*/ PIXMAN_EXPORT void PREFIX (_init_from_image) (region_type_t *region, pixman_image_t *image) { uint32_t mask0 = 0xffffffff & ~SCREEN_SHIFT_RIGHT(0xffffffff, 1); box_type_t *first_rect, *rects, *prect_line_start; box_type_t *old_rect, *new_rect; uint32_t *pw, w, *pw_line, *pw_line_end; int irect_prev_start, irect_line_start; int h, base, rx1 = 0, crects; int ib; pixman_bool_t in_box, same; int width, height, stride; PREFIX(_init) (region); critical_if_fail (region->data); return_if_fail (image->type == BITS); return_if_fail (image->bits.format == PIXMAN_a1); pw_line = pixman_image_get_data (image); width = pixman_image_get_width (image); height = pixman_image_get_height (image); stride = pixman_image_get_stride (image) / 4; first_rect = PIXREGION_BOXPTR(region); rects = first_rect; region->extents.x1 = width - 1; region->extents.x2 = 0; irect_prev_start = -1; for (h = 0; h < height; h++) { pw = pw_line; pw_line += stride; irect_line_start = rects - first_rect; /* If the Screen left most bit of the word is set, we're starting in * a box */ if (READ(pw) & mask0) { in_box = TRUE; rx1 = 0; } else { in_box = FALSE; } /* Process all words which are fully in the pixmap */ pw_line_end = pw + (width >> 5); for (base = 0; pw < pw_line_end; base += 32) { w = READ(pw++); if (in_box) { if (!~w) continue; } else { if (!w) continue; } for (ib = 0; ib < 32; ib++) { /* If the Screen left most bit of the word is set, we're * starting a box */ if (w & mask0) { if (!in_box) { rx1 = base + ib; /* start new box */ in_box = TRUE; } } else { if (in_box) { /* end box */ rects = bitmap_addrect (region, rects, &first_rect, rx1, h, base + ib, h + 1); if (rects == NULL) goto error; in_box = FALSE; } } /* Shift the word VISUALLY left one. 
*/ w = SCREEN_SHIFT_LEFT(w, 1); } } if (width & 31) { /* Process final partial word on line */ w = READ(pw++); for (ib = 0; ib < (width & 31); ib++) { /* If the Screen left most bit of the word is set, we're * starting a box */ if (w & mask0) { if (!in_box) { rx1 = base + ib; /* start new box */ in_box = TRUE; } } else { if (in_box) { /* end box */ rects = bitmap_addrect(region, rects, &first_rect, rx1, h, base + ib, h + 1); if (rects == NULL) goto error; in_box = FALSE; } } /* Shift the word VISUALLY left one. */ w = SCREEN_SHIFT_LEFT(w, 1); } } /* If scanline ended with last bit set, end the box */ if (in_box) { rects = bitmap_addrect(region, rects, &first_rect, rx1, h, base + (width & 31), h + 1); if (rects == NULL) goto error; } /* if all rectangles on this line have the same x-coords as * those on the previous line, then add 1 to all the previous y2s and * throw away all the rectangles from this line */ same = FALSE; if (irect_prev_start != -1) { crects = irect_line_start - irect_prev_start; if (crects != 0 && crects == ((rects - first_rect) - irect_line_start)) { old_rect = first_rect + irect_prev_start; new_rect = prect_line_start = first_rect + irect_line_start; same = TRUE; while (old_rect < prect_line_start) { if ((old_rect->x1 != new_rect->x1) || (old_rect->x2 != new_rect->x2)) { same = FALSE; break; } old_rect++; new_rect++; } if (same) { old_rect = first_rect + irect_prev_start; while (old_rect < prect_line_start) { old_rect->y2 += 1; old_rect++; } rects -= crects; region->data->numRects -= crects; } } } if(!same) irect_prev_start = irect_line_start; } if (!region->data->numRects) { region->extents.x1 = region->extents.x2 = 0; } else { region->extents.y1 = PIXREGION_BOXPTR(region)->y1; region->extents.y2 = PIXREGION_END(region)->y2; if (region->data->numRects == 1) { free (region->data); region->data = NULL; } } error: return; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 
pixman-0.44.0/pixman/pixman-region16.c0000664000175000017500000000446214712446423017510 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without * fee, provided that the above copyright notice appear in all copies * and that both that copyright notice and this permission notice * appear in supporting documentation, and that the name of * Red Hat, Inc. not be used in advertising or publicity pertaining to * distribution of the software without specific, written prior * permission. Red Hat, Inc. makes no representations about the * suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * RED HAT, INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL RED HAT, INC. BE LIABLE FOR ANY SPECIAL, * INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Author: Soren Sandmann */ #ifdef HAVE_CONFIG_H #include #endif #undef PIXMAN_DISABLE_DEPRECATED #include "pixman-private.h" #include typedef pixman_box16_t box_type_t; typedef pixman_region16_data_t region_data_type_t; typedef pixman_region16_t region_type_t; typedef int32_t overflow_int_t; typedef struct { int x, y; } point_type_t; #define PREFIX(x) pixman_region##x #define PIXMAN_REGION_MAX INT16_MAX #define PIXMAN_REGION_MIN INT16_MIN #include "pixman-region.c" /* This function exists only to make it possible to preserve the X ABI - * it should go away at first opportunity. * * The problem is that the X ABI exports the three structs and has used * them through macros. 
So the X server calls this function with * the addresses of those structs which makes the existing code continue to * work. */ PIXMAN_EXPORT void pixman_region_set_static_pointers (pixman_box16_t *empty_box, pixman_region16_data_t *empty_data, pixman_region16_data_t *broken_data) { pixman_region_empty_box = empty_box; pixman_region_empty_data = empty_data; pixman_broken_data = broken_data; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-region32.c0000664000175000017500000000322214712446423017477 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without * fee, provided that the above copyright notice appear in all copies * and that both that copyright notice and this permission notice * appear in supporting documentation, and that the name of * Red Hat, Inc. not be used in advertising or publicity pertaining to * distribution of the software without specific, written prior * permission. Red Hat, Inc. makes no representations about the * suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * RED HAT, INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL RED HAT, INC. BE LIABLE FOR ANY SPECIAL, * INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR * IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * Author: Soren Sandmann */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #include typedef pixman_box32_t box_type_t; typedef pixman_region32_data_t region_data_type_t; typedef pixman_region32_t region_type_t; typedef int64_t overflow_int_t; typedef struct { int x, y; } point_type_t; #define PREFIX(x) pixman_region32##x #define PIXMAN_REGION_MAX INT32_MAX #define PIXMAN_REGION_MIN INT32_MIN #include "pixman-region.c" ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-riscv.c0000664000175000017500000000412614712446423017201 0ustar00mattst88mattst88/* * Copyright Âİ 2024 Filip Wasil, Samsung Electronics * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #ifdef USE_RVV #if defined(__linux__) #include #include #endif typedef enum { RVV = (1 << 0), } riscv_cpu_features_t; static riscv_cpu_features_t detect_cpu_features (void) { riscv_cpu_features_t features = 0; #if defined(__linux__) if (getauxval (AT_HWCAP) & COMPAT_HWCAP_ISA_V) { features |= RVV; } #else #pragma message( \ "warning: RISC-V Vector Extension runtime check not implemented for this platform. RVV will be disabled") #endif return features; } #endif pixman_implementation_t * _pixman_riscv_get_implementations (pixman_implementation_t *imp) { #ifdef USE_RVV if (!_pixman_disabled ("rvv") && (detect_cpu_features () & RVV)) { imp = _pixman_implementation_create_rvv (imp); } #endif return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-rvv.c0000664000175000017500000011272214712446423016672 0ustar00mattst88mattst88/* * Copyright Âİ 2000 Keith Packard, member of The XFree86 Project, Inc. * 2005 Lars Knoll & Zack Rusin, Trolltech * 2024 Filip Wasil, Samsung Electronics * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. 
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-combine-float.h" #include "pixman-private.h" #include #include #include #include #include #include #include #include #include /* * Screen * * ad * as * B(d/ad, s/as) * = ad * as * (d/ad + s/as - s/as * d/ad) * = ad * s + as * d - s * d */ static force_inline vfloat32m1_t rvv_blend_screen (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1, t2; t0 = __riscv_vfmul_vv_f32m1 (s, da, vl); t1 = __riscv_vfmul_vv_f32m1 (d, sa, vl); t2 = __riscv_vfmul_vv_f32m1 (s, d, vl); return __riscv_vfsub_vv_f32m1 (__riscv_vfadd_vv_f32m1 (t0, t1, vl), t2, vl); } /* * Multiply * * ad * as * B(d / ad, s / as) * = ad * as * d/ad * s/as * = d * s * */ static force_inline vfloat32m1_t rvv_blend_multiply (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { return __riscv_vfmul_vv_f32m1 (s, d, vl); } /* * Overlay * * ad * as * B(d/ad, s/as) * = ad * as * Hardlight (s, d) * = if (d / ad < 0.5) * as * ad * Multiply (s/as, 2 * d/ad) * else * as * ad * Screen (s/as, 2 * d / ad - 1) * = if (d < 0.5 * ad) * as * ad * s/as * 2 * d /ad * else * as * ad * (s/as + 2 * d / ad - 1 - s / as * (2 * d / ad - 1)) * = if (2 * d < ad) * 2 * s * d * else * ad * s + 2 * as * d - as * ad - ad * s * (2 * d / ad - 1) * = if (2 * d < ad) * 2 * s * d * else * as * ad - 2 * (ad - d) * (as - s) */ static force_inline vfloat32m1_t rvv_blend_overlay (const vfloat32m1_t sa, 
const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1, t2, t3, t4, f0, f1, f2; vbool32_t vb; t0 = __riscv_vfadd_vv_f32m1 (d, d, vl); t1 = __riscv_vfmul_vv_f32m1 (__riscv_vfadd_vv_f32m1 (s, s, vl), d, vl); vb = __riscv_vmflt_vv_f32m1_b32 (t0, da, vl); t2 = __riscv_vfmul_vv_f32m1 (sa, da, vl); f2 = __riscv_vfsub_vv_f32m1 (da, d, vl); t3 = __riscv_vfmul_vf_f32m1 (f2, 2.0f, vl); t4 = __riscv_vfsub_vv_f32m1 (sa, s, vl); f0 = __riscv_vfmul_vv_f32m1 (t3, t4, vl); f1 = __riscv_vfsub_vv_f32m1 (t2, f0, vl); return __riscv_vmerge_vvm_f32m1 (f1, t1, vb, vl); } /* * Darken * * ad * as * B(d/ad, s/as) * = ad * as * MIN(d/ad, s/as) * = MIN (as * d, ad * s) */ static force_inline vfloat32m1_t rvv_blend_darken (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t ss, dd; vbool32_t vb; ss = __riscv_vfmul_vv_f32m1 (da, s, vl); dd = __riscv_vfmul_vv_f32m1 (sa, d, vl); vb = __riscv_vmfgt_vv_f32m1_b32 (ss, dd, vl); return __riscv_vmerge_vvm_f32m1 (ss, dd, vb, vl); } /* * Lighten * * ad * as * B(d/ad, s/as) * = ad * as * MAX(d/ad, s/as) * = MAX (as * d, ad * s) */ static force_inline vfloat32m1_t rvv_blend_lighten (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t ss, dd; vbool32_t vb; ss = __riscv_vfmul_vv_f32m1 (s, da, vl); dd = __riscv_vfmul_vv_f32m1 (d, sa, vl); vb = __riscv_vmfgt_vv_f32m1_b32 (ss, dd, vl); return __riscv_vmerge_vvm_f32m1 (dd, ss, vb, vl); } /* * Color dodge * * ad * as * B(d/ad, s/as) * = if d/ad = 0 * ad * as * 0 * else if (d/ad >= (1 - s/as) * ad * as * 1 * else * ad * as * ((d/ad) / (1 - s/as)) * = if d = 0 * 0 * elif as * d >= ad * (as - s) * ad * as * else * as * (as * d / (as - s)) * */ static force_inline vfloat32m1_t rvv_blend_color_dodge (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1, t2, t3, t4; vbool32_t 
is_d_zero, vb, is_t0_non_zero; is_d_zero = __riscv_vmfeq_vf_f32m1_b32 (d, 0.0f, vl); t0 = __riscv_vfsub_vv_f32m1 (sa, s, vl); // sa - s t1 = __riscv_vfmul_vv_f32m1 (sa, d, vl); // d * sa t2 = __riscv_vfmul_vv_f32m1 (sa, da, vl); // sa * da t3 = __riscv_vfsub_vv_f32m1 (t2, __riscv_vfmul_vv_f32m1 (s, da, vl), vl); // sa * da - s * da is_t0_non_zero = __riscv_vmfne_vf_f32m1_b32 (t0, 0.0f, vl); vb = __riscv_vmflt_vv_f32m1_b32 (t3, t1, vl); t4 = __riscv_vfdiv_vv_f32m1 (__riscv_vfmul_vv_f32m1 (sa, t1, vl), t0, vl); // sa * sa * d / (sa - s); return __riscv_vfmerge_vfm_f32m1 ( __riscv_vmerge_vvm_f32m1 ( __riscv_vmerge_vvm_f32m1 (t2, t4, is_t0_non_zero, vl), t2, vb, vl), 0.0f, is_d_zero, vl); } /* * Color burn * * We modify the first clause "if d = 1" to "if d >= 1" since with * premultiplied colors d > 1 can actually happen. * * ad * as * B(d/ad, s/as) * = if d/ad >= 1 * ad * as * 1 * elif (1 - d/ad) >= s/as * ad * as * 0 * else * ad * as * (1 - ((1 - d/ad) / (s/as))) * = if d >= ad * ad * as * elif as * ad - as * d >= ad * s * 0 * else * ad * as - as * as * (ad - d) / s */ static force_inline vfloat32m1_t rvv_blend_color_burn (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1, t2, t3, t4, t5, t6, t7; vbool32_t is_d_ge_da, is_s_zero, vb; is_d_ge_da = __riscv_vmfge_vv_f32m1_b32 (d, da, vl); is_s_zero = __riscv_vmfeq_vf_f32m1_b32 (s, 0.0f, vl); t0 = __riscv_vfmul_vv_f32m1 (sa, __riscv_vfsub_vv_f32m1 (da, d, vl), vl); // sa * (da - d) t1 = __riscv_vfsub_vv_f32m1 (da, __riscv_vfdiv_vv_f32m1 (t0, s, vl), vl); // da - sa * (da - d) / s) t2 = __riscv_vfmul_vv_f32m1 (sa, da, vl); // sa * da t3 = __riscv_vfmul_vv_f32m1 (sa, t1, vl); // sa * (da - sa * (da - d) / s) t4 = __riscv_vfmul_vv_f32m1 (s, da, vl); // s * da vb = __riscv_vmfge_vf_f32m1_b32 (__riscv_vfsub_vv_f32m1 (t0, t4, vl), 0.0f, vl); // if (sa * (da - d) - s * da >= 0.0f) t6 = __riscv_vfmerge_vfm_f32m1 (t3, 0.0f, is_s_zero, vl); t5 = 
__riscv_vfmerge_vfm_f32m1 (t6, 0.0f, vb, vl); t7 = __riscv_vmerge_vvm_f32m1 (t5, t2, is_d_ge_da, vl); return t7; } /* * Hard light * * ad * as * B(d/ad, s/as) * = if (s/as <= 0.5) * ad * as * Multiply (d/ad, 2 * s/as) * else * ad * as * Screen (d/ad, 2 * s/as - 1) * = if 2 * s <= as * ad * as * d/ad * 2 * s / as * else * ad * as * (d/ad + (2 * s/as - 1) + d/ad * (2 * s/as - 1)) * = if 2 * s <= as * 2 * s * d * else * as * ad - 2 * (ad - d) * (as - s) */ static force_inline vfloat32m1_t rvv_blend_hard_light (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1, t2, t3, t4; vbool32_t vb; t0 = __riscv_vfadd_vv_f32m1 (s, s, vl); t1 = __riscv_vfmul_vv_f32m1 (__riscv_vfadd_vv_f32m1 (s, s, vl), d, vl); vb = __riscv_vmfgt_vv_f32m1_b32 (t0, sa, vl); t2 = __riscv_vfmul_vv_f32m1 (sa, da, vl); t3 = __riscv_vfmul_vf_f32m1 (__riscv_vfsub_vv_f32m1 (da, d, vl), 2.0f, vl); t4 = __riscv_vfsub_vv_f32m1 (sa, s, vl); return __riscv_vmerge_vvm_f32m1 ( t1, __riscv_vfsub_vv_f32m1 (t2, __riscv_vfmul_vv_f32m1 (t3, t4, vl), vl), vb, vl); } /* * Soft light * * ad * as * B(d/ad, s/as) * = if (s/as <= 0.5) * ad * as * (d/ad - (1 - 2 * s/as) * d/ad * (1 - d/ad)) * else if (d/ad <= 0.25) * ad * as * (d/ad + (2 * s/as - 1) * ((((16 * d/ad - 12) * d/ad + 4) * d/ad) - d/ad)) * else * ad * as * (d/ad + (2 * s/as - 1) * sqrt (d/ad)) * = if (2 * s <= as) * d * as - d * (ad - d) * (as - 2 * s) / ad; * else if (4 * d <= ad) * (2 * s - as) * d * ((16 * d / ad - 12) * d / ad + 3); * else * d * as + (sqrt (d * ad) - d) * (2 * s - as); */ static force_inline vfloat32m1_t rvv_blend_soft_light (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13; vbool32_t is_sa_lt_2s, is_da_ls_4d, is_da_non_zero; is_da_non_zero = __riscv_vmfne_vf_f32m1_b32 (da, 0.0f, vl); t0 = __riscv_vfadd_vv_f32m1 (s, s, vl); // 2 * s is_sa_lt_2s = 
__riscv_vmflt_vv_f32m1_b32 (sa, t0, vl); t1 = __riscv_vfmul_vv_f32m1 (sa, d, vl); // d * sa t2 = __riscv_vfsub_vv_f32m1 (sa, t0, vl); // (sa - 2*s) t3 = __riscv_vfmul_vv_f32m1 (d, t2, vl); // (sa - 2*s) * d t7 = __riscv_vfdiv_vv_f32m1 (__riscv_vfmul_vf_f32m1 (d, 16.0f, vl), da, vl); // 16 * d / da t8 = __riscv_vfmul_vv_f32m1 (d, __riscv_vfsub_vf_f32m1 (t7, 12.0f, vl), vl); // (16 * d / da - 12) * d t9 = __riscv_vfadd_vf_f32m1 (__riscv_vfdiv_vv_f32m1 (t8, da, vl), 3.0f, vl); // (16 * d / da - 12) * d / da + 3) t4 = __riscv_vfmul_vv_f32m1 ( t3, t9, vl); // (sa - 2*s) * d * ((16 * d / da - 12) * d / da + 3) t5 = __riscv_vfsub_vv_f32m1 ( t1, t4, vl); // d * sa - (sa - 2*s) * d * ((16 * d / da - 12) * d / da + 3) t6 = __riscv_vfadd_vv_f32m1 (__riscv_vfadd_vv_f32m1 (d, d, vl), __riscv_vfadd_vv_f32m1 (d, d, vl), vl); is_da_ls_4d = __riscv_vmflt_vv_f32m1_b32 (da, t6, vl); t10 = __riscv_vfsub_vv_f32m1 ( __riscv_vfsqrt_v_f32m1 (__riscv_vfmul_vv_f32m1 (d, da, vl), vl), d, vl); // sqrtf (d * da) - d t11 = __riscv_vfmul_vv_f32m1 (t2, t10, vl); // (sqrtf (d * da) - d) * (sa - 2 * s) t12 = __riscv_vfsub_vv_f32m1 ( t1, t11, vl); // d * sa - (sqrtf (d * da) - d) * (sa - 2 * s) // d * sa - d * (da - d) * (sa - 2 * s) / da t13 = __riscv_vfsub_vv_f32m1 ( t1, __riscv_vfdiv_vv_f32m1 ( __riscv_vfmul_vv_f32m1 (__riscv_vfmul_vv_f32m1 (d, t2, vl), __riscv_vfsub_vv_f32m1 (da, d, vl), vl), da, vl), vl); return __riscv_vmerge_vvm_f32m1 ( t1, // if (!FLOAT_IS_ZERO (da)) __riscv_vmerge_vvm_f32m1 ( t13, // if (4 * d > da) __riscv_vmerge_vvm_f32m1 (t5, t12, is_da_ls_4d, vl), is_sa_lt_2s, vl), is_da_non_zero, vl); } /* * Difference * * ad * as * B(s/as, d/ad) * = ad * as * abs (s/as - d/ad) * = if (s/as <= d/ad) * ad * as * (d/ad - s/as) * else * ad * as * (s/as - d/ad) * = if (ad * s <= as * d) * as * d - ad * s * else * ad * s - as * d */ static force_inline vfloat32m1_t rvv_blend_difference (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { 
vfloat32m1_t dsa, sda; vbool32_t vb; dsa = __riscv_vfmul_vv_f32m1 (d, sa, vl); sda = __riscv_vfmul_vv_f32m1 (s, da, vl); vb = __riscv_vmflt_vv_f32m1_b32 (sda, dsa, vl); return __riscv_vmerge_vvm_f32m1 (__riscv_vfsub_vv_f32m1 (sda, dsa, vl), __riscv_vfsub_vv_f32m1 (dsa, sda, vl), vb, vl); } /* * Exclusion * * ad * as * B(s/as, d/ad) * = ad * as * (d/ad + s/as - 2 * d/ad * s/as) * = as * d + ad * s - 2 * s * d */ static force_inline vfloat32m1_t rvv_blend_exclusion (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl) { vfloat32m1_t t0, t1; t0 = __riscv_vfmul_vv_f32m1 (__riscv_vfadd_vv_f32m1 (d, d, vl), s, vl); t1 = __riscv_vfadd_vv_f32m1 (__riscv_vfmul_vv_f32m1 (s, da, vl), __riscv_vfmul_vv_f32m1 (d, sa, vl), vl); return __riscv_vfsub_vv_f32m1 (t1, t0, vl); } typedef vfloat32m1_t (*rvv_combine_channel_float_t) (const vfloat32m1_t sa, const vfloat32m1_t s, const vfloat32m1_t da, const vfloat32m1_t d, size_t vl); static force_inline void rvv_combine_inner (pixman_bool_t component, float *dest, const float *src, const float *mask, int n_pixels, rvv_combine_channel_float_t combine_a, rvv_combine_channel_float_t combine_c) { float *__restrict__ pd = dest; const float *__restrict__ ps = src; const float *__restrict__ pm = mask; const int component_count = 4; int vn = component_count * n_pixels; int vl = 0; int vl_step = 0; const ptrdiff_t stride = component_count * sizeof (float); vfloat32m1x4_t sa_sr_sg_sb, da_dr_dg_db, ma_mr_mg_mb; vfloat32m1_t da2, dr2, dg2, db2, ma2, mr2, mg2, mb2, sr2, sg2, sb2, sa2; if (n_pixels == 0) { return; } if (!mask) { for (; vn > 0; vn -= vl_step, pd += vl_step, ps += vl_step) { vl = __riscv_vsetvl_e32m1 (vn / component_count); sa_sr_sg_sb = __riscv_vlseg4e32_v_f32m1x4 (ps, vl); da_dr_dg_db = __riscv_vlseg4e32_v_f32m1x4 (pd, vl); da2 = combine_a (__riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), 
__riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), vl); dr2 = combine_c (__riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 1), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 1), vl); dg2 = combine_c (__riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 2), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 2), vl); db2 = combine_c (__riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 3), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 3), vl); __riscv_vsseg4e32_v_f32m1x4 ( pd, __riscv_vcreate_v_f32m1x4 (da2, dr2, dg2, db2), vl); vl_step = vl * component_count; } } else { if (component) { for (; vn > 0; vn -= vl_step, pd += vl_step, ps += vl_step, pm += vl_step) { vl = __riscv_vsetvl_e32m1 (vn / component_count); sa_sr_sg_sb = __riscv_vlseg4e32_v_f32m1x4 (ps, vl); da_dr_dg_db = __riscv_vlseg4e32_v_f32m1x4 (pd, vl); ma_mr_mg_mb = __riscv_vlseg4e32_v_f32m1x4 (pm, vl); sr2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 1), __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 1), vl); sg2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 2), __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 2), vl); sb2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 3), __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 3), vl); ma2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 0), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), vl); mr2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 1), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), vl); mg2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 2), __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), vl); mb2 = __riscv_vfmul_vv_f32m1 ( __riscv_vget_v_f32m1x4_f32m1 (ma_mr_mg_mb, 3), __riscv_vget_v_f32m1x4_f32m1 
(sa_sr_sg_sb, 0), vl); da2 = combine_a ( ma2, ma2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), vl); dr2 = combine_c ( mr2, sr2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 1), vl); dg2 = combine_c ( mg2, sg2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 2), vl); db2 = combine_c ( mb2, sb2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 3), vl); __riscv_vsseg4e32_v_f32m1x4 ( pd, __riscv_vcreate_v_f32m1x4 (da2, dr2, dg2, db2), vl); vl_step = vl * component_count; } } else { for (; vn > 0; vn -= vl_step, pd += vl_step, ps += vl_step, pm += vl_step) { vl = __riscv_vsetvl_e32m1 (vn / component_count); sa_sr_sg_sb = __riscv_vlseg4e32_v_f32m1x4 (ps, vl); da_dr_dg_db = __riscv_vlseg4e32_v_f32m1x4 (pd, vl); ma2 = __riscv_vlse32_v_f32m1 (pm, stride, vl); sa2 = __riscv_vfmul_vv_f32m1 ( ma2, __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 0), vl); sr2 = __riscv_vfmul_vv_f32m1 ( ma2, __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 1), vl); sg2 = __riscv_vfmul_vv_f32m1 ( ma2, __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 2), vl); sb2 = __riscv_vfmul_vv_f32m1 ( ma2, __riscv_vget_v_f32m1x4_f32m1 (sa_sr_sg_sb, 3), vl); ma2 = sa2; dr2 = combine_c ( ma2, sr2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 1), vl); dg2 = combine_c ( ma2, sg2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 2), vl); db2 = combine_c ( ma2, sb2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 3), vl); da2 = combine_a ( ma2, sa2, __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), __riscv_vget_v_f32m1x4_f32m1 (da_dr_dg_db, 0), vl); __riscv_vsseg4e32_v_f32m1x4 ( pd, __riscv_vcreate_v_f32m1x4 (da2, dr2, dg2, db2), vl); vl_step = vl * component_count; } } } } #define RVV_MAKE_COMBINER(name, component, combine_a, 
/* Tail of RVV_MAKE_COMBINER (its "#define" line precedes this chunk):
 * expands into one pixman float-combiner entry point that forwards to
 * rvv_combine_inner with the chosen alpha/colour channel combiners. */
combine_c)                                                                    \
    static void rvv_combine_##name##_float (                                  \
	pixman_implementation_t *imp, pixman_op_t op, float *dest,            \
	const float *src, const float *mask, int n_pixels)                    \
    {                                                                         \
	rvv_combine_inner (component, dest, src, mask, n_pixels, combine_a,   \
			   combine_c);                                        \
    }

/* Instantiate both the component-alpha ("_ca") and unified ("_u") variants
 * of a combiner from a single (combine_a, combine_c) pair. */
#define RVV_MAKE_COMBINERS(name, combine_a, combine_c)                        \
    RVV_MAKE_COMBINER (name##_ca, TRUE, combine_a, combine_c)                 \
    RVV_MAKE_COMBINER (name##_u, FALSE, combine_a, combine_c)

/*
 * Evaluate one Porter-Duff blend factor per vector lane.
 *
 * sa/da are source/destination alpha vectors, vl the active vector length.
 * Every *_OVER_* factor divides one alpha by the other: those paths clamp
 * the quotient to [0, 1] and use a mask merge to substitute a well-defined
 * value on lanes whose divisor is zero.
 */
static force_inline vfloat32m1_t
rvv_get_factor (combine_factor_t factor,
		vfloat32m1_t     sa,
		vfloat32m1_t     da,
		size_t           vl)
{
    vfloat32m1_t vone  = __riscv_vfmv_v_f_f32m1 (1.0f, vl);
    vfloat32m1_t vzero = __riscv_vfmv_v_f_f32m1 (0.0f, vl);

    switch (factor)
    {
    case ZERO:
	return vzero;

    case ONE:
	return vone;

    case SRC_ALPHA:
	return sa;

    case DEST_ALPHA:
	return da;

    case INV_SA:
	return __riscv_vfsub_vv_f32m1 (vone, sa, vl);

    case INV_DA:
	return __riscv_vfsub_vv_f32m1 (vone, da, vl);

    /* clamp (sa / da) to [0, 1]; lanes with da == 0 yield 1. */
    case SA_OVER_DA:
	return __riscv_vmerge_vvm_f32m1 (
	    vone,
	    __riscv_vfmin_vv_f32m1 (
		vone,
		__riscv_vfmax_vv_f32m1 (
		    vzero, __riscv_vfdiv_vv_f32m1 (sa, da, vl), vl),
		vl),
	    __riscv_vmfne_vf_f32m1_b32 (da, 0.0f, vl), vl);

    /* clamp (da / sa) to [0, 1]; lanes with sa == 0 yield 1. */
    case DA_OVER_SA:
	return __riscv_vmerge_vvm_f32m1 (
	    __riscv_vfmin_vv_f32m1 (
		vone,
		__riscv_vfmax_vv_f32m1 (
		    vzero, __riscv_vfdiv_vv_f32m1 (da, sa, vl), vl),
		vl),
	    vone, __riscv_vmfeq_vf_f32m1_b32 (sa, 0.0f, vl), vl);

    /* clamp ((1 - sa) / da); lanes with da == 0 yield 1. */
    case INV_SA_OVER_DA:
    {
	vfloat32m1_t t0 = __riscv_vfdiv_vv_f32m1 (
	    __riscv_vfsub_vv_f32m1 (vone, sa, vl), da, vl);
	return __riscv_vmerge_vvm_f32m1 (
	    vone,
	    __riscv_vfmin_vv_f32m1 (
		vone, __riscv_vfmax_vv_f32m1 (vzero, t0, vl), vl),
	    __riscv_vmfne_vf_f32m1_b32 (da, 0.0f, vl), vl);
    }

    /* clamp ((1 - da) / sa); lanes with sa == 0 yield 1. */
    case INV_DA_OVER_SA:
    {
	vfloat32m1_t t0 = __riscv_vfdiv_vv_f32m1 (
	    __riscv_vfsub_vv_f32m1 (vone, da, vl), sa, vl);
	return __riscv_vmerge_vvm_f32m1 (
	    vone,
	    __riscv_vfmin_vv_f32m1 (
		vone, __riscv_vfmax_vv_f32m1 (vzero, t0, vl), vl),
	    __riscv_vmfne_vf_f32m1_b32 (sa, 0.0f, vl), vl);
    }

    /* clamp (1 - sa / da); lanes with da == 0 yield 0. */
    case ONE_MINUS_SA_OVER_DA:
    {
	vfloat32m1_t t0 = __riscv_vfsub_vv_f32m1 (
	    vone, __riscv_vfdiv_vv_f32m1 (sa, da, vl), vl);
	return __riscv_vmerge_vvm_f32m1 (
	    vzero,
	    __riscv_vfmin_vv_f32m1 (
		vone, __riscv_vfmax_vv_f32m1 (vzero, t0, vl), vl),
	    __riscv_vmfne_vf_f32m1_b32 (da, 0.0f, vl), vl);
    }

    /* clamp (1 - da / sa); lanes with sa == 0 yield 0. */
    case ONE_MINUS_DA_OVER_SA:
    {
	vfloat32m1_t t0 = __riscv_vfsub_vv_f32m1 (
	    vone, __riscv_vfdiv_vv_f32m1 (da, sa, vl), vl);
	return __riscv_vmerge_vvm_f32m1 (
	    vzero,
	    __riscv_vfmin_vv_f32m1 (
		vone, __riscv_vfmax_vv_f32m1 (vzero, t0, vl), vl),
	    __riscv_vmfne_vf_f32m1_b32 (sa, 0.0f, vl), vl);
    }

    /* clamp (1 - (1 - da) / sa); is_zero marks lanes where sa is finite
     * but below FLT_MIN (effectively zero) — those yield 0. */
    case ONE_MINUS_INV_DA_OVER_SA:
    {
	vbool32_t is_zero = __riscv_vmand_mm_b32 (
	    __riscv_vmflt_vf_f32m1_b32 (sa, FLT_MIN, vl),
	    __riscv_vmfgt_vf_f32m1_b32 (sa, -FLT_MAX, vl), vl);
	vfloat32m1_t t0 = __riscv_vfsub_vv_f32m1 (
	    vone,
	    __riscv_vfdiv_vv_f32m1 (
		__riscv_vfsub_vv_f32m1 (vone, da, vl), sa, vl),
	    vl);
	return __riscv_vmerge_vvm_f32m1 (
	    __riscv_vfmin_vv_f32m1 (
		vone, __riscv_vfmax_vv_f32m1 (vzero, t0, vl), vl),
	    vzero, is_zero, vl);
    }

    /* clamp (1 - (1 - sa) / da); lanes with da == 0 yield 0. */
    case ONE_MINUS_INV_SA_OVER_DA:
    {
	vfloat32m1_t t0 = __riscv_vfsub_vv_f32m1 (
	    vone,
	    __riscv_vfdiv_vv_f32m1 (
		__riscv_vfsub_vv_f32m1 (vone, sa, vl), da, vl),
	    vl);
	return __riscv_vmerge_vvm_f32m1 (
	    __riscv_vfmin_vv_f32m1 (
		vone, __riscv_vfmax_vv_f32m1 (vzero, t0, vl), vl),
	    vzero, __riscv_vmfeq_vf_f32m1_b32 (da, 0.0f, vl), vl);
    }
    }

    /* Not reached for valid enum values; -1 flags a bad factor. */
    return __riscv_vfmv_v_f_f32m1 (-1.0f, vl);
}

/* Build a pure Porter-Duff combiner: result = min (1, s*fa + d*fb), with
 * the factors a/b resolved per lane by rvv_get_factor(). */
#define RVV_MAKE_PD_COMBINERS(name, a, b)                                     \
    static vfloat32m1_t force_inline rvv_pd_combine_##name (                  \
	vfloat32m1_t sa, vfloat32m1_t s, vfloat32m1_t da, vfloat32m1_t d,     \
	size_t vl)                                                            \
    {                                                                         \
	const vfloat32m1_t fa = rvv_get_factor (a, sa, da, vl);               \
	const vfloat32m1_t fb = rvv_get_factor (b, sa, da, vl);               \
	vfloat32m1_t t0 = __riscv_vfadd_vv_f32m1 (                            \
	    __riscv_vfmul_vv_f32m1 (s, fa, vl),                               \
	    __riscv_vfmul_vv_f32m1 (d, fb, vl), vl);                          \
	return __riscv_vfmin_vv_f32m1 (__riscv_vfmv_v_f_f32m1 (1.0f, vl), t0, \
				       vl);                                   \
    }                                                                         \
                                                                              \
    RVV_MAKE_COMBINERS (name, rvv_pd_combine_##name, rvv_pd_combine_##name)

RVV_MAKE_PD_COMBINERS (clear, ZERO, ZERO)
RVV_MAKE_PD_COMBINERS (src, ONE, ZERO)
RVV_MAKE_PD_COMBINERS (dst, ZERO, ONE)
/* Straight Porter-Duff operators, expressed as (src factor, dst factor)
 * pairs; see rvv_get_factor() for the factor semantics. */
RVV_MAKE_PD_COMBINERS (over, ONE, INV_SA)
RVV_MAKE_PD_COMBINERS (over_reverse, INV_DA, ONE)
RVV_MAKE_PD_COMBINERS (in, DEST_ALPHA, ZERO)
RVV_MAKE_PD_COMBINERS (in_reverse, ZERO, SRC_ALPHA)
RVV_MAKE_PD_COMBINERS (out, INV_DA, ZERO)
RVV_MAKE_PD_COMBINERS (out_reverse, ZERO, INV_SA)
RVV_MAKE_PD_COMBINERS (atop, DEST_ALPHA, INV_SA)
RVV_MAKE_PD_COMBINERS (atop_reverse, INV_DA, SRC_ALPHA)
RVV_MAKE_PD_COMBINERS (xor, INV_DA, INV_SA)
RVV_MAKE_PD_COMBINERS (add, ONE, ONE)
RVV_MAKE_PD_COMBINERS (saturate, INV_DA_OVER_SA, ONE)

/* Disjoint variants. */
RVV_MAKE_PD_COMBINERS (disjoint_clear, ZERO, ZERO)
RVV_MAKE_PD_COMBINERS (disjoint_src, ONE, ZERO)
RVV_MAKE_PD_COMBINERS (disjoint_dst, ZERO, ONE)
RVV_MAKE_PD_COMBINERS (disjoint_over, ONE, INV_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (disjoint_over_reverse, INV_DA_OVER_SA, ONE)
RVV_MAKE_PD_COMBINERS (disjoint_in, ONE_MINUS_INV_DA_OVER_SA, ZERO)
RVV_MAKE_PD_COMBINERS (disjoint_in_reverse, ZERO, ONE_MINUS_INV_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (disjoint_out, INV_DA_OVER_SA, ZERO)
RVV_MAKE_PD_COMBINERS (disjoint_out_reverse, ZERO, INV_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (disjoint_atop, ONE_MINUS_INV_DA_OVER_SA, INV_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (disjoint_atop_reverse, INV_DA_OVER_SA,
		       ONE_MINUS_INV_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (disjoint_xor, INV_DA_OVER_SA, INV_SA_OVER_DA)

/* Conjoint variants. */
RVV_MAKE_PD_COMBINERS (conjoint_clear, ZERO, ZERO)
RVV_MAKE_PD_COMBINERS (conjoint_src, ONE, ZERO)
RVV_MAKE_PD_COMBINERS (conjoint_dst, ZERO, ONE)
RVV_MAKE_PD_COMBINERS (conjoint_over, ONE, ONE_MINUS_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (conjoint_over_reverse, ONE_MINUS_DA_OVER_SA, ONE)
RVV_MAKE_PD_COMBINERS (conjoint_in, DA_OVER_SA, ZERO)
RVV_MAKE_PD_COMBINERS (conjoint_in_reverse, ZERO, SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (conjoint_out, ONE_MINUS_DA_OVER_SA, ZERO)
RVV_MAKE_PD_COMBINERS (conjoint_out_reverse, ZERO, ONE_MINUS_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (conjoint_atop, DA_OVER_SA, ONE_MINUS_SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (conjoint_atop_reverse, ONE_MINUS_DA_OVER_SA, SA_OVER_DA)
RVV_MAKE_PD_COMBINERS (conjoint_xor, ONE_MINUS_DA_OVER_SA,
		       ONE_MINUS_SA_OVER_DA)

/* Separable PDF blend modes.  The alpha channel composites as "over"
 * (da + sa - da*sa).  The colour channel computes
 * (1 - sa)*d + (1 - da)*s + rvv_blend_##name (sa, s, da, d), where the
 * per-mode rvv_blend_##name helper is defined earlier in this file
 * (outside this view). */
#define RVV_MAKE_SEPARABLE_PDF_COMBINERS(name)                                 \
    static force_inline vfloat32m1_t rvv_combine_##name##_a (                  \
	vfloat32m1_t sa, vfloat32m1_t s, vfloat32m1_t da, vfloat32m1_t d,      \
	size_t vl)                                                             \
    {                                                                          \
	return __riscv_vfsub_vv_f32m1 (__riscv_vfadd_vv_f32m1 (da, sa, vl),    \
				       __riscv_vfmul_vv_f32m1 (da, sa, vl),    \
				       vl);                                    \
    }                                                                          \
                                                                               \
    static force_inline vfloat32m1_t rvv_combine_##name##_c (                  \
	vfloat32m1_t sa, vfloat32m1_t s, vfloat32m1_t da, vfloat32m1_t d,      \
	size_t vl)                                                             \
    {                                                                          \
	vfloat32m1_t f = __riscv_vfmul_vf_f32m1 (                              \
	    __riscv_vfadd_vv_f32m1 (                                           \
		__riscv_vfmul_vv_f32m1 (__riscv_vfsub_vf_f32m1 (sa, 1.0f, vl), \
					d, vl),                                \
		__riscv_vfmul_vv_f32m1 (__riscv_vfsub_vf_f32m1 (da, 1.0f, vl), \
					s, vl),                                \
		vl),                                                           \
	    -1.0f, vl);                                                        \
                                                                               \
	return __riscv_vfadd_vv_f32m1 (f, rvv_blend_##name (sa, s, da, d, vl), \
				       vl);                                    \
    }                                                                          \
                                                                               \
    RVV_MAKE_COMBINERS (name, rvv_combine_##name##_a, rvv_combine_##name##_c)

RVV_MAKE_SEPARABLE_PDF_COMBINERS (multiply)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (screen)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (overlay)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (darken)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (lighten)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (color_dodge)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (color_burn)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (hard_light)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (soft_light)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (difference)
RVV_MAKE_SEPARABLE_PDF_COMBINERS (exclusion)

/* No dedicated fast paths yet: the sentinel-only table makes this
 * implementation fall back to the generic code for blits. */
static const pixman_fast_path_t rvv_fast_paths[] = {
    {PIXMAN_OP_NONE},
};

// clang-format off
/* Build the RVV implementation: install every float combiner (unified and
 * component-alpha) into the implementation vtable; everything else is
 * delegated to `fallback'.  (Function continues on the following lines.) */
pixman_implementation_t *
_pixman_implementation_create_rvv (pixman_implementation_t *fallback)
{
    pixman_implementation_t *imp =
	_pixman_implementation_create (fallback, rvv_fast_paths);

    imp->combine_float[PIXMAN_OP_CLEAR] = rvv_combine_clear_u_float;
    imp->combine_float[PIXMAN_OP_SRC] = rvv_combine_src_u_float;
    imp->combine_float[PIXMAN_OP_DST] = rvv_combine_dst_u_float;
    imp->combine_float[PIXMAN_OP_OVER] = rvv_combine_over_u_float;
    /* Remaining straight operators, unified variants. */
    imp->combine_float[PIXMAN_OP_OVER_REVERSE] =
	rvv_combine_over_reverse_u_float;
    imp->combine_float[PIXMAN_OP_IN] = rvv_combine_in_u_float;
    imp->combine_float[PIXMAN_OP_IN_REVERSE] = rvv_combine_in_reverse_u_float;
    imp->combine_float[PIXMAN_OP_OUT] = rvv_combine_out_u_float;
    imp->combine_float[PIXMAN_OP_OUT_REVERSE] =
	rvv_combine_out_reverse_u_float;
    imp->combine_float[PIXMAN_OP_ATOP] = rvv_combine_atop_u_float;
    imp->combine_float[PIXMAN_OP_ATOP_REVERSE] =
	rvv_combine_atop_reverse_u_float;
    imp->combine_float[PIXMAN_OP_XOR] = rvv_combine_xor_u_float;
    imp->combine_float[PIXMAN_OP_ADD] = rvv_combine_add_u_float;
    imp->combine_float[PIXMAN_OP_SATURATE] = rvv_combine_saturate_u_float;

    /* Disjoint, unified */
    imp->combine_float[PIXMAN_OP_DISJOINT_CLEAR] =
	rvv_combine_disjoint_clear_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_SRC] =
	rvv_combine_disjoint_src_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_DST] =
	rvv_combine_disjoint_dst_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_OVER] =
	rvv_combine_disjoint_over_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_OVER_REVERSE] =
	rvv_combine_disjoint_over_reverse_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_IN] =
	rvv_combine_disjoint_in_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_IN_REVERSE] =
	rvv_combine_disjoint_in_reverse_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_OUT] =
	rvv_combine_disjoint_out_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_OUT_REVERSE] =
	rvv_combine_disjoint_out_reverse_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_ATOP] =
	rvv_combine_disjoint_atop_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_ATOP_REVERSE] =
	rvv_combine_disjoint_atop_reverse_u_float;
    imp->combine_float[PIXMAN_OP_DISJOINT_XOR] =
	rvv_combine_disjoint_xor_u_float;

    /* Conjoint, unified */
    imp->combine_float[PIXMAN_OP_CONJOINT_CLEAR] =
	rvv_combine_conjoint_clear_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_SRC] =
	rvv_combine_conjoint_src_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_DST] =
	rvv_combine_conjoint_dst_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_OVER] =
	rvv_combine_conjoint_over_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_OVER_REVERSE] =
	rvv_combine_conjoint_over_reverse_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_IN] =
	rvv_combine_conjoint_in_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_IN_REVERSE] =
	rvv_combine_conjoint_in_reverse_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_OUT] =
	rvv_combine_conjoint_out_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_OUT_REVERSE] =
	rvv_combine_conjoint_out_reverse_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_ATOP] =
	rvv_combine_conjoint_atop_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_ATOP_REVERSE] =
	rvv_combine_conjoint_atop_reverse_u_float;
    imp->combine_float[PIXMAN_OP_CONJOINT_XOR] =
	rvv_combine_conjoint_xor_u_float;

    /* PDF operators, unified */
    imp->combine_float[PIXMAN_OP_MULTIPLY] = rvv_combine_multiply_u_float;
    imp->combine_float[PIXMAN_OP_SCREEN] = rvv_combine_screen_u_float;
    imp->combine_float[PIXMAN_OP_OVERLAY] = rvv_combine_overlay_u_float;
    imp->combine_float[PIXMAN_OP_DARKEN] = rvv_combine_darken_u_float;
    imp->combine_float[PIXMAN_OP_LIGHTEN] = rvv_combine_lighten_u_float;
    imp->combine_float[PIXMAN_OP_HARD_LIGHT] = rvv_combine_hard_light_u_float;
    imp->combine_float[PIXMAN_OP_SOFT_LIGHT] = rvv_combine_soft_light_u_float;
    imp->combine_float[PIXMAN_OP_DIFFERENCE] = rvv_combine_difference_u_float;
    imp->combine_float[PIXMAN_OP_EXCLUSION] = rvv_combine_exclusion_u_float;
    imp->combine_float[PIXMAN_OP_COLOR_DODGE] =
	rvv_combine_color_dodge_u_float;
    imp->combine_float[PIXMAN_OP_COLOR_BURN] = rvv_combine_color_burn_u_float;

    /* Component alpha combiners */
    imp->combine_float_ca[PIXMAN_OP_CLEAR] = rvv_combine_clear_ca_float;
    imp->combine_float_ca[PIXMAN_OP_SRC] = rvv_combine_src_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DST] = rvv_combine_dst_ca_float;
    imp->combine_float_ca[PIXMAN_OP_OVER] = rvv_combine_over_ca_float;
    imp->combine_float_ca[PIXMAN_OP_OVER_REVERSE] =
	rvv_combine_over_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_IN] = rvv_combine_in_ca_float;
    imp->combine_float_ca[PIXMAN_OP_IN_REVERSE] =
	rvv_combine_in_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_OUT] = rvv_combine_out_ca_float;
    imp->combine_float_ca[PIXMAN_OP_OUT_REVERSE] =
	rvv_combine_out_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_ATOP] = rvv_combine_atop_ca_float;
    imp->combine_float_ca[PIXMAN_OP_ATOP_REVERSE] =
	rvv_combine_atop_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_XOR] = rvv_combine_xor_ca_float;
    imp->combine_float_ca[PIXMAN_OP_ADD] = rvv_combine_add_ca_float;
    imp->combine_float_ca[PIXMAN_OP_SATURATE] = rvv_combine_saturate_ca_float;

    /* Disjoint CA */
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_CLEAR] =
	rvv_combine_disjoint_clear_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_SRC] =
	rvv_combine_disjoint_src_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_DST] =
	rvv_combine_disjoint_dst_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_OVER] =
	rvv_combine_disjoint_over_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] =
	rvv_combine_disjoint_over_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_IN] =
	rvv_combine_disjoint_in_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] =
	rvv_combine_disjoint_in_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_OUT] =
	rvv_combine_disjoint_out_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] =
	rvv_combine_disjoint_out_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_ATOP] =
	rvv_combine_disjoint_atop_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] =
	rvv_combine_disjoint_atop_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DISJOINT_XOR] =
	rvv_combine_disjoint_xor_ca_float;

    /* Conjoint CA */
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_CLEAR] =
	rvv_combine_conjoint_clear_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_SRC] =
	rvv_combine_conjoint_src_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_DST] =
	rvv_combine_conjoint_dst_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_OVER] =
	rvv_combine_conjoint_over_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] =
	rvv_combine_conjoint_over_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_IN] =
	rvv_combine_conjoint_in_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] =
	rvv_combine_conjoint_in_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_OUT] =
	rvv_combine_conjoint_out_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] =
	rvv_combine_conjoint_out_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_ATOP] =
	rvv_combine_conjoint_atop_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] =
	rvv_combine_conjoint_atop_reverse_ca_float;
    imp->combine_float_ca[PIXMAN_OP_CONJOINT_XOR] =
	rvv_combine_conjoint_xor_ca_float;

    /* PDF operators CA */
    imp->combine_float_ca[PIXMAN_OP_MULTIPLY] = rvv_combine_multiply_ca_float;
    imp->combine_float_ca[PIXMAN_OP_SCREEN] = rvv_combine_screen_ca_float;
    imp->combine_float_ca[PIXMAN_OP_OVERLAY] = rvv_combine_overlay_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DARKEN] = rvv_combine_darken_ca_float;
    imp->combine_float_ca[PIXMAN_OP_LIGHTEN] = rvv_combine_lighten_ca_float;
    imp->combine_float_ca[PIXMAN_OP_COLOR_DODGE] =
	rvv_combine_color_dodge_ca_float;
    imp->combine_float_ca[PIXMAN_OP_COLOR_BURN] =
	rvv_combine_color_burn_ca_float;
    imp->combine_float_ca[PIXMAN_OP_HARD_LIGHT] =
	rvv_combine_hard_light_ca_float;
    imp->combine_float_ca[PIXMAN_OP_SOFT_LIGHT] =
	rvv_combine_soft_light_ca_float;
    imp->combine_float_ca[PIXMAN_OP_DIFFERENCE] =
	rvv_combine_difference_ca_float;
    imp->combine_float_ca[PIXMAN_OP_EXCLUSION] =
	rvv_combine_exclusion_ca_float;

    /* It is not clear that these make sense, so make them noops for now */
    imp->combine_float_ca[PIXMAN_OP_HSL_HUE] = rvv_combine_dst_u_float;
    imp->combine_float_ca[PIXMAN_OP_HSL_SATURATION] = rvv_combine_dst_u_float;
    imp->combine_float_ca[PIXMAN_OP_HSL_COLOR] = rvv_combine_dst_u_float;
imp->combine_float_ca[PIXMAN_OP_HSL_LUMINOSITY] = rvv_combine_dst_u_float; return imp; } // clang-format on././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-solid-fill.c0000664000175000017500000000427214712446423020113 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007, 2009 Red Hat, Inc. * Copyright Âİ 2009 Soren Sandmann * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" static uint32_t color_to_uint32 (const pixman_color_t *color) { return ((unsigned int) color->alpha >> 8 << 24) | ((unsigned int) color->red >> 8 << 16) | ((unsigned int) color->green & 0xff00) | ((unsigned int) color->blue >> 8); } static argb_t color_to_float (const pixman_color_t *color) { argb_t result; result.a = pixman_unorm_to_float (color->alpha, 16); result.r = pixman_unorm_to_float (color->red, 16); result.g = pixman_unorm_to_float (color->green, 16); result.b = pixman_unorm_to_float (color->blue, 16); return result; } PIXMAN_EXPORT pixman_image_t * pixman_image_create_solid_fill (const pixman_color_t *color) { pixman_image_t *img = _pixman_image_allocate (); if (!img) return NULL; img->type = SOLID; img->solid.color = *color; img->solid.color_32 = color_to_uint32 (color); img->solid.color_float = color_to_float (color); return img; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-sse2.c0000664000175000017500000047607514712446423016747 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Rodrigo Kumpera * Copyright Âİ 2008 Andrİ TupinambĦ * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Red Hat not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. Red Hat makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. 
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Rodrigo Kumpera (kumpera@gmail.com) * Andrİ TupinambĦ (andrelrt@gmail.com) * * Based on work by Owen Taylor and S¸ren Sandmann */ #ifdef HAVE_CONFIG_H #include #endif /* PSHUFD is slow on a lot of old processors, and new processors have SSSE3 */ #define PSHUFD_IS_FAST 0 #include /* for _mm_shuffle_pi16 and _MM_SHUFFLE */ #include /* for SSE2 intrinsics */ #include "pixman-private.h" #include "pixman-combine32.h" #include "pixman-inlines.h" static __m128i mask_0080; static __m128i mask_00ff; static __m128i mask_0101; static __m128i mask_ffff; static __m128i mask_ff000000; static __m128i mask_alpha; static __m128i mask_565_r; static __m128i mask_565_g1, mask_565_g2; static __m128i mask_565_b; static __m128i mask_red; static __m128i mask_green; static __m128i mask_blue; static __m128i mask_565_fix_rb; static __m128i mask_565_fix_g; static __m128i mask_565_rb; static __m128i mask_565_pack_multiplier; static force_inline __m128i unpack_32_1x128 (uint32_t data) { return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128 ()); } static force_inline void unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi) { *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ()); *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ()); } static force_inline __m128i unpack_565_to_8888 (__m128i lo) { __m128i r, g, b, rb, t; r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red); g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green); b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), 
mask_blue);

    /* Replicate the high bits of each expanded channel into its low bits so
     * 0x1f maps to 0xff (full-range expansion). */
    rb = _mm_or_si128 (r, b);

    t  = _mm_and_si128 (rb, mask_565_fix_rb);
    t  = _mm_srli_epi32 (t, 5);
    rb = _mm_or_si128 (rb, t);

    t = _mm_and_si128 (g, mask_565_fix_g);
    t = _mm_srli_epi32 (t, 6);
    g = _mm_or_si128 (g, t);

    return _mm_or_si128 (rb, g);
}

/* Expand eight packed r5g6b5 pixels into four registers of unpacked
 * (16-bit lane) a8r8g8b8 data. */
static force_inline void
unpack_565_128_4x128 (__m128i  data,
		      __m128i* data0,
		      __m128i* data1,
		      __m128i* data2,
		      __m128i* data3)
{
    __m128i lo, hi;

    lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
    hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());

    lo = unpack_565_to_8888 (lo);
    hi = unpack_565_to_8888 (hi);

    unpack_128_2x128 (lo, data0, data1);
    unpack_128_2x128 (hi, data2, data3);
}

/* Pack one x8r8g8b8 pixel down to r5g6b5 by truncating each channel. */
static force_inline uint16_t
pack_565_32_16 (uint32_t pixel)
{
    return (uint16_t) (((pixel >> 8) & 0xf800) |
		       ((pixel >> 5) & 0x07e0) |
		       ((pixel >> 3) & 0x001f));
}

/* Saturate-pack two registers of 16-bit lanes back to packed 8-bit. */
static force_inline __m128i
pack_2x128_128 (__m128i lo, __m128i hi)
{
    return _mm_packus_epi16 (lo, hi);
}

/* Pack eight packed x8r8g8b8 pixels (two registers) to r5g6b5, using a
 * madd-based shuffle of the red/blue fields. */
static force_inline __m128i
pack_565_2packedx128_128 (__m128i lo, __m128i hi)
{
    __m128i rb0 = _mm_and_si128 (lo, mask_565_rb);
    __m128i rb1 = _mm_and_si128 (hi, mask_565_rb);

    __m128i t0 = _mm_madd_epi16 (rb0, mask_565_pack_multiplier);
    __m128i t1 = _mm_madd_epi16 (rb1, mask_565_pack_multiplier);

    __m128i g0 = _mm_and_si128 (lo, mask_green);
    __m128i g1 = _mm_and_si128 (hi, mask_green);

    t0 = _mm_or_si128 (t0, g0);
    t1 = _mm_or_si128 (t1, g1);

    /* Simulates _mm_packus_epi32 */
    t0 = _mm_slli_epi32 (t0, 16 - 5);
    t1 = _mm_slli_epi32 (t1, 16 - 5);
    t0 = _mm_srai_epi32 (t0, 16);
    t1 = _mm_srai_epi32 (t1, 16);
    return _mm_packs_epi32 (t0, t1);
}

/* Pack eight unpacked (16-bit lane) pixels to r5g6b5. */
static force_inline __m128i
pack_565_2x128_128 (__m128i lo, __m128i hi)
{
    __m128i data;
    __m128i r, g1, g2, b;

    data = pack_2x128_128 (lo, hi);

    r  = _mm_and_si128 (data, mask_565_r);
    g1 = _mm_and_si128 (_mm_slli_epi32 (data, 3), mask_565_g1);
    g2 = _mm_and_si128 (_mm_srli_epi32 (data, 5), mask_565_g2);
    b  = _mm_and_si128 (_mm_srli_epi32 (data, 3), mask_565_b);

    return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
}

static force_inline
/* Pack sixteen unpacked pixels (four registers) to r5g6b5. */
__m128i
pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
{
    return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1),
			     pack_565_2x128_128 (*xmm2, *xmm3));
}

/* True if all four pixels have alpha == 0xff; the 0x8888 movemask bits pick
 * out the alpha byte (byte 3) of each 32-bit pixel. */
static force_inline int
is_opaque (__m128i x)
{
    __m128i ffs = _mm_cmpeq_epi8 (x, x);

    return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
}

/* True if all four pixels are entirely zero. */
static force_inline int
is_zero (__m128i x)
{
    return _mm_movemask_epi8 (
	_mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) == 0xffff;
}

/* True if all four pixels have alpha == 0. */
static force_inline int
is_transparent (__m128i x)
{
    return (_mm_movemask_epi8 (
		_mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) & 0x8888) == 0x8888;
}

/* Unpack one pixel and replicate it into both 64-bit halves. */
static force_inline __m128i
expand_pixel_32_1x128 (uint32_t data)
{
    return _mm_shuffle_epi32 (unpack_32_1x128 (data),
			      _MM_SHUFFLE (1, 0, 1, 0));
}

/* Broadcast the alpha lane (lane 3) of each unpacked pixel to all lanes. */
static force_inline __m128i
expand_alpha_1x128 (__m128i data)
{
    return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data,
						     _MM_SHUFFLE (3, 3, 3, 3)),
				_MM_SHUFFLE (3, 3, 3, 3));
}

/* Broadcast alpha (lane 3) for two registers of unpacked pixels. */
static force_inline void
expand_alpha_2x128 (__m128i  data_lo,
		    __m128i  data_hi,
		    __m128i* alpha_lo,
		    __m128i* alpha_hi)
{
    __m128i lo, hi;

    lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 3, 3, 3));
    hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 3, 3, 3));

    *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 3, 3, 3));
    *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 3, 3, 3));
}

/* Broadcast lane 0 (alpha of reversed-channel data) to all lanes. */
static force_inline void
expand_alpha_rev_2x128 (__m128i  data_lo,
			__m128i  data_hi,
			__m128i* alpha_lo,
			__m128i* alpha_hi)
{
    __m128i lo, hi;

    lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (0, 0, 0, 0));
    hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (0, 0, 0, 0));
    *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (0, 0, 0, 0));
    *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (0, 0, 0, 0));
}

/* Per-channel multiply with correct 8-bit rounding:
 * (x*a + 128 + ((x*a + 128) >> 8)) >> 8, done via the 0x0080/0x0101 masks.
 * (Continues on the next chunk.) */
static force_inline void
pix_multiply_2x128 (__m128i* data_lo,
		    __m128i* data_hi,
		    __m128i* alpha_lo,
		    __m128i* alpha_hi,
		    __m128i* ret_lo,
		    __m128i* ret_hi)
{
    __m128i lo, hi;

    lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
    hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
    lo =
_mm_adds_epu16 (lo, mask_0080);
    hi = _mm_adds_epu16 (hi, mask_0080);
    *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
    *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
}

/* ret = saturate (src*alpha_dst + dst*alpha_src), per channel. */
static force_inline void
pix_add_multiply_2x128 (__m128i* src_lo,
			__m128i* src_hi,
			__m128i* alpha_dst_lo,
			__m128i* alpha_dst_hi,
			__m128i* dst_lo,
			__m128i* dst_hi,
			__m128i* alpha_src_lo,
			__m128i* alpha_src_hi,
			__m128i* ret_lo,
			__m128i* ret_hi)
{
    __m128i t1_lo, t1_hi;
    __m128i t2_lo, t2_hi;

    pix_multiply_2x128 (src_lo, src_hi, alpha_dst_lo, alpha_dst_hi,
			&t1_lo, &t1_hi);
    pix_multiply_2x128 (dst_lo, dst_hi, alpha_src_lo, alpha_src_hi,
			&t2_lo, &t2_hi);

    *ret_lo = _mm_adds_epu8 (t1_lo, t2_lo);
    *ret_hi = _mm_adds_epu8 (t1_hi, t2_hi);
}

/* neg = 255 - data, per 16-bit lane (xor with 0x00ff). */
static force_inline void
negate_2x128 (__m128i  data_lo,
	      __m128i  data_hi,
	      __m128i* neg_lo,
	      __m128i* neg_hi)
{
    *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
    *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
}

/* Swap R and B lanes (ARGB <-> ABGR), keeping alpha in place. */
static force_inline void
invert_colors_2x128 (__m128i  data_lo,
		     __m128i  data_hi,
		     __m128i* inv_lo,
		     __m128i* inv_hi)
{
    __m128i lo, hi;

    lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 0, 1, 2));
    hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 0, 1, 2));
    *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 0, 1, 2));
    *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 0, 1, 2));
}

/* dst = src + dst*(255 - alpha): the core OVER operator on unpacked data. */
static force_inline void
over_2x128 (__m128i* src_lo,
	    __m128i* src_hi,
	    __m128i* alpha_lo,
	    __m128i* alpha_hi,
	    __m128i* dst_lo,
	    __m128i* dst_hi)
{
    __m128i t1, t2;

    negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);

    pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);

    *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
    *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
}

/* OVER with non-premultiplied, channel-reversed source: premultiply the
 * colour-swapped source by its alpha (alpha forced opaque via mask_alpha),
 * then composite.  (Continues on the next chunk.) */
static force_inline void
over_rev_non_pre_2x128 (__m128i  src_lo,
			__m128i  src_hi,
			__m128i* dst_lo,
			__m128i* dst_hi)
{
    __m128i lo, hi;
    __m128i alpha_lo, alpha_hi;

    expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);

    lo = _mm_or_si128 (alpha_lo, mask_alpha);
    hi = _mm_or_si128 (alpha_hi, mask_alpha);

    invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);

    over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
}

/* dst = (src IN mask) OVER dst, with the mask also applied to alpha. */
static force_inline void
in_over_2x128 (__m128i* src_lo,
	       __m128i* src_hi,
	       __m128i* alpha_lo,
	       __m128i* alpha_hi,
	       __m128i* mask_lo,
	       __m128i* mask_hi,
	       __m128i* dst_lo,
	       __m128i* dst_hi)
{
    __m128i s_lo, s_hi;
    __m128i a_lo, a_hi;

    pix_multiply_2x128 (src_lo, src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
    pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);

    over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
}

/* load 4 pixels from a 16-byte boundary aligned address */
static force_inline __m128i
load_128_aligned (__m128i* src)
{
    return _mm_load_si128 (src);
}

/* load 4 pixels from a unaligned address */
static force_inline __m128i
load_128_unaligned (const __m128i* src)
{
    return _mm_loadu_si128 (src);
}

/* save 4 pixels on a 16-byte boundary aligned address */
static force_inline void
save_128_aligned (__m128i* dst,
		  __m128i  data)
{
    _mm_store_si128 (dst, data);
}

/* Load one pixel into the low 32 bits without unpacking. */
static force_inline __m128i
load_32_1x128 (uint32_t data)
{
    return _mm_cvtsi32_si128 (data);
}

/* Broadcast lane 0 across the low four 16-bit lanes. */
static force_inline __m128i
expand_alpha_rev_1x128 (__m128i data)
{
    return _mm_shufflelo_epi16 (data, _MM_SHUFFLE (0, 0, 0, 0));
}

/* Broadcast one 8-bit value across the low four 16-bit lanes. */
static force_inline __m128i
expand_pixel_8_1x128 (uint8_t data)
{
    return _mm_shufflelo_epi16 (
	unpack_32_1x128 ((uint32_t)data), _MM_SHUFFLE (0, 0, 0, 0));
}

/* Single-pixel variant of pix_multiply_2x128 (same rounding scheme). */
static force_inline __m128i
pix_multiply_1x128 (__m128i data,
		    __m128i alpha)
{
    return _mm_mulhi_epu16 (_mm_adds_epu16 (_mm_mullo_epi16 (data, alpha),
					    mask_0080),
			    mask_0101);
}

/* Single-pixel variant of pix_add_multiply_2x128. */
static force_inline __m128i
pix_add_multiply_1x128 (__m128i* src,
			__m128i* alpha_dst,
			__m128i* dst,
			__m128i* alpha_src)
{
    __m128i t1 = pix_multiply_1x128 (*src, *alpha_dst);
    __m128i t2 = pix_multiply_1x128 (*dst, *alpha_src);

    return _mm_adds_epu8 (t1, t2);
}

/* 255 - data, per 16-bit lane. */
static force_inline __m128i
negate_1x128 (__m128i data)
{
    return _mm_xor_si128 (data, mask_00ff);
}

/* Swap R and B lanes of one unpacked pixel.  (Continues on the next
 * chunk.) */
static force_inline __m128i
invert_colors_1x128 (__m128i data)
{
    return
_mm_shufflelo_epi16 (data, _MM_SHUFFLE (3, 0, 1, 2));
}

/* Single-pixel OVER: src + dst*(255 - alpha). */
static force_inline __m128i
over_1x128 (__m128i src, __m128i alpha, __m128i dst)
{
    return _mm_adds_epu8 (src,
			  pix_multiply_1x128 (dst, negate_1x128 (alpha)));
}

/* Single-pixel (src IN mask) OVER dst. */
static force_inline __m128i
in_over_1x128 (__m128i* src, __m128i* alpha, __m128i* mask, __m128i* dst)
{
    return over_1x128 (pix_multiply_1x128 (*src, *mask),
		       pix_multiply_1x128 (*alpha, *mask),
		       *dst);
}

/* Single-pixel OVER with non-premultiplied, channel-reversed source. */
static force_inline __m128i
over_rev_non_pre_1x128 (__m128i src, __m128i dst)
{
    __m128i alpha = expand_alpha_1x128 (src);

    return over_1x128 (pix_multiply_1x128 (invert_colors_1x128 (src),
					   _mm_or_si128 (alpha, mask_alpha)),
		       alpha,
		       dst);
}

/* Saturate-pack one unpacked pixel back to packed a8r8g8b8. */
static force_inline uint32_t
pack_1x128_32 (__m128i data)
{
    return _mm_cvtsi128_si32 (_mm_packus_epi16 (data, _mm_setzero_si128 ()));
}

/* Expand one r5g6b5 pixel to an unpacked (16-bit lane) 8888 pixel. */
static force_inline __m128i
expand565_16_1x128 (uint16_t pixel)
{
    __m128i m = _mm_cvtsi32_si128 (pixel);

    m = unpack_565_to_8888 (m);

    return _mm_unpacklo_epi8 (m, _mm_setzero_si128 ());
}

/* Scalar OVER for one pixel, with the usual fast-outs: fully opaque source
 * wins outright, zero source leaves dst untouched. */
static force_inline uint32_t
core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
{
    uint8_t a;
    __m128i xmms;

    a = src >> 24;

    if (a == 0xff)
    {
	return src;
    }
    else if (src)
    {
	xmms = unpack_32_1x128 (src);
	return pack_1x128_32 (
	    over_1x128 (xmms, expand_alpha_1x128 (xmms),
			unpack_32_1x128 (dst)));
    }

    return dst;
}

/* Load one source pixel, multiplied by the mask's alpha when pm != NULL.
 * memcpy avoids an unaligned/aliasing-unsafe direct load. */
static force_inline uint32_t
combine1 (const uint32_t *ps, const uint32_t *pm)
{
    uint32_t s;
    memcpy(&s, ps, sizeof(uint32_t));

    if (pm)
    {
	__m128i ms, mm;

	mm = unpack_32_1x128 (*pm);
	mm = expand_alpha_1x128 (mm);

	ms = unpack_32_1x128 (s);
	ms = pix_multiply_1x128 (ms, mm);

	s = pack_1x128_32 (ms);
    }

    return s;
}

/* Load four source pixels, multiplied by the mask's alpha when pm != NULL;
 * an all-transparent mask short-circuits to zero.  (Continues on the next
 * chunk.) */
static force_inline __m128i
combine4 (const __m128i *ps, const __m128i *pm)
{
    __m128i xmm_src_lo, xmm_src_hi;
    __m128i xmm_msk_lo, xmm_msk_hi;
    __m128i s;

    if (pm)
    {
	xmm_msk_lo = load_128_unaligned (pm);

	if (is_transparent (xmm_msk_lo))
	    return _mm_setzero_si128 ();
    }

    s = load_128_unaligned (ps);

    if (pm)
    {
	unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
	unpack_128_2x128 (xmm_msk_lo,
&xmm_msk_lo, &xmm_msk_hi); expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_msk_lo, &xmm_msk_hi, &xmm_src_lo, &xmm_src_hi); s = pack_2x128_128 (xmm_src_lo, xmm_src_hi); } return s; } static force_inline void core_combine_over_u_sse2_mask (uint32_t * pd, const uint32_t* ps, const uint32_t* pm, int w) { uint32_t s, d; /* Align dst on a 16-byte boundary */ while (w && ((uintptr_t)pd & 15)) { d = *pd; s = combine1 (ps, pm); if (s) *pd = core_combine_over_u_pixel_sse2 (s, d); pd++; ps++; pm++; w--; } while (w >= 4) { __m128i mask = load_128_unaligned ((__m128i *)pm); if (!is_zero (mask)) { __m128i src; __m128i src_hi, src_lo; __m128i mask_hi, mask_lo; __m128i alpha_hi, alpha_lo; src = load_128_unaligned ((__m128i *)ps); if (is_opaque (_mm_and_si128 (src, mask))) { save_128_aligned ((__m128i *)pd, src); } else { __m128i dst = load_128_aligned ((__m128i *)pd); __m128i dst_hi, dst_lo; unpack_128_2x128 (mask, &mask_lo, &mask_hi); unpack_128_2x128 (src, &src_lo, &src_hi); expand_alpha_2x128 (mask_lo, mask_hi, &mask_lo, &mask_hi); pix_multiply_2x128 (&src_lo, &src_hi, &mask_lo, &mask_hi, &src_lo, &src_hi); unpack_128_2x128 (dst, &dst_lo, &dst_hi); expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi); over_2x128 (&src_lo, &src_hi, &alpha_lo, &alpha_hi, &dst_lo, &dst_hi); save_128_aligned ( (__m128i *)pd, pack_2x128_128 (dst_lo, dst_hi)); } } pm += 4; ps += 4; pd += 4; w -= 4; } while (w) { d = *pd; s = combine1 (ps, pm); if (s) *pd = core_combine_over_u_pixel_sse2 (s, d); pd++; ps++; pm++; w--; } } static force_inline void core_combine_over_u_sse2_no_mask (uint32_t * pd, const uint32_t* ps, int w) { uint32_t s, d; /* Align dst on a 16-byte boundary */ while (w && ((uintptr_t)pd & 15)) { d = *pd; s = *ps; if (s) *pd = core_combine_over_u_pixel_sse2 (s, d); pd++; ps++; w--; } while (w >= 4) { __m128i src; __m128i src_hi, src_lo, dst_hi, dst_lo; __m128i alpha_hi, alpha_lo; src = load_128_unaligned 
((__m128i *)ps); if (!is_zero (src)) { if (is_opaque (src)) { save_128_aligned ((__m128i *)pd, src); } else { __m128i dst = load_128_aligned ((__m128i *)pd); unpack_128_2x128 (src, &src_lo, &src_hi); unpack_128_2x128 (dst, &dst_lo, &dst_hi); expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi); over_2x128 (&src_lo, &src_hi, &alpha_lo, &alpha_hi, &dst_lo, &dst_hi); save_128_aligned ( (__m128i *)pd, pack_2x128_128 (dst_lo, dst_hi)); } } ps += 4; pd += 4; w -= 4; } while (w) { d = *pd; s = *ps; if (s) *pd = core_combine_over_u_pixel_sse2 (s, d); pd++; ps++; w--; } } static force_inline void sse2_combine_over_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { if (pm) core_combine_over_u_sse2_mask (pd, ps, pm, w); else core_combine_over_u_sse2_no_mask (pd, ps, w); } static void sse2_combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, d; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_alpha_lo, xmm_alpha_hi; /* Align dst on a 16-byte boundary */ while (w && ((uintptr_t)pd & 15)) { d = *pd; s = combine1 (ps, pm); *pd++ = core_combine_over_u_pixel_sse2 (d, s); w--; ps++; if (pm) pm++; } while (w >= 4) { /* I'm loading unaligned because I'm not sure * about the address alignment. 
*/ xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); xmm_dst_hi = load_128_aligned ((__m128i*) pd); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi); over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_src_lo, &xmm_src_hi); /* rebuid the 4 pixel data and save*/ save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_src_lo, xmm_src_hi)); w -= 4; ps += 4; pd += 4; if (pm) pm += 4; } while (w) { d = *pd; s = combine1 (ps, pm); *pd++ = core_combine_over_u_pixel_sse2 (d, s); ps++; w--; if (pm) pm++; } } static force_inline uint32_t core_combine_in_u_pixel_sse2 (uint32_t src, uint32_t dst) { uint32_t maska = src >> 24; if (maska == 0) { return 0; } else if (maska != 0xff) { return pack_1x128_32 ( pix_multiply_1x128 (unpack_32_1x128 (dst), expand_alpha_1x128 (unpack_32_1x128 (src)))); } return dst; } static void sse2_combine_in_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, d; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; while (w && ((uintptr_t)pd & 15)) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_in_u_pixel_sse2 (d, s); w--; ps++; if (pm) pm++; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*) pd); xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; w -= 4; if (pm) pm += 4; } while (w) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_in_u_pixel_sse2 (d, s); w--; ps++; if (pm) pm++; } } static void 
static void
sse2_combine_in_reverse_u (pixman_implementation_t *imp,
                           pixman_op_t              op,
                           uint32_t *               pd,
                           const uint32_t *         ps,
                           const uint32_t *         pm,
                           int                      w)
{
    /* IN-reverse: dst = dst * src.alpha (note the swapped argument order
     * into the shared per-pixel helper compared with sse2_combine_in_u).
     */
    uint32_t s, d;
    __m128i src_lo, src_hi;
    __m128i dst_lo, dst_hi;

    /* Head: scalar until dst is 16-byte aligned. */
    while (w && ((uintptr_t)pd & 15))
    {
	s = combine1 (ps, pm);
	d = *pd;

	*pd++ = core_combine_in_u_pixel_sse2 (s, d);
	ps++;
	w--;
	if (pm)
	    pm++;
    }

    /* Main: four pixels per iteration. */
    while (w >= 4)
    {
	dst_hi = load_128_aligned ((__m128i*) pd);
	src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);

	unpack_128_2x128 (src_hi, &src_lo, &src_hi);
	expand_alpha_2x128 (src_lo, src_hi, &src_lo, &src_hi);

	unpack_128_2x128 (dst_hi, &dst_lo, &dst_hi);
	pix_multiply_2x128 (&dst_lo, &dst_hi,
			    &src_lo, &src_hi,
			    &dst_lo, &dst_hi);

	save_128_aligned ((__m128i*)pd,
			  pack_2x128_128 (dst_lo, dst_hi));

	ps += 4;
	pd += 4;
	w -= 4;
	if (pm)
	    pm += 4;
    }

    /* Tail: leftover scalar pixels. */
    while (w)
    {
	s = combine1 (ps, pm);
	d = *pd;

	*pd++ = core_combine_in_u_pixel_sse2 (s, d);
	ps++;
	w--;
	if (pm)
	    pm++;
    }
}
4; pd += 4; if (pm) pm += 4; w -= 4; } while (w) { uint32_t s = combine1 (ps, pm); uint32_t d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( unpack_32_1x128 (d), negate_1x128 ( expand_alpha_1x128 (unpack_32_1x128 (s))))); ps++; if (pm) pm++; w--; } } static void sse2_combine_out_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { while (w && ((uintptr_t)pd & 15)) { uint32_t s = combine1 (ps, pm); uint32_t d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( unpack_32_1x128 (s), negate_1x128 ( expand_alpha_1x128 (unpack_32_1x128 (d))))); w--; ps++; if (pm) pm++; } while (w >= 4) { __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm); xmm_dst_hi = load_128_aligned ((__m128i*) pd); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); negate_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; w -= 4; if (pm) pm += 4; } while (w) { uint32_t s = combine1 (ps, pm); uint32_t d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( unpack_32_1x128 (s), negate_1x128 ( expand_alpha_1x128 (unpack_32_1x128 (d))))); w--; ps++; if (pm) pm++; } } static force_inline uint32_t core_combine_atop_u_pixel_sse2 (uint32_t src, uint32_t dst) { __m128i s = unpack_32_1x128 (src); __m128i d = unpack_32_1x128 (dst); __m128i sa = negate_1x128 (expand_alpha_1x128 (s)); __m128i da = expand_alpha_1x128 (d); return pack_1x128_32 (pix_add_multiply_1x128 (&s, &da, &d, &sa)); } static void sse2_combine_atop_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, d; __m128i 
xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; while (w && ((uintptr_t)pd & 15)) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_atop_u_pixel_sse2 (s, d); w--; ps++; if (pm) pm++; } while (w >= 4) { xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); xmm_dst_hi = load_128_aligned ((__m128i*) pd); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi); pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; w -= 4; if (pm) pm += 4; } while (w) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_atop_u_pixel_sse2 (s, d); w--; ps++; if (pm) pm++; } } static force_inline uint32_t core_combine_reverse_atop_u_pixel_sse2 (uint32_t src, uint32_t dst) { __m128i s = unpack_32_1x128 (src); __m128i d = unpack_32_1x128 (dst); __m128i sa = expand_alpha_1x128 (s); __m128i da = negate_1x128 (expand_alpha_1x128 (d)); return pack_1x128_32 (pix_add_multiply_1x128 (&s, &da, &d, &sa)); } static void sse2_combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, d; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; while (w && ((uintptr_t)pd & 15)) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d); ps++; w--; if (pm) pm++; } while (w >= 
4) { xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm); xmm_dst_hi = load_128_aligned ((__m128i*) pd); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; w -= 4; if (pm) pm += 4; } while (w) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d); ps++; w--; if (pm) pm++; } } static force_inline uint32_t core_combine_xor_u_pixel_sse2 (uint32_t src, uint32_t dst) { __m128i s = unpack_32_1x128 (src); __m128i d = unpack_32_1x128 (dst); __m128i neg_d = negate_1x128 (expand_alpha_1x128 (d)); __m128i neg_s = negate_1x128 (expand_alpha_1x128 (s)); return pack_1x128_32 (pix_add_multiply_1x128 (&s, &neg_d, &d, &neg_s)); } static void sse2_combine_xor_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dst, const uint32_t * src, const uint32_t * mask, int width) { int w = width; uint32_t s, d; uint32_t* pd = dst; const uint32_t* ps = src; const uint32_t* pm = mask; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; while (w && ((uintptr_t)pd & 15)) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_xor_u_pixel_sse2 (s, d); w--; ps++; if (pm) pm++; } while (w >= 4) { xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm); xmm_dst = load_128_aligned ((__m128i*) pd); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 
(xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi); negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; w -= 4; if (pm) pm += 4; } while (w) { s = combine1 (ps, pm); d = *pd; *pd++ = core_combine_xor_u_pixel_sse2 (s, d); w--; ps++; if (pm) pm++; } } static force_inline void sse2_combine_add_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dst, const uint32_t * src, const uint32_t * mask, int width) { int w = width; uint32_t s, d; uint32_t* pd = dst; const uint32_t* ps = src; const uint32_t* pm = mask; while (w && (uintptr_t)pd & 15) { s = combine1 (ps, pm); d = *pd; ps++; if (pm) pm++; *pd++ = _mm_cvtsi128_si32 ( _mm_adds_epu8 (_mm_cvtsi32_si128 (s), _mm_cvtsi32_si128 (d))); w--; } while (w >= 4) { __m128i s; s = combine4 ((__m128i*)ps, (__m128i*)pm); save_128_aligned ( (__m128i*)pd, _mm_adds_epu8 (s, load_128_aligned ((__m128i*)pd))); pd += 4; ps += 4; if (pm) pm += 4; w -= 4; } while (w--) { s = combine1 (ps, pm); d = *pd; ps++; *pd++ = _mm_cvtsi128_si32 ( _mm_adds_epu8 (_mm_cvtsi32_si128 (s), _mm_cvtsi32_si128 (d))); if (pm) pm++; } } static force_inline uint32_t core_combine_saturate_u_pixel_sse2 (uint32_t src, uint32_t dst) { __m128i ms = unpack_32_1x128 (src); __m128i md = unpack_32_1x128 (dst); uint32_t sa = src >> 24; uint32_t da = ~dst >> 24; if (sa > da) { ms = pix_multiply_1x128 ( ms, expand_alpha_1x128 (unpack_32_1x128 (DIV_UN8 (da, sa) << 24))); } return pack_1x128_32 (_mm_adds_epu16 (md, ms)); } 
static void
sse2_combine_saturate_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               pd,
                         const uint32_t *         ps,
                         const uint32_t *         pm,
                         int                      w)
{
    /* SATURATE combiner. The vector loop tests whether any of the four
     * source alphas is greater than the corresponding ~dst alpha; if so it
     * falls back to the exact scalar path for that group, otherwise a plain
     * saturating add suffices.
     */
    uint32_t s, d;

    uint32_t pack_cmp;
    __m128i xmm_src, xmm_dst;

    /* Head: scalar until dst is 16-byte aligned. */
    while (w && (uintptr_t)pd & 15)
    {
	s = combine1 (ps, pm);
	d = *pd;

	*pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
	w--;
	ps++;
	if (pm)
	    pm++;
    }

    while (w >= 4)
    {
	xmm_dst = load_128_aligned ((__m128i*)pd);
	xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);

	/* Compare each src alpha against the corresponding ~dst alpha. */
	pack_cmp = _mm_movemask_epi8 (
	    _mm_cmpgt_epi32 (
		_mm_srli_epi32 (xmm_src, 24),
		_mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));

	if (pack_cmp)
	{
	    /* At least one source alpha is greater than the respective
	     * ~dst alpha: handle all four pixels via the scalar helper.
	     */
	    int i;

	    for (i = 0; i < 4; i++)
	    {
		s = combine1 (ps++, pm);
		d = *pd;

		*pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
		if (pm)
		    pm++;
	    }
	}
	else
	{
	    save_128_aligned ((__m128i*)pd,
			      _mm_adds_epu8 (xmm_dst, xmm_src));

	    pd += 4;
	    ps += 4;
	    if (pm)
		pm += 4;
	}

	w -= 4;
    }

    /* Tail: leftover scalar pixels. */
    while (w--)
    {
	s = combine1 (ps, pm);
	d = *pd;

	*pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
	ps++;
	if (pm)
	    pm++;
    }
}
&xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; *pd++ = pack_1x128_32 ( pix_multiply_1x128 (unpack_32_1x128 (s), unpack_32_1x128 (m))); w--; } } static force_inline uint32_t core_combine_over_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst) { __m128i s = unpack_32_1x128 (src); __m128i expAlpha = expand_alpha_1x128 (s); __m128i unpk_mask = unpack_32_1x128 (mask); __m128i unpk_dst = unpack_32_1x128 (dst); return pack_1x128_32 (in_over_1x128 (&s, &expAlpha, &unpk_mask, &unpk_dst)); } static void sse2_combine_over_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, m, d; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask_lo, xmm_mask_hi; while (w && (uintptr_t)pd & 15) { s = *ps++; m = *pm++; d = *pd; *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d); w--; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*)pd); xmm_src_hi = load_128_unaligned ((__m128i*)ps); xmm_mask_hi = load_128_unaligned ((__m128i*)pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; d = *pd; *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d); w--; } } static force_inline uint32_t core_combine_over_reverse_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst) { __m128i d = unpack_32_1x128 (dst); return 
pack_1x128_32 ( over_1x128 (d, expand_alpha_1x128 (d), pix_multiply_1x128 (unpack_32_1x128 (src), unpack_32_1x128 (mask)))); } static void sse2_combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, m, d; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask_lo, xmm_mask_hi; while (w && (uintptr_t)pd & 15) { s = *ps++; m = *pm++; d = *pd; *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d); w--; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*)pd); xmm_src_hi = load_128_unaligned ((__m128i*)ps); xmm_mask_hi = load_128_unaligned ((__m128i*)pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; d = *pd; *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d); w--; } } static void sse2_combine_in_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, m, d; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask_lo, xmm_mask_hi; while (w && (uintptr_t)pd & 15) { s = *ps++; m = *pm++; d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( pix_multiply_1x128 (unpack_32_1x128 (s), unpack_32_1x128 (m)), expand_alpha_1x128 (unpack_32_1x128 (d)))); w--; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*)pd); xmm_src_hi = 
load_128_unaligned ((__m128i*)ps); xmm_mask_hi = load_128_unaligned ((__m128i*)pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( pix_multiply_1x128 ( unpack_32_1x128 (s), unpack_32_1x128 (m)), expand_alpha_1x128 (unpack_32_1x128 (d)))); w--; } } static void sse2_combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, m, d; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask_lo, xmm_mask_hi; while (w && (uintptr_t)pd & 15) { s = *ps++; m = *pm++; d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( unpack_32_1x128 (d), pix_multiply_1x128 (unpack_32_1x128 (m), expand_alpha_1x128 (unpack_32_1x128 (s))))); w--; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*)pd); xmm_src_hi = load_128_unaligned ((__m128i*)ps); xmm_mask_hi = load_128_unaligned ((__m128i*)pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_alpha_lo, &xmm_alpha_hi); pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, 
&xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( unpack_32_1x128 (d), pix_multiply_1x128 (unpack_32_1x128 (m), expand_alpha_1x128 (unpack_32_1x128 (s))))); w--; } } static void sse2_combine_out_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, m, d; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask_lo, xmm_mask_hi; while (w && (uintptr_t)pd & 15) { s = *ps++; m = *pm++; d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( pix_multiply_1x128 ( unpack_32_1x128 (s), unpack_32_1x128 (m)), negate_1x128 (expand_alpha_1x128 (unpack_32_1x128 (d))))); w--; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*)pd); xmm_src_hi = load_128_unaligned ((__m128i*)ps); xmm_mask_hi = load_128_unaligned ((__m128i*)pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi); negate_2x128 (xmm_alpha_lo, xmm_alpha_hi, &xmm_alpha_lo, &xmm_alpha_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; d = *pd; *pd++ = pack_1x128_32 ( pix_multiply_1x128 ( pix_multiply_1x128 ( unpack_32_1x128 (s), unpack_32_1x128 (m)), negate_1x128 (expand_alpha_1x128 (unpack_32_1x128 (d))))); w--; } } static void sse2_combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const 
/* Component-alpha ATOP for a single pixel:
 * (src * mask) * dst.alpha + dst * (1 - mask * src.alpha).
 */
static force_inline uint32_t
core_combine_atop_ca_pixel_sse2 (uint32_t src,
                                 uint32_t mask,
                                 uint32_t dst)
{
    __m128i xmm_m = unpack_32_1x128 (mask);
    __m128i xmm_s = unpack_32_1x128 (src);
    __m128i xmm_d = unpack_32_1x128 (dst);

    __m128i sa = expand_alpha_1x128 (xmm_s);
    __m128i da = expand_alpha_1x128 (xmm_d);

    __m128i masked_src = pix_multiply_1x128 (xmm_s, xmm_m);
    __m128i inv_ma     = negate_1x128 (pix_multiply_1x128 (xmm_m, sa));

    return pack_1x128_32 (
	pix_add_multiply_1x128 (&xmm_d, &inv_ma, &masked_src, &da));
}
/* Component-alpha ATOP-reverse for a single pixel:
 * (src * mask) * (1 - dst.alpha) + dst * (mask * src.alpha).
 */
static force_inline uint32_t
core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src,
                                         uint32_t mask,
                                         uint32_t dst)
{
    __m128i xmm_m = unpack_32_1x128 (mask);
    __m128i xmm_s = unpack_32_1x128 (src);
    __m128i xmm_d = unpack_32_1x128 (dst);

    __m128i inv_da = negate_1x128 (expand_alpha_1x128 (xmm_d));
    __m128i sa     = expand_alpha_1x128 (xmm_s);

    __m128i masked_src = pix_multiply_1x128 (xmm_s, xmm_m);
    __m128i ma         = pix_multiply_1x128 (xmm_m, sa);

    return pack_1x128_32 (
	pix_add_multiply_1x128 (&xmm_d, &ma, &masked_src, &inv_da));
}
sse2_combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * pd, const uint32_t * ps, const uint32_t * pm, int w) { uint32_t s, m, d; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_src_lo, xmm_alpha_src_hi; __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi; __m128i xmm_mask_lo, xmm_mask_hi; while (w && (uintptr_t)pd & 15) { s = *ps++; m = *pm++; d = *pd; *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d); w--; } while (w >= 4) { xmm_dst_hi = load_128_aligned ((__m128i*)pd); xmm_src_hi = load_128_unaligned ((__m128i*)ps); xmm_mask_hi = load_128_unaligned ((__m128i*)pm); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi); pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi); negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi); pix_add_multiply_2x128 ( &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); ps += 4; pd += 4; pm += 4; w -= 4; } while (w) { s = *ps++; m = *pm++; d = *pd; *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d); w--; } } static force_inline uint32_t core_combine_xor_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst) { __m128i a = unpack_32_1x128 (mask); __m128i s = unpack_32_1x128 (src); __m128i d = unpack_32_1x128 (dst); __m128i alpha_dst = negate_1x128 (pix_multiply_1x128 ( a, expand_alpha_1x128 (s))); __m128i dest = 
pix_multiply_1x128 (s, a);
    /* NOTE(review): this is the tail of core_combine_xor_ca_pixel_sse2; the
     * function's opening (and the definitions of s, a, d, alpha_dst, dest)
     * lies before this chunk -- verify against the full file.
     */
    __m128i alpha_src = negate_1x128 (expand_alpha_1x128 (d));

    return pack_1x128_32 (
	pix_add_multiply_1x128 (&d, &alpha_dst, &dest, &alpha_src));
}

/* Component-alpha XOR combiner for w premultiplied a8r8g8b8 pixels.
 *
 * Structure (shared by all combiners in this file):
 *   1. scalar loop until the destination pointer pd is 16-byte aligned,
 *   2. SSE2 main loop handling four 32-bit pixels per iteration,
 *   3. scalar tail for the remaining (< 4) pixels.
 *
 * In the SIMD path the source is first multiplied by the per-channel mask,
 * the mask is multiplied by the source alpha and negated, the destination
 * alpha is negated, and pix_add_multiply_2x128 combines the four operands
 * into the result.  The scalar paths delegate the same computation to
 * core_combine_xor_ca_pixel_sse2.
 */
static void
sse2_combine_xor_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               pd,
                     const uint32_t *         ps,
                     const uint32_t *         pm,
                     int                      w)
{
    uint32_t s, m, d;

    __m128i xmm_src_lo, xmm_src_hi;
    __m128i xmm_dst_lo, xmm_dst_hi;
    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
    __m128i xmm_mask_lo, xmm_mask_hi;

    /* Scalar loop: advance until pd is 16-byte aligned. */
    while (w && (uintptr_t)pd & 15)
    {
	s = *ps++;
	m = *pm++;
	d = *pd;

	*pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
	w--;
    }

    /* SSE2 main loop: four pixels at a time.  The destination load/store
     * is aligned (guaranteed by the loop above); src and mask may not be.
     */
    while (w >= 4)
    {
	xmm_dst_hi = load_128_aligned ((__m128i*)pd);
	xmm_src_hi = load_128_unaligned ((__m128i*)ps);
	xmm_mask_hi = load_128_unaligned ((__m128i*)pm);

	/* Widen each 8-bit channel to 16 bits (lo/hi pixel pairs). */
	unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
	unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
	unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);

	expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
			    &xmm_alpha_src_lo, &xmm_alpha_src_hi);
	expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
			    &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);

	/* src := src * mask;  mask := mask * src.alpha */
	pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
			    &xmm_mask_lo, &xmm_mask_hi,
			    &xmm_src_lo, &xmm_src_hi);
	pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
			    &xmm_alpha_src_lo, &xmm_alpha_src_hi,
			    &xmm_mask_lo, &xmm_mask_hi);

	/* Negate both weighting factors for the XOR formula. */
	negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
		      &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
	negate_2x128 (xmm_mask_lo, xmm_mask_hi,
		      &xmm_mask_lo, &xmm_mask_hi);

	pix_add_multiply_2x128 (
	    &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
	    &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
	    &xmm_dst_lo, &xmm_dst_hi);

	save_128_aligned (
	    (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));

	ps += 4;
	pd += 4;
	pm += 4;
	w -= 4;
    }

    /* Scalar tail for the remaining pixels. */
    while (w)
    {
	s = *ps++;
	m = *pm++;
	d = *pd;

	*pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
	w--;
    }
}

/* Component-alpha ADD combiner: dst = saturate (src * mask + dst),
 * computed per channel with 8-bit saturating adds (_mm_adds_epu8).
 * Same align / 4-wide SIMD / tail structure as sse2_combine_xor_ca.
 */
static void
sse2_combine_add_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               pd,
                     const uint32_t *         ps,
                     const uint32_t *         pm,
                     int                      w)
{
    uint32_t s, m, d;

    __m128i xmm_src_lo, xmm_src_hi;
    __m128i xmm_dst_lo, xmm_dst_hi;
    __m128i xmm_mask_lo, xmm_mask_hi;

    /* Scalar loop until pd is 16-byte aligned. */
    while (w && (uintptr_t)pd & 15)
    {
	s = *ps++;
	m = *pm++;
	d = *pd;

	*pd++ = pack_1x128_32 (
	    _mm_adds_epu8 (pix_multiply_1x128 (unpack_32_1x128 (s),
					       unpack_32_1x128 (m)),
			   unpack_32_1x128 (d)));
	w--;
    }

    /* Four pixels per iteration; dst accesses are aligned. */
    while (w >= 4)
    {
	xmm_src_hi = load_128_unaligned ((__m128i*)ps);
	xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
	xmm_dst_hi = load_128_aligned ((__m128i*)pd);

	unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
	unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
	unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);

	/* src := src * mask (per channel, 16-bit intermediate). */
	pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
			    &xmm_mask_lo, &xmm_mask_hi,
			    &xmm_src_lo, &xmm_src_hi);

	/* Pack back to 8 bits and add with unsigned saturation. */
	save_128_aligned (
	    (__m128i*)pd, pack_2x128_128 (
		_mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
		_mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));

	ps += 4;
	pd += 4;
	pm += 4;
	w -= 4;
    }

    /* Scalar tail. */
    while (w)
    {
	s = *ps++;
	m = *pm++;
	d = *pd;

	*pd++ = pack_1x128_32 (
	    _mm_adds_epu8 (pix_multiply_1x128 (unpack_32_1x128 (s),
					       unpack_32_1x128 (m)),
			   unpack_32_1x128 (d)));
	w--;
    }
}

/* Broadcast a 16-bit value into all eight 16-bit lanes of an XMM register. */
static force_inline __m128i
create_mask_16_128 (uint16_t mask)
{
    return _mm_set1_epi16 (mask);
}

/* Work around a code generation bug in Sun Studio 12.
*/
#if defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
/* Macro form: the function version miscompiles on Sun Studio >= 12. */
# define create_mask_2x32_128(mask0, mask1)				\
    (_mm_set_epi32 ((mask0), (mask1), (mask0), (mask1)))
#else
/* Build { mask0, mask1, mask0, mask1 } as four 32-bit lanes. */
static force_inline __m128i
create_mask_2x32_128 (uint32_t mask0,
                      uint32_t mask1)
{
    return _mm_set_epi32 (mask0, mask1, mask0, mask1);
}
#endif

/* Fast path: solid (n) source composited OVER a 32-bit (8888) destination.
 * The solid colour and its expanded alpha are computed once; each scanline
 * then uses the usual align / 4-pixel SIMD / scalar-tail structure.
 * Returns early when the solid source is fully transparent (src == 0),
 * since OVER with a zero source leaves the destination unchanged.
 */
static void
sse2_composite_over_n_8888 (pixman_implementation_t *imp,
                            pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src;
    uint32_t    *dst_line, *dst, d;
    int32_t w;
    int dst_stride;
    __m128i xmm_src, xmm_alpha;
    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (
	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    xmm_src = expand_pixel_32_1x128 (src);
    xmm_alpha = expand_alpha_1x128 (xmm_src);

    while (height--)
    {
	dst = dst_line;

	dst_line += dst_stride;
	w = width;

	/* Scalar loop until dst is 16-byte aligned. */
	while (w && (uintptr_t)dst & 15)
	{
	    d = *dst;
	    *dst++ = pack_1x128_32 (over_1x128 (xmm_src,
						xmm_alpha,
						unpack_32_1x128 (d)));
	    w--;
	}

	while (w >= 4)
	{
	    xmm_dst = load_128_aligned ((__m128i*)dst);

	    unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);

	    over_2x128 (&xmm_src, &xmm_src,
			&xmm_alpha, &xmm_alpha,
			&xmm_dst_lo, &xmm_dst_hi);

	    /* rebuild the 4 pixel data and save */
	    save_128_aligned (
		(__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));

	    w -= 4;
	    dst += 4;
	}

	/* Scalar tail. */
	while (w)
	{
	    d = *dst;
	    *dst++ = pack_1x128_32 (over_1x128 (xmm_src,
						xmm_alpha,
						unpack_32_1x128 (d)));
	    w--;
	}
    }
}

/* Fast path: solid source OVER a 16-bit r5g6b5 (0565) destination.
 * Each 565 pixel is expanded to 8888 precision for the blend and packed
 * back afterwards (expand565_16_1x128 / pack_565_*).  The SIMD loop
 * handles eight 16-bit pixels (one 128-bit load/store) per iteration.
 */
static void
sse2_composite_over_n_0565 (pixman_implementation_t *imp,
                            pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src;
    uint16_t    *dst_line, *dst, d;
    int32_t w;
    int dst_stride;
    __m128i xmm_src, xmm_alpha;
    __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (
	dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);

    xmm_src = expand_pixel_32_1x128 (src);
    xmm_alpha = expand_alpha_1x128 (xmm_src);

    while (height--)
    {
	dst = dst_line;

	dst_line += dst_stride;
	w = width;

	/* Scalar loop until dst is 16-byte aligned. */
	while (w && (uintptr_t)dst & 15)
	{
	    d = *dst;

	    *dst++ = pack_565_32_16 (
		pack_1x128_32 (over_1x128 (xmm_src,
					   xmm_alpha,
					   expand565_16_1x128 (d))));
	    w--;
	}

	/* Eight 565 pixels per iteration, expanded into four
	 * 16-bit-per-channel registers for the blend.
	 */
	while (w >= 8)
	{
	    xmm_dst = load_128_aligned ((__m128i*)dst);

	    unpack_565_128_4x128 (xmm_dst,
				  &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);

	    over_2x128 (&xmm_src, &xmm_src,
			&xmm_alpha, &xmm_alpha,
			&xmm_dst0, &xmm_dst1);
	    over_2x128 (&xmm_src, &xmm_src,
			&xmm_alpha, &xmm_alpha,
			&xmm_dst2, &xmm_dst3);

	    xmm_dst = pack_565_4x128_128 (
		&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);

	    save_128_aligned ((__m128i*)dst, xmm_dst);

	    dst += 8;
	    w -= 8;
	}

	/* Scalar tail. */
	while (w--)
	{
	    d = *dst;
	    *dst++ = pack_565_32_16 (
		pack_1x128_32 (over_1x128 (xmm_src,
					   xmm_alpha,
					   expand565_16_1x128 (d))));
	}
    }
}

/* Fast path: solid source, component-alpha 8888 mask, ADD onto an 8888
 * destination: dst = saturate (mask * src + dst) per channel.
 * Zero mask words are skipped; in the SIMD loop a movemask/cmpeq test
 * skips whole groups of four fully-transparent mask pixels.
 *
 * NOTE(review): only the head of this function is inside this chunk; the
 * SIMD body and tail continue past this line range.
 */
static void
sse2_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
                                   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src;
    uint32_t    *dst_line, d;
    uint32_t    *mask_line, m;
    uint32_t pack_cmp;
    int dst_stride, mask_stride;

    __m128i xmm_src;
    __m128i xmm_dst;
    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;

    __m128i mmx_src, mmx_mask, mmx_dest;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (
	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (
	mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    /* Expand the solid colour to 16 bits per channel once, up front. */
    xmm_src = _mm_unpacklo_epi8 (
	create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
    mmx_src = xmm_src;

    while (height--)
    {
	int w = width;
	const uint32_t *pm = (uint32_t *)mask_line;
	uint32_t *pd = (uint32_t *)dst_line;

	dst_line += dst_stride;
	mask_line += mask_stride;

	/* Scalar loop until pd is 16-byte aligned; zero masks skipped. */
	while (w && (uintptr_t)pd & 15)
	{
	    m = *pm++;

	    if (m)
	    {
		d = *pd;

		mmx_mask = unpack_32_1x128 (m);
		mmx_dest = unpack_32_1x128 (d);

		*pd = pack_1x128_32 (
		    _mm_adds_epu8 (pix_multiply_1x128 (mmx_mask, mmx_src),
				   mmx_dest));
	    }

	    pd++;
	    w--;
	}

	while (w >= 4)
	{
	    xmm_mask =
load_128_unaligned ((__m128i*)pm); pack_cmp = _mm_movemask_epi8 ( _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); /* if all bits in mask are zero, pack_cmp are equal to 0xffff */ if (pack_cmp != 0xffff) { xmm_dst = load_128_aligned ((__m128i*)pd); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); pix_multiply_2x128 (&xmm_src, &xmm_src, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); xmm_mask_hi = pack_2x128_128 (xmm_mask_lo, xmm_mask_hi); save_128_aligned ( (__m128i*)pd, _mm_adds_epu8 (xmm_mask_hi, xmm_dst)); } pd += 4; pm += 4; w -= 4; } while (w) { m = *pm++; if (m) { d = *pd; mmx_mask = unpack_32_1x128 (m); mmx_dest = unpack_32_1x128 (d); *pd = pack_1x128_32 ( _mm_adds_epu8 (pix_multiply_1x128 (mmx_mask, mmx_src), mmx_dest)); } pd++; w--; } } } static void sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint32_t *dst_line, d; uint32_t *mask_line, m; uint32_t pack_cmp; int dst_stride, mask_stride; __m128i xmm_src, xmm_alpha; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; __m128i mmx_src, mmx_alpha, mmx_mask, mmx_dest; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); xmm_src = _mm_unpacklo_epi8 ( create_mask_2x32_128 (src, src), _mm_setzero_si128 ()); xmm_alpha = expand_alpha_1x128 (xmm_src); mmx_src = xmm_src; mmx_alpha = xmm_alpha; while (height--) { int w = width; const uint32_t *pm = (uint32_t *)mask_line; uint32_t *pd = (uint32_t *)dst_line; dst_line += dst_stride; mask_line += mask_stride; while (w && (uintptr_t)pd & 15) { m = *pm++; if (m) { d = *pd; mmx_mask = unpack_32_1x128 (m); mmx_dest = unpack_32_1x128 (d); *pd = pack_1x128_32 (in_over_1x128 (&mmx_src, &mmx_alpha, &mmx_mask, 
&mmx_dest)); } pd++; w--; } while (w >= 4) { xmm_mask = load_128_unaligned ((__m128i*)pm); pack_cmp = _mm_movemask_epi8 ( _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); /* if all bits in mask are zero, pack_cmp are equal to 0xffff */ if (pack_cmp != 0xffff) { xmm_dst = load_128_aligned ((__m128i*)pd); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } pd += 4; pm += 4; w -= 4; } while (w) { m = *pm++; if (m) { d = *pd; mmx_mask = unpack_32_1x128 (m); mmx_dest = unpack_32_1x128 (d); *pd = pack_1x128_32 ( in_over_1x128 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)); } pd++; w--; } } } static void sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; uint32_t mask; int32_t w; int dst_stride, src_stride; __m128i xmm_mask; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_lo, xmm_alpha_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); mask = _pixman_image_get_solid (imp, mask_image, PIXMAN_a8r8g8b8); xmm_mask = create_mask_16_128 (mask >> 24); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { uint32_t s = *src++; if (s) { uint32_t d = *dst; __m128i ms = unpack_32_1x128 (s); __m128i alpha = expand_alpha_1x128 (ms); __m128i dest = xmm_mask; __m128i alpha_dst = unpack_32_1x128 (d); *dst = pack_1x128_32 ( in_over_1x128 (&ms, &alpha, &dest, &alpha_dst)); } dst++; w--; } while (w >= 4) { xmm_src = load_128_unaligned ((__m128i*)src); if 
(!is_zero (xmm_src)) { xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } dst += 4; src += 4; w -= 4; } while (w) { uint32_t s = *src++; if (s) { uint32_t d = *dst; __m128i ms = unpack_32_1x128 (s); __m128i alpha = expand_alpha_1x128 (ms); __m128i mask = xmm_mask; __m128i dest = unpack_32_1x128 (d); *dst = pack_1x128_32 ( in_over_1x128 (&ms, &alpha, &mask, &dest)); } dst++; w--; } } } static void sse2_composite_src_x888_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst; uint32_t *src_line, *src, s; int dst_stride, src_stride; int32_t w; PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { s = *src++; *dst = convert_8888_to_0565 (s); dst++; w--; } while (w >= 8) { __m128i xmm_src0 = load_128_unaligned ((__m128i *)src + 0); __m128i xmm_src1 = load_128_unaligned ((__m128i *)src + 1); save_128_aligned ((__m128i*)dst, pack_565_2packedx128_128 (xmm_src0, xmm_src1)); w -= 8; src += 8; dst += 8; } while (w) { s = *src++; *dst = convert_8888_to_0565 (s); dst++; w--; } } } static void sse2_composite_src_x888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int32_t w; int dst_stride, src_stride; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 
1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { *dst++ = *src++ | 0xff000000; w--; } while (w >= 16) { __m128i xmm_src1, xmm_src2, xmm_src3, xmm_src4; xmm_src1 = load_128_unaligned ((__m128i*)src + 0); xmm_src2 = load_128_unaligned ((__m128i*)src + 1); xmm_src3 = load_128_unaligned ((__m128i*)src + 2); xmm_src4 = load_128_unaligned ((__m128i*)src + 3); save_128_aligned ((__m128i*)dst + 0, _mm_or_si128 (xmm_src1, mask_ff000000)); save_128_aligned ((__m128i*)dst + 1, _mm_or_si128 (xmm_src2, mask_ff000000)); save_128_aligned ((__m128i*)dst + 2, _mm_or_si128 (xmm_src3, mask_ff000000)); save_128_aligned ((__m128i*)dst + 3, _mm_or_si128 (xmm_src4, mask_ff000000)); dst += 16; src += 16; w -= 16; } while (w) { *dst++ = *src++ | 0xff000000; w--; } } } static void sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; uint32_t mask; int dst_stride, src_stride; int32_t w; __m128i xmm_mask, xmm_alpha; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); mask = _pixman_image_get_solid (imp, mask_image, PIXMAN_a8r8g8b8); xmm_mask = create_mask_16_128 (mask >> 24); xmm_alpha = mask_00ff; while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { uint32_t s = (*src++) | 0xff000000; uint32_t d = *dst; __m128i src = unpack_32_1x128 (s); __m128i alpha = xmm_alpha; __m128i mask = xmm_mask; __m128i dest = unpack_32_1x128 (d); *dst++ = pack_1x128_32 ( in_over_1x128 (&src, &alpha, &mask, &dest)); w--; 
} while (w >= 4) { xmm_src = _mm_or_si128 ( load_128_unaligned ((__m128i*)src), mask_ff000000); xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha, &xmm_alpha, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); dst += 4; src += 4; w -= 4; } while (w) { uint32_t s = (*src++) | 0xff000000; uint32_t d = *dst; __m128i src = unpack_32_1x128 (s); __m128i alpha = xmm_alpha; __m128i mask = xmm_mask; __m128i dest = unpack_32_1x128 (d); *dst++ = pack_1x128_32 ( in_over_1x128 (&src, &alpha, &mask, &dest)); w--; } } } static void sse2_composite_over_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); int dst_stride, src_stride; uint32_t *dst_line, *dst; uint32_t *src_line, *src; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); dst = dst_line; src = src_line; while (height--) { sse2_combine_over_u (imp, op, dst, src, NULL, width); dst += dst_stride; src += src_stride; } } static force_inline uint16_t composite_over_8888_0565pixel (uint32_t src, uint16_t dst) { __m128i ms; ms = unpack_32_1x128 (src); return pack_565_32_16 ( pack_1x128_32 ( over_1x128 ( ms, expand_alpha_1x128 (ms), expand565_16_1x128 (dst)))); } static void sse2_composite_over_8888_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst, d; uint32_t *src_line, *src, s; int dst_stride, src_stride; int32_t w; __m128i xmm_alpha_lo, xmm_alpha_hi; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( 
src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; src = src_line; dst_line += dst_stride; src_line += src_stride; w = width; /* Align dst on a 16-byte boundary */ while (w && ((uintptr_t)dst & 15)) { s = *src++; d = *dst; *dst++ = composite_over_8888_0565pixel (s, d); w--; } /* It's a 8 pixel loop */ while (w >= 8) { /* I'm loading unaligned because I'm not sure * about the address alignment. */ xmm_src = load_128_unaligned ((__m128i*) src); xmm_dst = load_128_aligned ((__m128i*) dst); /* Unpacking */ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); /* I'm loading next 4 pixels from memory * before to optimze the memory read. */ xmm_src = load_128_unaligned ((__m128i*) (src + 4)); over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst0, &xmm_dst1); /* Unpacking */ unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst2, &xmm_dst3); save_128_aligned ( (__m128i*)dst, pack_565_4x128_128 ( &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); w -= 8; dst += 8; src += 8; } while (w--) { s = *src++; d = *dst; *dst++ = composite_over_8888_0565pixel (s, d); } } } static void sse2_composite_over_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; uint32_t d; __m128i xmm_src, xmm_alpha, xmm_def; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; __m128i mmx_src, mmx_alpha, mmx_mask, mmx_dest; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) return; 
PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); xmm_def = create_mask_2x32_128 (src, src); xmm_src = expand_pixel_32_1x128 (src); xmm_alpha = expand_alpha_1x128 (xmm_src); mmx_src = xmm_src; mmx_alpha = xmm_alpha; while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { uint8_t m = *mask++; if (m) { d = *dst; mmx_mask = expand_pixel_8_1x128 (m); mmx_dest = unpack_32_1x128 (d); *dst = pack_1x128_32 (in_over_1x128 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)); } w--; dst++; } while (w >= 4) { uint32_t m; memcpy(&m, mask, sizeof(uint32_t)); if (srca == 0xff && m == 0xffffffff) { save_128_aligned ((__m128i*)dst, xmm_def); } else if (m) { xmm_dst = load_128_aligned ((__m128i*) dst); xmm_mask = unpack_32_1x128 (m); xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); /* Unpacking */ unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } w -= 4; dst += 4; mask += 4; } while (w) { uint8_t m = *mask++; if (m) { d = *dst; mmx_mask = expand_pixel_8_1x128 (m); mmx_dest = unpack_32_1x128 (d); *dst = pack_1x128_32 (in_over_1x128 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)); } w--; dst++; } } } #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__) __attribute__((__force_align_arg_pointer__)) #endif static pixman_bool_t sse2_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t filler) { uint32_t byte_width; uint8_t *byte_line; __m128i xmm_def; if (bpp 
== 8) { uint32_t b; uint32_t w; stride = stride * (int) sizeof (uint32_t) / 1; byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x); byte_width = width; stride *= 1; b = filler & 0xff; w = (b << 8) | b; filler = (w << 16) | w; } else if (bpp == 16) { stride = stride * (int) sizeof (uint32_t) / 2; byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x); byte_width = 2 * width; stride *= 2; filler = (filler & 0xffff) * 0x00010001; } else if (bpp == 32) { stride = stride * (int) sizeof (uint32_t) / 4; byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x); byte_width = 4 * width; stride *= 4; } else { return FALSE; } xmm_def = create_mask_2x32_128 (filler, filler); while (height--) { int w; uint8_t *d = byte_line; byte_line += stride; w = byte_width; if (w >= 1 && ((uintptr_t)d & 1)) { *(uint8_t *)d = filler; w -= 1; d += 1; } while (w >= 2 && ((uintptr_t)d & 3)) { *(uint16_t *)d = filler; w -= 2; d += 2; } while (w >= 4 && ((uintptr_t)d & 15)) { *(uint32_t *)d = filler; w -= 4; d += 4; } while (w >= 128) { save_128_aligned ((__m128i*)(d), xmm_def); save_128_aligned ((__m128i*)(d + 16), xmm_def); save_128_aligned ((__m128i*)(d + 32), xmm_def); save_128_aligned ((__m128i*)(d + 48), xmm_def); save_128_aligned ((__m128i*)(d + 64), xmm_def); save_128_aligned ((__m128i*)(d + 80), xmm_def); save_128_aligned ((__m128i*)(d + 96), xmm_def); save_128_aligned ((__m128i*)(d + 112), xmm_def); d += 128; w -= 128; } if (w >= 64) { save_128_aligned ((__m128i*)(d), xmm_def); save_128_aligned ((__m128i*)(d + 16), xmm_def); save_128_aligned ((__m128i*)(d + 32), xmm_def); save_128_aligned ((__m128i*)(d + 48), xmm_def); d += 64; w -= 64; } if (w >= 32) { save_128_aligned ((__m128i*)(d), xmm_def); save_128_aligned ((__m128i*)(d + 16), xmm_def); d += 32; w -= 32; } if (w >= 16) { save_128_aligned ((__m128i*)(d), xmm_def); d += 16; w -= 16; } while (w >= 4) { *(uint32_t *)d = filler; w -= 4; d += 4; } if (w >= 2) { *(uint16_t *)d = filler; w -= 2; d += 2; } if (w >= 1) { 
*(uint8_t *)d = filler; w -= 1; d += 1; } } return TRUE; } static void sse2_composite_src_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; __m128i xmm_src, xmm_def; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = src >> 24; if (src == 0) { sse2_fill (imp, dest_image->bits.bits, dest_image->bits.rowstride, PIXMAN_FORMAT_BPP (dest_image->bits.format), dest_x, dest_y, width, height, 0); return; } PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); xmm_def = create_mask_2x32_128 (src, src); xmm_src = expand_pixel_32_1x128 (src); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { uint8_t m = *mask++; if (m) { *dst = pack_1x128_32 ( pix_multiply_1x128 (xmm_src, expand_pixel_8_1x128 (m))); } else { *dst = 0; } w--; dst++; } while (w >= 4) { uint32_t m; memcpy(&m, mask, sizeof(uint32_t)); if (srca == 0xff && m == 0xffffffff) { save_128_aligned ((__m128i*)dst, xmm_def); } else if (m) { xmm_mask = unpack_32_1x128 (m); xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); /* Unpacking */ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); pix_multiply_2x128 (&xmm_src, &xmm_src, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi)); } else { save_128_aligned ((__m128i*)dst, _mm_setzero_si128 ()); } w -= 4; dst += 4; mask += 4; } while (w) { uint8_t m = *mask++; if (m) { *dst = pack_1x128_32 ( pix_multiply_1x128 ( xmm_src, expand_pixel_8_1x128 (m))); 
} else { *dst = 0; } w--; dst++; } } } static void sse2_composite_over_n_8_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint16_t *dst_line, *dst, d; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; __m128i mmx_src, mmx_alpha, mmx_mask, mmx_dest; __m128i xmm_src, xmm_alpha; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); xmm_src = expand_pixel_32_1x128 (src); xmm_alpha = expand_alpha_1x128 (xmm_src); mmx_src = xmm_src; mmx_alpha = xmm_alpha; while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { uint8_t m = *mask++; if (m) { d = *dst; mmx_mask = expand_alpha_rev_1x128 (unpack_32_1x128 (m)); mmx_dest = expand565_16_1x128 (d); *dst = pack_565_32_16 ( pack_1x128_32 ( in_over_1x128 ( &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); } w--; dst++; } while (w >= 8) { uint32_t m; xmm_dst = load_128_aligned ((__m128i*) dst); unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); memcpy(&m, mask, sizeof(uint32_t)); mask += 4; if (m) { xmm_mask = unpack_32_1x128 (m); xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); /* Unpacking */ unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst0, &xmm_dst1); } memcpy(&m, mask, sizeof(uint32_t)); mask += 4; if (m) { xmm_mask = unpack_32_1x128 (m); xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ()); /* Unpacking */ 
unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst2, &xmm_dst3); } save_128_aligned ( (__m128i*)dst, pack_565_4x128_128 ( &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); w -= 8; dst += 8; } while (w) { uint8_t m = *mask++; if (m) { d = *dst; mmx_mask = expand_alpha_rev_1x128 (unpack_32_1x128 (m)); mmx_dest = expand565_16_1x128 (d); *dst = pack_565_32_16 ( pack_1x128_32 ( in_over_1x128 ( &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); } w--; dst++; } } } static void sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint16_t *dst_line, *dst, d; uint32_t *src_line, *src, s; int dst_stride, src_stride; int32_t w; uint32_t opaque, zero; __m128i ms; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { s = *src++; d = *dst; ms = unpack_32_1x128 (s); *dst++ = pack_565_32_16 ( pack_1x128_32 ( over_rev_non_pre_1x128 (ms, expand565_16_1x128 (d)))); w--; } while (w >= 8) { /* First round */ xmm_src = load_128_unaligned ((__m128i*)src); xmm_dst = load_128_aligned ((__m128i*)dst); opaque = is_opaque (xmm_src); zero = is_zero (xmm_src); unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); /* preload next round*/ xmm_src = load_128_unaligned ((__m128i*)(src + 4)); if (opaque) { invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst0, &xmm_dst1); } else if (!zero) { over_rev_non_pre_2x128 (xmm_src_lo, 
xmm_src_hi, &xmm_dst0, &xmm_dst1); } /* Second round */ opaque = is_opaque (xmm_src); zero = is_zero (xmm_src); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); if (opaque) { invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst2, &xmm_dst3); } else if (!zero) { over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst2, &xmm_dst3); } save_128_aligned ( (__m128i*)dst, pack_565_4x128_128 ( &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); w -= 8; src += 8; dst += 8; } while (w) { s = *src++; d = *dst; ms = unpack_32_1x128 (s); *dst++ = pack_565_32_16 ( pack_1x128_32 ( over_rev_non_pre_1x128 (ms, expand565_16_1x128 (d)))); w--; } } } static void sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst, d; uint32_t *src_line, *src, s; int dst_stride, src_stride; int32_t w; uint32_t opaque, zero; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst_lo, xmm_dst_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { s = *src++; d = *dst; *dst++ = pack_1x128_32 ( over_rev_non_pre_1x128 ( unpack_32_1x128 (s), unpack_32_1x128 (d))); w--; } while (w >= 4) { xmm_src_hi = load_128_unaligned ((__m128i*)src); opaque = is_opaque (xmm_src_hi); zero = is_zero (xmm_src_hi); unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi); if (opaque) { invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } else if (!zero) { xmm_dst_hi = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 
(xmm_dst_lo, xmm_dst_hi)); } w -= 4; dst += 4; src += 4; } while (w) { s = *src++; d = *dst; *dst++ = pack_1x128_32 ( over_rev_non_pre_1x128 ( unpack_32_1x128 (s), unpack_32_1x128 (d))); w--; } } } static void sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint16_t *dst_line, *dst, d; uint32_t *mask_line, *mask, m; int dst_stride, mask_stride; int w; uint32_t pack_cmp; __m128i xmm_src, xmm_alpha; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3; __m128i mmx_src, mmx_alpha, mmx_mask, mmx_dest; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); xmm_src = expand_pixel_32_1x128 (src); xmm_alpha = expand_alpha_1x128 (xmm_src); mmx_src = xmm_src; mmx_alpha = xmm_alpha; while (height--) { w = width; mask = mask_line; dst = dst_line; mask_line += mask_stride; dst_line += dst_stride; while (w && ((uintptr_t)dst & 15)) { m = *(uint32_t *) mask; if (m) { d = *dst; mmx_mask = unpack_32_1x128 (m); mmx_dest = expand565_16_1x128 (d); *dst = pack_565_32_16 ( pack_1x128_32 ( in_over_1x128 ( &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); } w--; dst++; mask++; } while (w >= 8) { /* First round */ xmm_mask = load_128_unaligned ((__m128i*)mask); xmm_dst = load_128_aligned ((__m128i*)dst); pack_cmp = _mm_movemask_epi8 ( _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); /* preload next round */ xmm_mask = load_128_unaligned ((__m128i*)(mask + 4)); /* preload next round */ if (pack_cmp != 0xffff) { in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst0, 
&xmm_dst1); } /* Second round */ pack_cmp = _mm_movemask_epi8 ( _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ())); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); if (pack_cmp != 0xffff) { in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst2, &xmm_dst3); } save_128_aligned ( (__m128i*)dst, pack_565_4x128_128 ( &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3)); w -= 8; dst += 8; mask += 8; } while (w) { m = *(uint32_t *) mask; if (m) { d = *dst; mmx_mask = unpack_32_1x128 (m); mmx_dest = expand565_16_1x128 (d); *dst = pack_565_32_16 ( pack_1x128_32 ( in_over_1x128 ( &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest))); } w--; dst++; mask++; } } } static void sse2_composite_in_n_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; uint32_t d; uint32_t src; int32_t w; __m128i xmm_alpha; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src)); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && ((uintptr_t)dst & 15)) { uint8_t m = *mask++; d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( pix_multiply_1x128 ( pix_multiply_1x128 (xmm_alpha, unpack_32_1x128 (m)), unpack_32_1x128 (d))); w--; } while (w >= 16) { xmm_mask = load_128_unaligned ((__m128i*)mask); xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, 
&xmm_mask_hi); pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); mask += 16; dst += 16; w -= 16; } while (w) { uint8_t m = *mask++; d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( pix_multiply_1x128 ( pix_multiply_1x128 ( xmm_alpha, unpack_32_1x128 (m)), unpack_32_1x128 (d))); w--; } } } static void sse2_composite_in_n_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; int dst_stride; uint32_t d; uint32_t src; int32_t w; __m128i xmm_alpha; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src)); src = src >> 24; if (src == 0xff) return; if (src == 0x00) { pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride, 8, dest_x, dest_y, width, height, src); return; } while (height--) { dst = dst_line; dst_line += dst_stride; w = width; while (w && ((uintptr_t)dst & 15)) { d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( pix_multiply_1x128 ( xmm_alpha, unpack_32_1x128 (d))); w--; } while (w >= 16) { xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); dst += 16; w -= 16; } while (w) { d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( pix_multiply_1x128 ( xmm_alpha, unpack_32_1x128 (d))); w--; } } } static void sse2_composite_in_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *src_line, *src; int src_stride, dst_stride; int32_t w; uint32_t s, d; __m128i 
xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && ((uintptr_t)dst & 15)) { s = (uint32_t) *src++; d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( pix_multiply_1x128 ( unpack_32_1x128 (s), unpack_32_1x128 (d))); w--; } while (w >= 16) { xmm_src = load_128_unaligned ((__m128i*)src); xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); src += 16; dst += 16; w -= 16; } while (w) { s = (uint32_t) *src++; d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( pix_multiply_1x128 (unpack_32_1x128 (s), unpack_32_1x128 (d))); w--; } } } static void sse2_composite_add_n_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; uint32_t src; uint32_t d; __m128i xmm_alpha; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src)); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && ((uintptr_t)dst & 15)) { uint8_t m = *mask++; d = (uint32_t) *dst; *dst++ = 
(uint8_t) pack_1x128_32 ( _mm_adds_epu16 ( pix_multiply_1x128 ( xmm_alpha, unpack_32_1x128 (m)), unpack_32_1x128 (d))); w--; } while (w >= 16) { xmm_mask = load_128_unaligned ((__m128i*)mask); xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo); xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); mask += 16; dst += 16; w -= 16; } while (w) { uint8_t m = (uint32_t) *mask++; d = (uint32_t) *dst; *dst++ = (uint8_t) pack_1x128_32 ( _mm_adds_epu16 ( pix_multiply_1x128 ( xmm_alpha, unpack_32_1x128 (m)), unpack_32_1x128 (d))); w--; } } } static void sse2_composite_add_n_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; int dst_stride; int32_t w; uint32_t src; __m128i xmm_src; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); src >>= 24; if (src == 0x00) return; if (src == 0xff) { pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride, 8, dest_x, dest_y, width, height, 0xff); return; } src = (src << 24) | (src << 16) | (src << 8) | src; xmm_src = _mm_set_epi32 (src, src, src, src); while (height--) { dst = dst_line; dst_line += dst_stride; w = width; while (w && ((uintptr_t)dst & 15)) { *dst = (uint8_t)_mm_cvtsi128_si32 ( _mm_adds_epu8 ( xmm_src, _mm_cvtsi32_si128 (*dst))); w--; dst++; } while (w >= 16) { save_128_aligned ( (__m128i*)dst, _mm_adds_epu8 (xmm_src, load_128_aligned ((__m128i*)dst))); dst += 16; w -= 16; } while (w) { *dst = (uint8_t)_mm_cvtsi128_si32 ( _mm_adds_epu8 ( xmm_src, _mm_cvtsi32_si128 (*dst))); w--; dst++; } } } static void 
/* ADD operator, a8 source onto a8 destination: per-byte saturating add.
 * The bulk of each scanline is delegated to sse2_combine_add_u by
 * reinterpreting groups of four a8 bytes as one uint32_t.
 */
sse2_composite_add_8_8 (pixman_implementation_t *imp,
                        pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t     *dst_line, *dst;
    uint8_t     *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;
    uint16_t t;

    PIXMAN_IMAGE_GET_LINE (
	src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (
	dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	src = src_line;

	dst_line += dst_stride;
	src_line += src_stride;
	w = width;

	/* Small head: byte-wise until dst is 4-byte aligned.
	 * t | (0 - (t >> 8)) clamps the 9-bit sum to 0xff:
	 * (t >> 8) is 1 on overflow, so the OR mask becomes 0xff.
	 */
	while (w && (uintptr_t)dst & 3)
	{
	    t = (*dst) + (*src++);
	    *dst++ = t | (0 - (t >> 8));
	    w--;
	}

	/* Middle: treat 4 a8 bytes as one x8r8g8b8 word and reuse the
	 * generic saturating-add combiner (w >> 2 whole words).
	 */
	sse2_combine_add_u (imp, op, (uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);

	/* Small tail.  NOTE(review): "w & 0xfffc" only keeps the low 16
	 * bits of w, which presumably relies on scanline widths fitting
	 * in 16 bits — confirm against pixman's image size limits.
	 */
	dst += w & 0xfffc;
	src += w & 0xfffc;

	w &= 3;

	while (w)
	{
	    t = (*dst) + (*src++);
	    *dst++ = t | (0 - (t >> 8));
	    w--;
	}
    }
}

/* ADD operator, a8r8g8b8 source onto a8r8g8b8 destination: the whole
 * scanline is handed to the generic SSE2 saturating-add combiner.
 */
static void
sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t    *dst_line, *dst;
    uint32_t    *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (
	src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (
	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;

	sse2_combine_add_u (imp, op, dst, src, NULL, width);
    }
}

/* ADD operator, solid a8r8g8b8 source onto a8r8g8b8 destination:
 * saturating per-byte add of the replicated solid color.
 */
static void
sse2_composite_add_n_8888 (pixman_implementation_t *imp,
			   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst, src;
    int dst_stride;

    __m128i xmm_src;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
    if (src == 0)
	return;		/* adding zero is a no-op */

    if (src == ~0)
    {
	/* Saturating add of 0xffffffff always saturates: plain fill. */
	pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride, 32,
		     dest_x, dest_y, width, height, ~0);

	return;
    }

    /* Broadcast the solid pixel to all four 32-bit lanes. */
    xmm_src = _mm_set_epi32 (src, src, src, src);
    while (height--)
    {
	int w = width;
	uint32_t d;

	dst = dst_line;
	dst_line +=
dst_stride; while (w && (uintptr_t)dst & 15) { d = *dst; *dst++ = _mm_cvtsi128_si32 ( _mm_adds_epu8 (xmm_src, _mm_cvtsi32_si128 (d))); w--; } while (w >= 4) { save_128_aligned ((__m128i*)dst, _mm_adds_epu8 (xmm_src, load_128_aligned ((__m128i*)dst))); dst += 4; w -= 4; } while (w--) { d = *dst; *dst++ = _mm_cvtsi128_si32 (_mm_adds_epu8 (xmm_src, _mm_cvtsi32_si128 (d))); } } } static void sse2_composite_add_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint8_t *mask_line, *mask; int dst_stride, mask_stride; int32_t w; uint32_t src; __m128i xmm_src; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; xmm_src = expand_pixel_32_1x128 (src); PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && ((uintptr_t)dst & 15)) { uint8_t m = *mask++; if (m) { *dst = pack_1x128_32 (_mm_adds_epu16 (pix_multiply_1x128 (xmm_src, expand_pixel_8_1x128 (m)), unpack_32_1x128 (*dst))); } dst++; w--; } while (w >= 4) { uint32_t m; memcpy(&m, mask, sizeof(uint32_t)); if (m) { __m128i xmm_mask_lo, xmm_mask_hi; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_dst = load_128_aligned ((__m128i*)dst); __m128i xmm_mask = _mm_unpacklo_epi8 (unpack_32_1x128(m), _mm_setzero_si128 ()); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); pix_multiply_2x128 (&xmm_src, &xmm_src, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo); xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, 
xmm_dst_hi)); } w -= 4; dst += 4; mask += 4; } while (w) { uint8_t m = *mask++; if (m) { *dst = pack_1x128_32 (_mm_adds_epu16 (pix_multiply_1x128 (xmm_src, expand_pixel_8_1x128 (m)), unpack_32_1x128 (*dst))); } dst++; w--; } } } static pixman_bool_t sse2_blt (pixman_implementation_t *imp, uint32_t * src_bits, uint32_t * dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height) { uint8_t * src_bytes; uint8_t * dst_bytes; int byte_width; if (src_bpp != dst_bpp) return FALSE; if (src_bpp == 16) { src_stride = src_stride * (int) sizeof (uint32_t) / 2; dst_stride = dst_stride * (int) sizeof (uint32_t) / 2; src_bytes =(uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x)); dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dest_y) + (dest_x)); byte_width = 2 * width; src_stride *= 2; dst_stride *= 2; } else if (src_bpp == 32) { src_stride = src_stride * (int) sizeof (uint32_t) / 4; dst_stride = dst_stride * (int) sizeof (uint32_t) / 4; src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x)); dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dest_y) + (dest_x)); byte_width = 4 * width; src_stride *= 4; dst_stride *= 4; } else { return FALSE; } while (height--) { int w; uint8_t *s = src_bytes; uint8_t *d = dst_bytes; src_bytes += src_stride; dst_bytes += dst_stride; w = byte_width; while (w >= 2 && ((uintptr_t)d & 3)) { memmove(d, s, 2); w -= 2; s += 2; d += 2; } while (w >= 4 && ((uintptr_t)d & 15)) { memmove(d, s, 4); w -= 4; s += 4; d += 4; } while (w >= 64) { __m128i xmm0, xmm1, xmm2, xmm3; xmm0 = load_128_unaligned ((__m128i*)(s)); xmm1 = load_128_unaligned ((__m128i*)(s + 16)); xmm2 = load_128_unaligned ((__m128i*)(s + 32)); xmm3 = load_128_unaligned ((__m128i*)(s + 48)); save_128_aligned ((__m128i*)(d), xmm0); save_128_aligned ((__m128i*)(d + 16), xmm1); save_128_aligned ((__m128i*)(d + 32), xmm2); save_128_aligned 
((__m128i*)(d + 48), xmm3); s += 64; d += 64; w -= 64; } while (w >= 16) { save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s) ); w -= 16; d += 16; s += 16; } while (w >= 4) { memmove(d, s, 4); w -= 4; s += 4; d += 4; } if (w >= 2) { memmove(d, s, 2); w -= 2; s += 2; d += 2; } } return TRUE; } static void sse2_composite_copy_area (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); sse2_blt (imp, src_image->bits.bits, dest_image->bits.bits, src_image->bits.rowstride, dest_image->bits.rowstride, PIXMAN_FORMAT_BPP (src_image->bits.format), PIXMAN_FORMAT_BPP (dest_image->bits.format), src_x, src_y, dest_x, dest_y, width, height); } static void sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *src, *src_line, s; uint32_t *dst, *dst_line, d; uint8_t *mask, *mask_line; int src_stride, mask_stride, dst_stride; int32_t w; __m128i ms; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { src = src_line; src_line += src_stride; dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { uint8_t m = *mask++; s = 0xff000000 | *src++; d = *dst; ms = unpack_32_1x128 (s); if (m != 0xff) { __m128i ma = expand_alpha_rev_1x128 (unpack_32_1x128 (m)); __m128i md = unpack_32_1x128 (d); ms = in_over_1x128 (&ms, &mask_00ff, &ma, &md); } *dst++ = pack_1x128_32 (ms); w--; } while (w >= 4) { uint32_t m; memcpy(&m, mask, sizeof(uint32_t)); xmm_src = _mm_or_si128 ( load_128_unaligned ((__m128i*)src), mask_ff000000); if (m == 0xffffffff) { 
save_128_aligned ((__m128i*)dst, xmm_src); } else { xmm_dst = load_128_aligned ((__m128i*)dst); xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128()); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_rev_2x128 ( xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &mask_00ff, &mask_00ff, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } src += 4; dst += 4; mask += 4; w -= 4; } while (w) { uint8_t m = *mask++; if (m) { s = 0xff000000 | *src; if (m == 0xff) { *dst = s; } else { __m128i ma, md, ms; d = *dst; ma = expand_alpha_rev_1x128 (unpack_32_1x128 (m)); md = unpack_32_1x128 (d); ms = unpack_32_1x128 (s); *dst = pack_1x128_32 (in_over_1x128 (&ms, &mask_00ff, &ma, &md)); } } src++; dst++; w--; } } } static void sse2_composite_over_8888_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *src, *src_line, s; uint32_t *dst, *dst_line, d; uint8_t *mask, *mask_line; int src_stride, mask_stride, dst_stride; int32_t w; __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { src = src_line; src_line += src_stride; dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { uint32_t sa; uint8_t m = *mask++; s = *src++; d = *dst; sa = s >> 24; if (m) { if (sa == 0xff && m == 0xff) { *dst = s; } else { __m128i 
ms, md, ma, msa; ma = expand_alpha_rev_1x128 (load_32_1x128 (m)); ms = unpack_32_1x128 (s); md = unpack_32_1x128 (d); msa = expand_alpha_rev_1x128 (load_32_1x128 (sa)); *dst = pack_1x128_32 (in_over_1x128 (&ms, &msa, &ma, &md)); } } dst++; w--; } while (w >= 4) { uint32_t m; memcpy(&m, mask, sizeof(uint32_t)); if (m) { xmm_src = load_128_unaligned ((__m128i*)src); if (m == 0xffffffff && is_opaque (xmm_src)) { save_128_aligned ((__m128i *)dst, xmm_src); } else { xmm_dst = load_128_aligned ((__m128i *)dst); xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128()); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } } src += 4; dst += 4; mask += 4; w -= 4; } while (w) { uint32_t sa; uint8_t m = *mask++; s = *src++; d = *dst; sa = s >> 24; if (m) { if (sa == 0xff && m == 0xff) { *dst = s; } else { __m128i ms, md, ma, msa; ma = expand_alpha_rev_1x128 (load_32_1x128 (m)); ms = unpack_32_1x128 (s); md = unpack_32_1x128 (d); msa = expand_alpha_rev_1x128 (load_32_1x128 (sa)); *dst = pack_1x128_32 (in_over_1x128 (&ms, &msa, &ma, &md)); } } dst++; w--; } } } static void sse2_composite_over_reverse_n_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src; uint32_t *dst_line, *dst; __m128i xmm_src; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_dsta_hi, xmm_dsta_lo; int dst_stride; int32_t w; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, 
uint32_t, dst_stride, dst_line, 1); xmm_src = expand_pixel_32_1x128 (src); while (height--) { dst = dst_line; dst_line += dst_stride; w = width; while (w && (uintptr_t)dst & 15) { __m128i vd; vd = unpack_32_1x128 (*dst); *dst = pack_1x128_32 (over_1x128 (vd, expand_alpha_1x128 (vd), xmm_src)); w--; dst++; } while (w >= 4) { __m128i tmp_lo, tmp_hi; xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dsta_lo, &xmm_dsta_hi); tmp_lo = xmm_src; tmp_hi = xmm_src; over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_dsta_lo, &xmm_dsta_hi, &tmp_lo, &tmp_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (tmp_lo, tmp_hi)); w -= 4; dst += 4; } while (w) { __m128i vd; vd = unpack_32_1x128 (*dst); *dst = pack_1x128_32 (over_1x128 (vd, expand_alpha_1x128 (vd), xmm_src)); w--; dst++; } } } static void sse2_composite_over_8888_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *src, *src_line, s; uint32_t *dst, *dst_line, d; uint32_t *mask, *mask_line; uint32_t m; int src_stride, mask_stride, dst_stride; int32_t w; __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { src = src_line; src_line += src_stride; dst = dst_line; dst_line += dst_stride; mask = mask_line; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { uint32_t sa; s = *src++; m = (*mask++) >> 24; d = *dst; sa = s >> 24; if (m) { if (sa == 0xff && m == 0xff) { *dst = s; } else { __m128i ms, md, ma, msa; ma = expand_alpha_rev_1x128 (load_32_1x128 (m)); ms = unpack_32_1x128 (s); 
md = unpack_32_1x128 (d); msa = expand_alpha_rev_1x128 (load_32_1x128 (sa)); *dst = pack_1x128_32 (in_over_1x128 (&ms, &msa, &ma, &md)); } } dst++; w--; } while (w >= 4) { xmm_mask = load_128_unaligned ((__m128i*)mask); if (!is_transparent (xmm_mask)) { xmm_src = load_128_unaligned ((__m128i*)src); if (is_opaque (xmm_mask) && is_opaque (xmm_src)) { save_128_aligned ((__m128i *)dst, xmm_src); } else { xmm_dst = load_128_aligned ((__m128i *)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi); expand_alpha_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } } src += 4; dst += 4; mask += 4; w -= 4; } while (w) { uint32_t sa; s = *src++; m = (*mask++) >> 24; d = *dst; sa = s >> 24; if (m) { if (sa == 0xff && m == 0xff) { *dst = s; } else { __m128i ms, md, ma, msa; ma = expand_alpha_rev_1x128 (load_32_1x128 (m)); ms = unpack_32_1x128 (s); md = unpack_32_1x128 (d); msa = expand_alpha_rev_1x128 (load_32_1x128 (sa)); *dst = pack_1x128_32 (in_over_1x128 (&ms, &msa, &ma, &md)); } } dst++; w--; } } } /* A variant of 'sse2_combine_over_u' with minor tweaks */ static force_inline void scaled_nearest_scanline_sse2_8888_8888_OVER (uint32_t* pd, const uint32_t* ps, int32_t w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t src_width_fixed, pixman_bool_t fully_transparent_src) { uint32_t s, d; const uint32_t* pm = NULL; __m128i xmm_dst_lo, xmm_dst_hi; __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_alpha_lo, xmm_alpha_hi; if (fully_transparent_src) return; /* Align dst on a 16-byte boundary */ while (w && ((uintptr_t)pd & 15)) { d = *pd; s = combine1 (ps + pixman_fixed_to_int (vx), 
	              pm);
	vx += unit_x;
	/* Wrap vx back into the source width (NORMAL repeat). */
	while (vx >= 0)
	    vx -= src_width_fixed;

	*pd++ = core_combine_over_u_pixel_sse2 (s, d);
	if (pm)
	    pm++;
	w--;
    }

    /* Main loop: gather four nearest-neighbour source pixels, then OVER
     * them onto the (now 16-byte aligned) destination in one go.
     */
    while (w >= 4)
    {
	__m128i tmp;
	uint32_t tmp1, tmp2, tmp3, tmp4;

	tmp1 = *(ps + pixman_fixed_to_int (vx));
	vx += unit_x;
	while (vx >= 0)
	    vx -= src_width_fixed;
	tmp2 = *(ps + pixman_fixed_to_int (vx));
	vx += unit_x;
	while (vx >= 0)
	    vx -= src_width_fixed;
	tmp3 = *(ps + pixman_fixed_to_int (vx));
	vx += unit_x;
	while (vx >= 0)
	    vx -= src_width_fixed;
	tmp4 = *(ps + pixman_fixed_to_int (vx));
	vx += unit_x;
	while (vx >= 0)
	    vx -= src_width_fixed;

	tmp = _mm_set_epi32 (tmp4, tmp3, tmp2, tmp1);

	xmm_src_hi = combine4 ((__m128i*)&tmp, (__m128i*)pm);

	if (is_opaque (xmm_src_hi))
	{
	    /* All four source pixels opaque: straight copy. */
	    save_128_aligned ((__m128i*)pd, xmm_src_hi);
	}
	else if (!is_zero (xmm_src_hi))
	{
	    /* Mixed alpha: full OVER blend of the four pixels. */
	    xmm_dst_hi = load_128_aligned ((__m128i*) pd);

	    unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
	    unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);

	    expand_alpha_2x128 (
		xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);

	    over_2x128 (&xmm_src_lo, &xmm_src_hi,
			&xmm_alpha_lo, &xmm_alpha_hi,
			&xmm_dst_lo, &xmm_dst_hi);

	    /* rebuild the 4 pixel data and save */
	    save_128_aligned ((__m128i*)pd,
			      pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
	}

	w -= 4;
	pd += 4;
	if (pm)
	    pm += 4;
    }

    /* Tail: remaining pixels, one at a time. */
    while (w)
    {
	d = *pd;
	s = combine1 (ps + pixman_fixed_to_int (vx), pm);
	vx += unit_x;
	while (vx >= 0)
	    vx -= src_width_fixed;

	*pd++ = core_combine_over_u_pixel_sse2 (s, d);
	if (pm)
	    pm++;

	w--;
    }
}

/* Instantiate the nearest-scaled OVER fast paths for each repeat mode. */
FAST_NEAREST_MAINLOOP (sse2_8888_8888_cover_OVER,
		       scaled_nearest_scanline_sse2_8888_8888_OVER,
		       uint32_t, uint32_t, COVER)
FAST_NEAREST_MAINLOOP (sse2_8888_8888_none_OVER,
		       scaled_nearest_scanline_sse2_8888_8888_OVER,
		       uint32_t, uint32_t, NONE)
FAST_NEAREST_MAINLOOP (sse2_8888_8888_pad_OVER,
		       scaled_nearest_scanline_sse2_8888_8888_OVER,
		       uint32_t, uint32_t, PAD)
FAST_NEAREST_MAINLOOP (sse2_8888_8888_normal_OVER,
		       scaled_nearest_scanline_sse2_8888_8888_OVER,
		       uint32_t, uint32_t, NORMAL)

static force_inline void
scaled_nearest_scanline_sse2_8888_n_8888_OVER (const uint32_t * mask, uint32_t * dst, const uint32_t * src, int32_t w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t src_width_fixed, pixman_bool_t zero_src) { __m128i xmm_mask; __m128i xmm_src, xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_lo, xmm_alpha_hi; if (zero_src || (*mask >> 24) == 0) return; xmm_mask = create_mask_16_128 (*mask >> 24); while (w && (uintptr_t)dst & 15) { uint32_t s = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; if (s) { uint32_t d = *dst; __m128i ms = unpack_32_1x128 (s); __m128i alpha = expand_alpha_1x128 (ms); __m128i dest = xmm_mask; __m128i alpha_dst = unpack_32_1x128 (d); *dst = pack_1x128_32 ( in_over_1x128 (&ms, &alpha, &dest, &alpha_dst)); } dst++; w--; } while (w >= 4) { uint32_t tmp1, tmp2, tmp3, tmp4; tmp1 = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; tmp2 = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; tmp3 = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; tmp4 = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; xmm_src = _mm_set_epi32 (tmp4, tmp3, tmp2, tmp1); if (!is_zero (xmm_src)) { xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } dst += 4; w -= 4; } while (w) { uint32_t s = *(src + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; if (s) { uint32_t d = *dst; __m128i ms = unpack_32_1x128 (s); __m128i alpha = expand_alpha_1x128 
(ms); __m128i mask = xmm_mask; __m128i dest = unpack_32_1x128 (d); *dst = pack_1x128_32 ( in_over_1x128 (&ms, &alpha, &mask, &dest)); } dst++; w--; } } FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_cover_OVER, scaled_nearest_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, COVER, TRUE, TRUE) FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_pad_OVER, scaled_nearest_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, PAD, TRUE, TRUE) FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_none_OVER, scaled_nearest_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, NONE, TRUE, TRUE) FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_normal_OVER, scaled_nearest_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, NORMAL, TRUE, TRUE) #if PSHUFD_IS_FAST /***********************************************************************************/ # define BILINEAR_DECLARE_VARIABLES \ const __m128i xmm_wt = _mm_set_epi16 (wt, wt, wt, wt, wt, wt, wt, wt); \ const __m128i xmm_wb = _mm_set_epi16 (wb, wb, wb, wb, wb, wb, wb, wb); \ const __m128i xmm_addc = _mm_set_epi16 (0, 1, 0, 1, 0, 1, 0, 1); \ const __m128i xmm_ux1 = _mm_set_epi16 (unit_x, -unit_x, unit_x, -unit_x, \ unit_x, -unit_x, unit_x, -unit_x); \ const __m128i xmm_ux4 = _mm_set_epi16 (unit_x * 4, -unit_x * 4, \ unit_x * 4, -unit_x * 4, \ unit_x * 4, -unit_x * 4, \ unit_x * 4, -unit_x * 4); \ const __m128i xmm_zero = _mm_setzero_si128 (); \ __m128i xmm_x = _mm_set_epi16 (vx + unit_x * 3, -(vx + 1) - unit_x * 3, \ vx + unit_x * 2, -(vx + 1) - unit_x * 2, \ vx + unit_x * 1, -(vx + 1) - unit_x * 1, \ vx + unit_x * 0, -(vx + 1) - unit_x * 0); \ __m128i xmm_wh_state; #define BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER(pix, phase_) \ do { \ int phase = phase_; \ __m128i xmm_wh, xmm_a, xmm_b; \ /* fetch 2x2 pixel block into sse2 registers */ \ __m128i tltr = _mm_loadl_epi64 ((__m128i *)&src_top[vx >> 16]); \ __m128i blbr = _mm_loadl_epi64 ((__m128i *)&src_bottom[vx >> 16]); \ vx += unit_x; \ /* vertical 
interpolation */ \ xmm_a = _mm_mullo_epi16 (_mm_unpacklo_epi8 (tltr, xmm_zero), xmm_wt); \ xmm_b = _mm_mullo_epi16 (_mm_unpacklo_epi8 (blbr, xmm_zero), xmm_wb); \ xmm_a = _mm_add_epi16 (xmm_a, xmm_b); \ /* calculate horizontal weights */ \ if (phase <= 0) \ { \ xmm_wh_state = _mm_add_epi16 (xmm_addc, _mm_srli_epi16 (xmm_x, \ 16 - BILINEAR_INTERPOLATION_BITS)); \ xmm_x = _mm_add_epi16 (xmm_x, (phase < 0) ? xmm_ux1 : xmm_ux4); \ phase = 0; \ } \ xmm_wh = _mm_shuffle_epi32 (xmm_wh_state, _MM_SHUFFLE (phase, phase, \ phase, phase)); \ /* horizontal interpolation */ \ xmm_a = _mm_madd_epi16 (_mm_unpackhi_epi16 (_mm_shuffle_epi32 ( \ xmm_a, _MM_SHUFFLE (1, 0, 3, 2)), xmm_a), xmm_wh); \ /* shift the result */ \ pix = _mm_srli_epi32 (xmm_a, BILINEAR_INTERPOLATION_BITS * 2); \ } while (0) #else /************************************************************************/ # define BILINEAR_DECLARE_VARIABLES \ const __m128i xmm_wt = _mm_set_epi16 (wt, wt, wt, wt, wt, wt, wt, wt); \ const __m128i xmm_wb = _mm_set_epi16 (wb, wb, wb, wb, wb, wb, wb, wb); \ const __m128i xmm_addc = _mm_set_epi16 (0, 1, 0, 1, 0, 1, 0, 1); \ const __m128i xmm_ux1 = _mm_set_epi16 (unit_x, -unit_x, unit_x, -unit_x, \ unit_x, -unit_x, unit_x, -unit_x); \ const __m128i xmm_ux4 = _mm_set_epi16 (unit_x * 4, -unit_x * 4, \ unit_x * 4, -unit_x * 4, \ unit_x * 4, -unit_x * 4, \ unit_x * 4, -unit_x * 4); \ const __m128i xmm_zero = _mm_setzero_si128 (); \ __m128i xmm_x = _mm_set_epi16 (vx, -(vx + 1), vx, -(vx + 1), \ vx, -(vx + 1), vx, -(vx + 1)) #define BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER(pix, phase) \ do { \ __m128i xmm_wh, xmm_a, xmm_b; \ /* fetch 2x2 pixel block into sse2 registers */ \ __m128i tltr = _mm_loadl_epi64 ((__m128i *)&src_top[vx >> 16]); \ __m128i blbr = _mm_loadl_epi64 ((__m128i *)&src_bottom[vx >> 16]); \ (void)xmm_ux4; /* suppress warning: unused variable 'xmm_ux4' */ \ vx += unit_x; \ /* vertical interpolation */ \ xmm_a = _mm_mullo_epi16 (_mm_unpacklo_epi8 (tltr, xmm_zero), xmm_wt); \ 
xmm_b = _mm_mullo_epi16 (_mm_unpacklo_epi8 (blbr, xmm_zero), xmm_wb); \ xmm_a = _mm_add_epi16 (xmm_a, xmm_b); \ /* calculate horizontal weights */ \ xmm_wh = _mm_add_epi16 (xmm_addc, _mm_srli_epi16 (xmm_x, \ 16 - BILINEAR_INTERPOLATION_BITS)); \ xmm_x = _mm_add_epi16 (xmm_x, xmm_ux1); \ /* horizontal interpolation */ \ xmm_b = _mm_unpacklo_epi64 (/* any value is fine here */ xmm_b, xmm_a); \ xmm_a = _mm_madd_epi16 (_mm_unpackhi_epi16 (xmm_b, xmm_a), xmm_wh); \ /* shift the result */ \ pix = _mm_srli_epi32 (xmm_a, BILINEAR_INTERPOLATION_BITS * 2); \ } while (0) /***********************************************************************************/ #endif #define BILINEAR_INTERPOLATE_ONE_PIXEL(pix); \ do { \ __m128i xmm_pix; \ BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER (xmm_pix, -1); \ xmm_pix = _mm_packs_epi32 (xmm_pix, xmm_pix); \ xmm_pix = _mm_packus_epi16 (xmm_pix, xmm_pix); \ pix = _mm_cvtsi128_si32 (xmm_pix); \ } while(0) #define BILINEAR_INTERPOLATE_FOUR_PIXELS(pix); \ do { \ __m128i xmm_pix1, xmm_pix2, xmm_pix3, xmm_pix4; \ BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER (xmm_pix1, 0); \ BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER (xmm_pix2, 1); \ BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER (xmm_pix3, 2); \ BILINEAR_INTERPOLATE_ONE_PIXEL_HELPER (xmm_pix4, 3); \ xmm_pix1 = _mm_packs_epi32 (xmm_pix1, xmm_pix2); \ xmm_pix3 = _mm_packs_epi32 (xmm_pix3, xmm_pix4); \ pix = _mm_packus_epi16 (xmm_pix1, xmm_pix3); \ } while(0) #define BILINEAR_SKIP_ONE_PIXEL() \ do { \ vx += unit_x; \ xmm_x = _mm_add_epi16 (xmm_x, xmm_ux1); \ } while(0) #define BILINEAR_SKIP_FOUR_PIXELS() \ do { \ vx += unit_x * 4; \ xmm_x = _mm_add_epi16 (xmm_x, xmm_ux4); \ } while(0) /***********************************************************************************/ static force_inline void scaled_bilinear_scanline_sse2_8888_8888_SRC (uint32_t * dst, const uint32_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx_, pixman_fixed_t unit_x_, pixman_fixed_t max_vx, 
pixman_bool_t zero_src) { intptr_t vx = vx_; intptr_t unit_x = unit_x_; BILINEAR_DECLARE_VARIABLES; uint32_t pix1, pix2; while (w && ((uintptr_t)dst & 15)) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); *dst++ = pix1; w--; } while ((w -= 4) >= 0) { __m128i xmm_src; BILINEAR_INTERPOLATE_FOUR_PIXELS (xmm_src); _mm_store_si128 ((__m128i *)dst, xmm_src); dst += 4; } if (w & 2) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); BILINEAR_INTERPOLATE_ONE_PIXEL (pix2); *dst++ = pix1; *dst++ = pix2; } if (w & 1) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); *dst = pix1; } } FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_cover_SRC, scaled_bilinear_scanline_sse2_8888_8888_SRC, uint32_t, uint32_t, uint32_t, COVER, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_pad_SRC, scaled_bilinear_scanline_sse2_8888_8888_SRC, uint32_t, uint32_t, uint32_t, PAD, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_none_SRC, scaled_bilinear_scanline_sse2_8888_8888_SRC, uint32_t, uint32_t, uint32_t, NONE, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_normal_SRC, scaled_bilinear_scanline_sse2_8888_8888_SRC, uint32_t, uint32_t, uint32_t, NORMAL, FLAG_NONE) static force_inline void scaled_bilinear_scanline_sse2_x888_8888_SRC (uint32_t * dst, const uint32_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx_, pixman_fixed_t unit_x_, pixman_fixed_t max_vx, pixman_bool_t zero_src) { intptr_t vx = vx_; intptr_t unit_x = unit_x_; BILINEAR_DECLARE_VARIABLES; uint32_t pix1, pix2; while (w && ((uintptr_t)dst & 15)) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); *dst++ = pix1 | 0xFF000000; w--; } while ((w -= 4) >= 0) { __m128i xmm_src; BILINEAR_INTERPOLATE_FOUR_PIXELS (xmm_src); _mm_store_si128 ((__m128i *)dst, _mm_or_si128 (xmm_src, mask_ff000000)); dst += 4; } if (w & 2) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); BILINEAR_INTERPOLATE_ONE_PIXEL (pix2); *dst++ = pix1 | 0xFF000000; *dst++ = pix2 | 0xFF000000; } if (w & 1) { BILINEAR_INTERPOLATE_ONE_PIXEL 
(pix1); *dst = pix1 | 0xFF000000; } } FAST_BILINEAR_MAINLOOP_COMMON (sse2_x888_8888_cover_SRC, scaled_bilinear_scanline_sse2_x888_8888_SRC, uint32_t, uint32_t, uint32_t, COVER, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_x888_8888_pad_SRC, scaled_bilinear_scanline_sse2_x888_8888_SRC, uint32_t, uint32_t, uint32_t, PAD, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_x888_8888_normal_SRC, scaled_bilinear_scanline_sse2_x888_8888_SRC, uint32_t, uint32_t, uint32_t, NORMAL, FLAG_NONE) static force_inline void scaled_bilinear_scanline_sse2_8888_8888_OVER (uint32_t * dst, const uint32_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx_, pixman_fixed_t unit_x_, pixman_fixed_t max_vx, pixman_bool_t zero_src) { intptr_t vx = vx_; intptr_t unit_x = unit_x_; BILINEAR_DECLARE_VARIABLES; uint32_t pix1, pix2; while (w && ((uintptr_t)dst & 15)) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); if (pix1) { pix2 = *dst; *dst = core_combine_over_u_pixel_sse2 (pix1, pix2); } w--; dst++; } while (w >= 4) { __m128i xmm_src; __m128i xmm_src_hi, xmm_src_lo, xmm_dst_hi, xmm_dst_lo; __m128i xmm_alpha_hi, xmm_alpha_lo; BILINEAR_INTERPOLATE_FOUR_PIXELS (xmm_src); if (!is_zero (xmm_src)) { if (is_opaque (xmm_src)) { save_128_aligned ((__m128i *)dst, xmm_src); } else { __m128i xmm_dst = load_128_aligned ((__m128i *)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i *)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } } w -= 4; dst += 4; } while (w) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); if (pix1) { pix2 = *dst; *dst = core_combine_over_u_pixel_sse2 (pix1, pix2); } w--; dst++; } } FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_cover_OVER, scaled_bilinear_scanline_sse2_8888_8888_OVER, 
uint32_t, uint32_t, uint32_t, COVER, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_pad_OVER, scaled_bilinear_scanline_sse2_8888_8888_OVER, uint32_t, uint32_t, uint32_t, PAD, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_none_OVER, scaled_bilinear_scanline_sse2_8888_8888_OVER, uint32_t, uint32_t, uint32_t, NONE, FLAG_NONE) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8888_normal_OVER, scaled_bilinear_scanline_sse2_8888_8888_OVER, uint32_t, uint32_t, uint32_t, NORMAL, FLAG_NONE) static force_inline void scaled_bilinear_scanline_sse2_8888_8_8888_OVER (uint32_t * dst, const uint8_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx_, pixman_fixed_t unit_x_, pixman_fixed_t max_vx, pixman_bool_t zero_src) { intptr_t vx = vx_; intptr_t unit_x = unit_x_; BILINEAR_DECLARE_VARIABLES; uint32_t pix1, pix2; while (w && ((uintptr_t)dst & 15)) { uint32_t sa; uint8_t m = *mask++; if (m) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); sa = pix1 >> 24; if (sa == 0xff && m == 0xff) { *dst = pix1; } else { __m128i ms, md, ma, msa; pix2 = *dst; ma = expand_alpha_rev_1x128 (load_32_1x128 (m)); ms = unpack_32_1x128 (pix1); md = unpack_32_1x128 (pix2); msa = expand_alpha_rev_1x128 (load_32_1x128 (sa)); *dst = pack_1x128_32 (in_over_1x128 (&ms, &msa, &ma, &md)); } } else { BILINEAR_SKIP_ONE_PIXEL (); } w--; dst++; } while (w >= 4) { uint32_t m; __m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi; memcpy(&m, mask, sizeof(uint32_t)); if (m) { BILINEAR_INTERPOLATE_FOUR_PIXELS (xmm_src); if (m == 0xffffffff && is_opaque (xmm_src)) { save_128_aligned ((__m128i *)dst, xmm_src); } else { xmm_dst = load_128_aligned ((__m128i *)dst); xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128()); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi); unpack_128_2x128 
(xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi); expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_srca_lo, &xmm_srca_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } } else { BILINEAR_SKIP_FOUR_PIXELS (); } w -= 4; dst += 4; mask += 4; } while (w) { uint32_t sa; uint8_t m = *mask++; if (m) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); sa = pix1 >> 24; if (sa == 0xff && m == 0xff) { *dst = pix1; } else { __m128i ms, md, ma, msa; pix2 = *dst; ma = expand_alpha_rev_1x128 (load_32_1x128 (m)); ms = unpack_32_1x128 (pix1); md = unpack_32_1x128 (pix2); msa = expand_alpha_rev_1x128 (load_32_1x128 (sa)); *dst = pack_1x128_32 (in_over_1x128 (&ms, &msa, &ma, &md)); } } else { BILINEAR_SKIP_ONE_PIXEL (); } w--; dst++; } } FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8_8888_cover_OVER, scaled_bilinear_scanline_sse2_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, COVER, FLAG_HAVE_NON_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8_8888_pad_OVER, scaled_bilinear_scanline_sse2_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, PAD, FLAG_HAVE_NON_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8_8888_none_OVER, scaled_bilinear_scanline_sse2_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, NONE, FLAG_HAVE_NON_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_8_8888_normal_OVER, scaled_bilinear_scanline_sse2_8888_8_8888_OVER, uint32_t, uint8_t, uint32_t, NORMAL, FLAG_HAVE_NON_SOLID_MASK) static force_inline void scaled_bilinear_scanline_sse2_8888_n_8888_OVER (uint32_t * dst, const uint32_t * mask, const uint32_t * src_top, const uint32_t * src_bottom, int32_t w, int wt, int wb, pixman_fixed_t vx_, pixman_fixed_t unit_x_, pixman_fixed_t max_vx, pixman_bool_t zero_src) { intptr_t vx = vx_; intptr_t unit_x = unit_x_; BILINEAR_DECLARE_VARIABLES; uint32_t pix1; __m128i 
xmm_mask; if (zero_src || (*mask >> 24) == 0) return; xmm_mask = create_mask_16_128 (*mask >> 24); while (w && ((uintptr_t)dst & 15)) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); if (pix1) { uint32_t d = *dst; __m128i ms = unpack_32_1x128 (pix1); __m128i alpha = expand_alpha_1x128 (ms); __m128i dest = xmm_mask; __m128i alpha_dst = unpack_32_1x128 (d); *dst = pack_1x128_32 (in_over_1x128 (&ms, &alpha, &dest, &alpha_dst)); } dst++; w--; } while (w >= 4) { __m128i xmm_src; BILINEAR_INTERPOLATE_FOUR_PIXELS (xmm_src); if (!is_zero (xmm_src)) { __m128i xmm_src_lo, xmm_src_hi; __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi; __m128i xmm_alpha_lo, xmm_alpha_hi; xmm_dst = load_128_aligned ((__m128i*)dst); unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi); unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi); expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi); in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi); save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi)); } dst += 4; w -= 4; } while (w) { BILINEAR_INTERPOLATE_ONE_PIXEL (pix1); if (pix1) { uint32_t d = *dst; __m128i ms = unpack_32_1x128 (pix1); __m128i alpha = expand_alpha_1x128 (ms); __m128i dest = xmm_mask; __m128i alpha_dst = unpack_32_1x128 (d); *dst = pack_1x128_32 (in_over_1x128 (&ms, &alpha, &dest, &alpha_dst)); } dst++; w--; } } FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_n_8888_cover_OVER, scaled_bilinear_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, COVER, FLAG_HAVE_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_n_8888_pad_OVER, scaled_bilinear_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, PAD, FLAG_HAVE_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_n_8888_none_OVER, scaled_bilinear_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, NONE, FLAG_HAVE_SOLID_MASK) FAST_BILINEAR_MAINLOOP_COMMON (sse2_8888_n_8888_normal_OVER, 
scaled_bilinear_scanline_sse2_8888_n_8888_OVER, uint32_t, uint32_t, uint32_t, NORMAL, FLAG_HAVE_SOLID_MASK) static const pixman_fast_path_t sse2_fast_paths[] = { /* PIXMAN_OP_OVER */ PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, sse2_composite_over_n_8_0565), PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, sse2_composite_over_n_8_0565), PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, sse2_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, sse2_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, sse2_composite_over_n_0565), PIXMAN_STD_FAST_PATH (OVER, solid, null, b5g6r5, sse2_composite_over_n_0565), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, sse2_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, sse2_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, sse2_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, sse2_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, sse2_composite_over_8888_0565), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, sse2_composite_over_8888_0565), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, sse2_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, sse2_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, sse2_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, sse2_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, sse2_composite_over_8888_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, sse2_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, sse2_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, sse2_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, sse2_composite_over_8888_8_8888), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, 
sse2_composite_over_x888_8_8888), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, sse2_composite_over_x888_8_8888), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, sse2_composite_over_x888_8_8888), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, sse2_composite_over_x888_8_8888), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, sse2_composite_over_x888_n_8888), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, sse2_composite_over_x888_n_8888), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, a8b8g8r8, sse2_composite_over_x888_n_8888), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, sse2_composite_over_x888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, sse2_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, sse2_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, sse2_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, sse2_composite_over_8888_n_8888), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, sse2_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, sse2_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, sse2_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, sse2_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, sse2_composite_over_n_8888_0565_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, sse2_composite_over_n_8888_0565_ca), PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, sse2_composite_over_pixbuf_8888), PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, sse2_composite_over_pixbuf_8888), PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, sse2_composite_over_pixbuf_8888), PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, sse2_composite_over_pixbuf_8888), PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, r5g6b5, 
sse2_composite_over_pixbuf_0565), PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, sse2_composite_over_pixbuf_0565), PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), /* PIXMAN_OP_OVER_REVERSE */ PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, sse2_composite_over_reverse_n_8888), PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, sse2_composite_over_reverse_n_8888), /* PIXMAN_OP_ADD */ PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, sse2_composite_add_n_8888_8888_ca), PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, sse2_composite_add_8_8), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, sse2_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, sse2_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, sse2_composite_add_n_8_8), PIXMAN_STD_FAST_PATH (ADD, solid, null, a8, sse2_composite_add_n_8), PIXMAN_STD_FAST_PATH (ADD, solid, null, x8r8g8b8, sse2_composite_add_n_8888), PIXMAN_STD_FAST_PATH (ADD, solid, null, a8r8g8b8, sse2_composite_add_n_8888), PIXMAN_STD_FAST_PATH (ADD, solid, null, x8b8g8r8, sse2_composite_add_n_8888), PIXMAN_STD_FAST_PATH (ADD, solid, null, a8b8g8r8, sse2_composite_add_n_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, x8r8g8b8, sse2_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8r8g8b8, sse2_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, x8b8g8r8, sse2_composite_add_n_8_8888), PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8b8g8r8, sse2_composite_add_n_8_8888), /* PIXMAN_OP_SRC */ PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, sse2_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, sse2_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, sse2_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, sse2_composite_src_n_8_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, 
sse2_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, sse2_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, sse2_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, sse2_composite_src_x888_0565), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, sse2_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, sse2_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, sse2_composite_copy_area), PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, sse2_composite_copy_area), /* PIXMAN_OP_IN */ PIXMAN_STD_FAST_PATH (IN, a8, null, a8, sse2_composite_in_8_8), PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, sse2_composite_in_n_8_8), PIXMAN_STD_FAST_PATH (IN, solid, null, a8, sse2_composite_in_n_8), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_n_8888), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_n_8888), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_n_8888), SIMPLE_NEAREST_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_n_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, 
a8r8g8b8, x8r8g8b8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH_COVER (SRC, x8r8g8b8, a8r8g8b8, sse2_x888_8888), SIMPLE_BILINEAR_FAST_PATH_COVER (SRC, x8b8g8r8, a8b8g8r8, sse2_x888_8888), SIMPLE_BILINEAR_FAST_PATH_PAD (SRC, x8r8g8b8, a8r8g8b8, sse2_x888_8888), SIMPLE_BILINEAR_FAST_PATH_PAD (SRC, x8b8g8r8, a8b8g8r8, sse2_x888_8888), SIMPLE_BILINEAR_FAST_PATH_NORMAL (SRC, x8r8g8b8, a8r8g8b8, sse2_x888_8888), SIMPLE_BILINEAR_FAST_PATH_NORMAL (SRC, x8b8g8r8, a8b8g8r8, sse2_x888_8888), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8888), SIMPLE_BILINEAR_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8888), SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_n_8888), SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_n_8888), SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_n_8888), SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_n_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, sse2_8888_8_8888), SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, sse2_8888_8_8888), { PIXMAN_OP_NONE }, }; static uint32_t * sse2_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; __m128i ff000000 = mask_ff000000; uint32_t *dst = iter->buffer; uint32_t *src = (uint32_t *)iter->bits; iter->bits += iter->stride; while (w && ((uintptr_t)dst) & 0x0f) { *dst++ 
= (*src++) | 0xff000000; w--; } while (w >= 4) { save_128_aligned ( (__m128i *)dst, _mm_or_si128 ( load_128_unaligned ((__m128i *)src), ff000000)); dst += 4; src += 4; w -= 4; } while (w) { *dst++ = (*src++) | 0xff000000; w--; } return iter->buffer; } static uint32_t * sse2_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; uint32_t *dst = iter->buffer; uint16_t *src = (uint16_t *)iter->bits; __m128i ff000000 = mask_ff000000; iter->bits += iter->stride; while (w && ((uintptr_t)dst) & 0x0f) { uint16_t s = *src++; *dst++ = convert_0565_to_8888 (s); w--; } while (w >= 8) { __m128i lo, hi, s; s = _mm_loadu_si128 ((__m128i *)src); lo = unpack_565_to_8888 (_mm_unpacklo_epi16 (s, _mm_setzero_si128 ())); hi = unpack_565_to_8888 (_mm_unpackhi_epi16 (s, _mm_setzero_si128 ())); save_128_aligned ((__m128i *)(dst + 0), _mm_or_si128 (lo, ff000000)); save_128_aligned ((__m128i *)(dst + 4), _mm_or_si128 (hi, ff000000)); dst += 8; src += 8; w -= 8; } while (w) { uint16_t s = *src++; *dst++ = convert_0565_to_8888 (s); w--; } return iter->buffer; } static uint32_t * sse2_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; uint32_t *dst = iter->buffer; uint8_t *src = iter->bits; __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6; iter->bits += iter->stride; while (w && (((uintptr_t)dst) & 15)) { *dst++ = (uint32_t)(*(src++)) << 24; w--; } while (w >= 16) { xmm0 = _mm_loadu_si128((__m128i *)src); xmm1 = _mm_unpacklo_epi8 (_mm_setzero_si128(), xmm0); xmm2 = _mm_unpackhi_epi8 (_mm_setzero_si128(), xmm0); xmm3 = _mm_unpacklo_epi16 (_mm_setzero_si128(), xmm1); xmm4 = _mm_unpackhi_epi16 (_mm_setzero_si128(), xmm1); xmm5 = _mm_unpacklo_epi16 (_mm_setzero_si128(), xmm2); xmm6 = _mm_unpackhi_epi16 (_mm_setzero_si128(), xmm2); _mm_store_si128(((__m128i *)(dst + 0)), xmm3); _mm_store_si128(((__m128i *)(dst + 4)), xmm4); _mm_store_si128(((__m128i *)(dst + 8)), xmm5); _mm_store_si128(((__m128i *)(dst + 12)), xmm6); dst += 16; src += 16; w -= 16; } 
while (w) { *dst++ = (uint32_t)(*(src++)) << 24; w--; } return iter->buffer; } #define IMAGE_FLAGS \ (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \ FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST) static const pixman_iter_info_t sse2_iters[] = { { PIXMAN_x8r8g8b8, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, sse2_fetch_x8r8g8b8, NULL }, { PIXMAN_r5g6b5, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, sse2_fetch_r5g6b5, NULL }, { PIXMAN_a8, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, sse2_fetch_a8, NULL }, { PIXMAN_null }, }; #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__) __attribute__((__force_align_arg_pointer__)) #endif pixman_implementation_t * _pixman_implementation_create_sse2 (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, sse2_fast_paths); /* SSE2 constants */ mask_565_r = create_mask_2x32_128 (0x00f80000, 0x00f80000); mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000); mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0); mask_565_b = create_mask_2x32_128 (0x0000001f, 0x0000001f); mask_red = create_mask_2x32_128 (0x00f80000, 0x00f80000); mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00); mask_blue = create_mask_2x32_128 (0x000000f8, 0x000000f8); mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0); mask_565_fix_g = create_mask_2x32_128 (0x0000c000, 0x0000c000); mask_0080 = create_mask_16_128 (0x0080); mask_00ff = create_mask_16_128 (0x00ff); mask_0101 = create_mask_16_128 (0x0101); mask_ffff = create_mask_16_128 (0xffff); mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000); mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000); mask_565_rb = create_mask_2x32_128 (0x00f800f8, 0x00f800f8); mask_565_pack_multiplier = create_mask_2x32_128 (0x20000004, 0x20000004); /* Set up function pointers */ imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u; 
imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u; imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u; imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u; imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u; imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u; imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u; imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u; imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u; imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u; imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u; imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca; imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca; imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca; imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca; imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca; imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca; imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca; imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca; imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca; imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca; imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca; imp->blt = sse2_blt; imp->fill = sse2_fill; imp->iter_info = sse2_iters; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-ssse3.c0000664000175000017500000002266114712446423017117 0ustar00mattst88mattst88/* * Copyright Âİ 2013 Soren Sandmann Pedersen * Copyright Âİ 2013 Red Hat, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Soren Sandmann (soren.sandmann@gmail.com) */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include "pixman-private.h" #include "pixman-inlines.h" typedef struct { int y; uint64_t * buffer; } line_t; typedef struct { line_t lines[2]; pixman_fixed_t y; pixman_fixed_t x; uint64_t data[1]; } bilinear_info_t; static void ssse3_fetch_horizontal (bits_image_t *image, line_t *line, int y, pixman_fixed_t x, pixman_fixed_t ux, int n) { uint32_t *bits = image->bits + y * image->rowstride; __m128i vx = _mm_set_epi16 ( - (x + 1), x, - (x + 1), x, - (x + ux + 1), x + ux, - (x + ux + 1), x + ux); __m128i vux = _mm_set_epi16 ( - 2 * ux, 2 * ux, - 2 * ux, 2 * ux, - 2 * ux, 2 * ux, - 2 * ux, 2 * ux); __m128i vaddc = _mm_set_epi16 (1, 0, 1, 0, 1, 0, 1, 0); __m128i *b = (__m128i *)line->buffer; __m128i vrl0, vrl1; while ((n -= 2) >= 0) { __m128i vw, vr, s; vrl1 = _mm_loadl_epi64 ( (__m128i *)(bits + pixman_fixed_to_int (x + ux))); /* vrl1: R1, L1 */ final_pixel: vrl0 = _mm_loadl_epi64 ( (__m128i *)(bits + pixman_fixed_to_int (x))); /* vrl0: R0, L0 */ /* The weights are based on vx which is a vector of * * - (x + 1), x, - (x + 1), x, * - (x + ux + 1), x + ux, - (x + ux + 1), x + ux * * so the 16 bit weights end up like this: * * iw0, w0, iw0, w0, iw1, w1, iw1, w1 * * and after shifting and packing, we get these bytes: * * iw0, w0, iw0, w0, iw1, w1, iw1, w1, * iw0, w0, iw0, w0, iw1, w1, iw1, w1, * * which means the first and the second input pixel * have to be interleaved like this: * * la0, ra0, lr0, rr0, la1, ra1, lr1, rr1, * lg0, rg0, lb0, rb0, lg1, rg1, lb1, rb1 * * before maddubsw can be used. 
*/ vw = _mm_add_epi16 ( vaddc, _mm_srli_epi16 (vx, 16 - BILINEAR_INTERPOLATION_BITS)); /* vw: iw0, w0, iw0, w0, iw1, w1, iw1, w1 */ vw = _mm_packus_epi16 (vw, vw); /* vw: iw0, w0, iw0, w0, iw1, w1, iw1, w1, * iw0, w0, iw0, w0, iw1, w1, iw1, w1 */ vx = _mm_add_epi16 (vx, vux); x += 2 * ux; vr = _mm_unpacklo_epi16 (vrl1, vrl0); /* vr: rar0, rar1, rgb0, rgb1, lar0, lar1, lgb0, lgb1 */ s = _mm_shuffle_epi32 (vr, _MM_SHUFFLE (1, 0, 3, 2)); /* s: lar0, lar1, lgb0, lgb1, rar0, rar1, rgb0, rgb1 */ vr = _mm_unpackhi_epi8 (vr, s); /* vr: la0, ra0, lr0, rr0, la1, ra1, lr1, rr1, * lg0, rg0, lb0, rb0, lg1, rg1, lb1, rb1 */ vr = _mm_maddubs_epi16 (vr, vw); /* When the weight is 0, the inverse weight is * 128 which can't be represented in a signed byte. * As a result maddubsw computes the following: * * r = l * -128 + r * 0 * * rather than the desired * * r = l * 128 + r * 0 * * We fix this by taking the absolute value of the * result. */ vr = _mm_abs_epi16 (vr); /* vr: A0, R0, A1, R1, G0, B0, G1, B1 */ _mm_store_si128 (b++, vr); } if (n == -1) { vrl1 = _mm_setzero_si128(); goto final_pixel; } line->y = y; } static uint32_t * ssse3_fetch_bilinear_cover (pixman_iter_t *iter, const uint32_t *mask) { pixman_fixed_t fx, ux; bilinear_info_t *info = iter->data; line_t *line0, *line1; int y0, y1; int32_t dist_y; __m128i vw; int i; fx = info->x; ux = iter->image->common.transform->matrix[0][0]; y0 = pixman_fixed_to_int (info->y); y1 = y0 + 1; line0 = &info->lines[y0 & 0x01]; line1 = &info->lines[y1 & 0x01]; if (line0->y != y0) { ssse3_fetch_horizontal ( &iter->image->bits, line0, y0, fx, ux, iter->width); } if (line1->y != y1) { ssse3_fetch_horizontal ( &iter->image->bits, line1, y1, fx, ux, iter->width); } dist_y = pixman_fixed_to_bilinear_weight (info->y); dist_y <<= (16 - BILINEAR_INTERPOLATION_BITS); vw = _mm_set_epi16 ( dist_y, dist_y, dist_y, dist_y, dist_y, dist_y, dist_y, dist_y); for (i = 0; i + 3 < iter->width; i += 4) { __m128i top0 = _mm_load_si128 ((__m128i *)(line0->buffer 
+ i)); __m128i bot0 = _mm_load_si128 ((__m128i *)(line1->buffer + i)); __m128i top1 = _mm_load_si128 ((__m128i *)(line0->buffer + i + 2)); __m128i bot1 = _mm_load_si128 ((__m128i *)(line1->buffer + i + 2)); __m128i r0, r1, tmp, p; r0 = _mm_mulhi_epu16 ( _mm_sub_epi16 (bot0, top0), vw); tmp = _mm_cmplt_epi16 (bot0, top0); tmp = _mm_and_si128 (tmp, vw); r0 = _mm_sub_epi16 (r0, tmp); r0 = _mm_add_epi16 (r0, top0); r0 = _mm_srli_epi16 (r0, BILINEAR_INTERPOLATION_BITS); /* r0: A0 R0 A1 R1 G0 B0 G1 B1 */ r0 = _mm_shuffle_epi32 (r0, _MM_SHUFFLE (2, 0, 3, 1)); /* r0: A1 R1 G1 B1 A0 R0 G0 B0 */ r1 = _mm_mulhi_epu16 ( _mm_sub_epi16 (bot1, top1), vw); tmp = _mm_cmplt_epi16 (bot1, top1); tmp = _mm_and_si128 (tmp, vw); r1 = _mm_sub_epi16 (r1, tmp); r1 = _mm_add_epi16 (r1, top1); r1 = _mm_srli_epi16 (r1, BILINEAR_INTERPOLATION_BITS); r1 = _mm_shuffle_epi32 (r1, _MM_SHUFFLE (2, 0, 3, 1)); /* r1: A3 R3 G3 B3 A2 R2 G2 B2 */ p = _mm_packus_epi16 (r0, r1); _mm_storeu_si128 ((__m128i *)(iter->buffer + i), p); } while (i < iter->width) { __m128i top0 = _mm_load_si128 ((__m128i *)(line0->buffer + i)); __m128i bot0 = _mm_load_si128 ((__m128i *)(line1->buffer + i)); __m128i r0, tmp, p; r0 = _mm_mulhi_epu16 ( _mm_sub_epi16 (bot0, top0), vw); tmp = _mm_cmplt_epi16 (bot0, top0); tmp = _mm_and_si128 (tmp, vw); r0 = _mm_sub_epi16 (r0, tmp); r0 = _mm_add_epi16 (r0, top0); r0 = _mm_srli_epi16 (r0, BILINEAR_INTERPOLATION_BITS); /* r0: A0 R0 A1 R1 G0 B0 G1 B1 */ r0 = _mm_shuffle_epi32 (r0, _MM_SHUFFLE (2, 0, 3, 1)); /* r0: A1 R1 G1 B1 A0 R0 G0 B0 */ p = _mm_packus_epi16 (r0, r0); if (iter->width - i == 1) { *(uint32_t *)(iter->buffer + i) = _mm_cvtsi128_si32 (p); i++; } else { _mm_storel_epi64 ((__m128i *)(iter->buffer + i), p); i += 2; } } info->y += iter->image->common.transform->matrix[1][1]; return iter->buffer; } static void ssse3_bilinear_cover_iter_fini (pixman_iter_t *iter) { free (iter->data); } static void ssse3_bilinear_cover_iter_init (pixman_iter_t *iter, const pixman_iter_info_t 
*iter_info) { int width = iter->width; bilinear_info_t *info; pixman_vector_t v; /* Reference point is the center of the pixel */ v.vector[0] = pixman_int_to_fixed (iter->x) + pixman_fixed_1 / 2; v.vector[1] = pixman_int_to_fixed (iter->y) + pixman_fixed_1 / 2; v.vector[2] = pixman_fixed_1; if (!pixman_transform_point_3d (iter->image->common.transform, &v)) goto fail; info = malloc (sizeof (*info) + (2 * width - 1) * sizeof (uint64_t) + 64); if (!info) goto fail; info->x = v.vector[0] - pixman_fixed_1 / 2; info->y = v.vector[1] - pixman_fixed_1 / 2; #define ALIGN(addr) \ ((void *)((((uintptr_t)(addr)) + 15) & (~15))) /* It is safe to set the y coordinates to -1 initially * because COVER_CLIP_BILINEAR ensures that we will only * be asked to fetch lines in the [0, height) interval */ info->lines[0].y = -1; info->lines[0].buffer = ALIGN (&(info->data[0])); info->lines[1].y = -1; info->lines[1].buffer = ALIGN (info->lines[0].buffer + width); iter->get_scanline = ssse3_fetch_bilinear_cover; iter->fini = ssse3_bilinear_cover_iter_fini; iter->data = info; return; fail: /* Something went wrong, either a bad matrix or OOM; in such cases, * we don't guarantee any particular rendering. 
*/ _pixman_log_error ( FUNC, "Allocation failure or bad matrix, skipping rendering\n"); iter->get_scanline = _pixman_iter_get_scanline_noop; iter->fini = NULL; } static const pixman_iter_info_t ssse3_iters[] = { { PIXMAN_a8r8g8b8, (FAST_PATH_STANDARD_FLAGS | FAST_PATH_SCALE_TRANSFORM | FAST_PATH_BILINEAR_FILTER | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR), ITER_NARROW | ITER_SRC, ssse3_bilinear_cover_iter_init, NULL, NULL }, { PIXMAN_null }, }; static const pixman_fast_path_t ssse3_fast_paths[] = { { PIXMAN_OP_NONE }, }; pixman_implementation_t * _pixman_implementation_create_ssse3 (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, ssse3_fast_paths); imp->iter_info = ssse3_iters; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-timer.c0000664000175000017500000000353114712446423017172 0ustar00mattst88mattst88/* * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Red Hat not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. Red Hat makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. 
* * RED HAT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL RED HAT * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" #ifdef PIXMAN_TIMERS static pixman_timer_t *timers; static void dump_timers (void) { pixman_timer_t *timer; for (timer = timers; timer != NULL; timer = timer->next) { printf ("%s: total: %llu n: %llu avg: %f\n", timer->name, timer->total, timer->n_times, timer->total / (double)timer->n_times); } } void pixman_timer_register (pixman_timer_t *timer) { static int initialized; int atexit (void (*function)(void)); if (!initialized) { atexit (dump_timers); initialized = 1; } timer->next = timers; timers = timer; } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-trap.c0000664000175000017500000004074114712446423017024 0ustar00mattst88mattst88/* * Copyright Âİ 2002 Keith Packard, member of The XFree86 Project, Inc. * Copyright Âİ 2004 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. 
It * is provided "as is" without express or implied warranty. * * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" /* * Compute the smallest value greater than or equal to y which is on a * grid row. */ PIXMAN_EXPORT pixman_fixed_t pixman_sample_ceil_y (pixman_fixed_t y, int n) { pixman_fixed_t f = pixman_fixed_frac (y); pixman_fixed_t i = pixman_fixed_floor (y); f = DIV (f - Y_FRAC_FIRST (n) + (STEP_Y_SMALL (n) - pixman_fixed_e), STEP_Y_SMALL (n)) * STEP_Y_SMALL (n) + Y_FRAC_FIRST (n); if (f > Y_FRAC_LAST (n)) { if (pixman_fixed_to_int (i) == 0x7fff) { f = 0xffff; /* saturate */ } else { f = Y_FRAC_FIRST (n); i += pixman_fixed_1; } } return (i | f); } /* * Compute the largest value strictly less than y which is on a * grid row. 
*/ PIXMAN_EXPORT pixman_fixed_t pixman_sample_floor_y (pixman_fixed_t y, int n) { pixman_fixed_t f = pixman_fixed_frac (y); pixman_fixed_t i = pixman_fixed_floor (y); f = DIV (f - pixman_fixed_e - Y_FRAC_FIRST (n), STEP_Y_SMALL (n)) * STEP_Y_SMALL (n) + Y_FRAC_FIRST (n); if (f < Y_FRAC_FIRST (n)) { if (pixman_fixed_to_int (i) == 0xffff8000) { f = 0; /* saturate */ } else { f = Y_FRAC_LAST (n); i -= pixman_fixed_1; } } return (i | f); } /* * Step an edge by any amount (including negative values) */ PIXMAN_EXPORT void pixman_edge_step (pixman_edge_t *e, int n) { pixman_fixed_48_16_t ne; e->x += n * e->stepx; ne = e->e + n * (pixman_fixed_48_16_t) e->dx; if (n >= 0) { if (ne > 0) { int nx = (ne + e->dy - 1) / e->dy; e->e = ne - nx * (pixman_fixed_48_16_t) e->dy; e->x += nx * e->signdx; } } else { if (ne <= -e->dy) { int nx = (-ne) / e->dy; e->e = ne + nx * (pixman_fixed_48_16_t) e->dy; e->x -= nx * e->signdx; } } } /* * A private routine to initialize the multi-step * elements of an edge structure */ static void _pixman_edge_multi_init (pixman_edge_t * e, int n, pixman_fixed_t *stepx_p, pixman_fixed_t *dx_p) { pixman_fixed_t stepx; pixman_fixed_48_16_t ne; ne = n * (pixman_fixed_48_16_t) e->dx; stepx = n * e->stepx; if (ne > 0) { int nx = ne / e->dy; ne -= nx * (pixman_fixed_48_16_t)e->dy; stepx += nx * e->signdx; } *dx_p = ne; *stepx_p = stepx; } /* * Initialize one edge structure given the line endpoints and a * starting y value */ PIXMAN_EXPORT void pixman_edge_init (pixman_edge_t *e, int n, pixman_fixed_t y_start, pixman_fixed_t x_top, pixman_fixed_t y_top, pixman_fixed_t x_bot, pixman_fixed_t y_bot) { pixman_fixed_t dx, dy; e->x = x_top; e->e = 0; dx = x_bot - x_top; dy = y_bot - y_top; e->dy = dy; e->dx = 0; if (dy) { if (dx >= 0) { e->signdx = 1; e->stepx = dx / dy; e->dx = dx % dy; e->e = -dy; } else { e->signdx = -1; e->stepx = -(-dx / dy); e->dx = -dx % dy; e->e = 0; } _pixman_edge_multi_init (e, STEP_Y_SMALL (n), &e->stepx_small, &e->dx_small); 
_pixman_edge_multi_init (e, STEP_Y_BIG (n), &e->stepx_big, &e->dx_big); } pixman_edge_step (e, y_start - y_top); } /* * Initialize one edge structure given a line, starting y value * and a pixel offset for the line */ PIXMAN_EXPORT void pixman_line_fixed_edge_init (pixman_edge_t * e, int n, pixman_fixed_t y, const pixman_line_fixed_t *line, int x_off, int y_off) { pixman_fixed_t x_off_fixed = pixman_int_to_fixed (x_off); pixman_fixed_t y_off_fixed = pixman_int_to_fixed (y_off); const pixman_point_fixed_t *top, *bot; if (line->p1.y <= line->p2.y) { top = &line->p1; bot = &line->p2; } else { top = &line->p2; bot = &line->p1; } pixman_edge_init (e, n, y, top->x + x_off_fixed, top->y + y_off_fixed, bot->x + x_off_fixed, bot->y + y_off_fixed); } PIXMAN_EXPORT void pixman_add_traps (pixman_image_t * image, int16_t x_off, int16_t y_off, int ntrap, const pixman_trap_t *traps) { int bpp; int height; pixman_fixed_t x_off_fixed; pixman_fixed_t y_off_fixed; pixman_edge_t l, r; pixman_fixed_t t, b; _pixman_image_validate (image); height = image->bits.height; bpp = PIXMAN_FORMAT_BPP (image->bits.format); x_off_fixed = pixman_int_to_fixed (x_off); y_off_fixed = pixman_int_to_fixed (y_off); while (ntrap--) { t = traps->top.y + y_off_fixed; if (t < 0) t = 0; t = pixman_sample_ceil_y (t, bpp); b = traps->bot.y + y_off_fixed; if (pixman_fixed_to_int (b) >= height) b = pixman_int_to_fixed (height) - 1; b = pixman_sample_floor_y (b, bpp); if (b >= t) { /* initialize edge walkers */ pixman_edge_init (&l, bpp, t, traps->top.l + x_off_fixed, traps->top.y + y_off_fixed, traps->bot.l + x_off_fixed, traps->bot.y + y_off_fixed); pixman_edge_init (&r, bpp, t, traps->top.r + x_off_fixed, traps->top.y + y_off_fixed, traps->bot.r + x_off_fixed, traps->bot.y + y_off_fixed); pixman_rasterize_edges (image, &l, &r, t, b); } traps++; } } #if 0 static void dump_image (pixman_image_t *image, const char * title) { int i, j; if (!image->type == BITS) printf ("%s is not a regular image\n", title); if 
(!image->bits.format == PIXMAN_a8) printf ("%s is not an alpha mask\n", title); printf ("\n\n\n%s: \n", title); for (i = 0; i < image->bits.height; ++i) { uint8_t *line = (uint8_t *)&(image->bits.bits[i * image->bits.rowstride]); for (j = 0; j < image->bits.width; ++j) printf ("%c", line[j] ? '#' : ' '); printf ("\n"); } } #endif PIXMAN_EXPORT void pixman_add_trapezoids (pixman_image_t * image, int16_t x_off, int y_off, int ntraps, const pixman_trapezoid_t *traps) { int i; #if 0 dump_image (image, "before"); #endif for (i = 0; i < ntraps; ++i) { const pixman_trapezoid_t *trap = &(traps[i]); if (!pixman_trapezoid_valid (trap)) continue; pixman_rasterize_trapezoid (image, trap, x_off, y_off); } #if 0 dump_image (image, "after"); #endif } PIXMAN_EXPORT void pixman_rasterize_trapezoid (pixman_image_t * image, const pixman_trapezoid_t *trap, int x_off, int y_off) { int bpp; int height; pixman_fixed_t y_off_fixed; pixman_edge_t l, r; pixman_fixed_t t, b; return_if_fail (image->type == BITS); _pixman_image_validate (image); if (!pixman_trapezoid_valid (trap)) return; height = image->bits.height; bpp = PIXMAN_FORMAT_BPP (image->bits.format); y_off_fixed = pixman_int_to_fixed (y_off); t = trap->top + y_off_fixed; if (t < 0) t = 0; t = pixman_sample_ceil_y (t, bpp); b = trap->bottom + y_off_fixed; if (pixman_fixed_to_int (b) >= height) b = pixman_int_to_fixed (height) - 1; b = pixman_sample_floor_y (b, bpp); if (b >= t) { /* initialize edge walkers */ pixman_line_fixed_edge_init (&l, bpp, t, &trap->left, x_off, y_off); pixman_line_fixed_edge_init (&r, bpp, t, &trap->right, x_off, y_off); pixman_rasterize_edges (image, &l, &r, t, b); } } static const pixman_bool_t zero_src_has_no_effect[PIXMAN_N_OPERATORS] = { FALSE, /* Clear 0 0 */ FALSE, /* Src 1 0 */ TRUE, /* Dst 0 1 */ TRUE, /* Over 1 1-Aa */ TRUE, /* OverReverse 1-Ab 1 */ FALSE, /* In Ab 0 */ FALSE, /* InReverse 0 Aa */ FALSE, /* Out 1-Ab 0 */ TRUE, /* OutReverse 0 1-Aa */ TRUE, /* Atop Ab 1-Aa */ FALSE, /* AtopReverse 
1-Ab Aa */ TRUE, /* Xor 1-Ab 1-Aa */ TRUE, /* Add 1 1 */ }; static pixman_bool_t get_trap_extents (pixman_op_t op, pixman_image_t *dest, const pixman_trapezoid_t *traps, int n_traps, pixman_box32_t *box) { int i; /* When the operator is such that a zero source has an * effect on the underlying image, we have to * composite across the entire destination */ if (!zero_src_has_no_effect [op]) { box->x1 = 0; box->y1 = 0; box->x2 = dest->bits.width; box->y2 = dest->bits.height; return TRUE; } box->x1 = INT32_MAX; box->y1 = INT32_MAX; box->x2 = INT32_MIN; box->y2 = INT32_MIN; for (i = 0; i < n_traps; ++i) { const pixman_trapezoid_t *trap = &(traps[i]); int y1, y2; if (!pixman_trapezoid_valid (trap)) continue; y1 = pixman_fixed_to_int (trap->top); if (y1 < box->y1) box->y1 = y1; y2 = pixman_fixed_to_int (pixman_fixed_ceil (trap->bottom)); if (y2 > box->y2) box->y2 = y2; #define EXTEND_MIN(x) \ if (pixman_fixed_to_int ((x)) < box->x1) \ box->x1 = pixman_fixed_to_int ((x)); #define EXTEND_MAX(x) \ if (pixman_fixed_to_int (pixman_fixed_ceil ((x))) > box->x2) \ box->x2 = pixman_fixed_to_int (pixman_fixed_ceil ((x))); #define EXTEND(x) \ EXTEND_MIN(x); \ EXTEND_MAX(x); EXTEND(trap->left.p1.x); EXTEND(trap->left.p2.x); EXTEND(trap->right.p1.x); EXTEND(trap->right.p2.x); } if (box->x1 >= box->x2 || box->y1 >= box->y2) return FALSE; return TRUE; } /* * pixman_composite_trapezoids() * * All the trapezoids are conceptually rendered to an infinitely big image. * The (0, 0) coordinates of this image are then aligned with the (x, y) * coordinates of the source image, and then both images are aligned with * the (x, y) coordinates of the destination. Then these three images are * composited across the entire destination. 
*/ PIXMAN_EXPORT void pixman_composite_trapezoids (pixman_op_t op, pixman_image_t * src, pixman_image_t * dst, pixman_format_code_t mask_format, int x_src, int y_src, int x_dst, int y_dst, int n_traps, const pixman_trapezoid_t * traps) { int i; return_if_fail (PIXMAN_FORMAT_TYPE (mask_format) == PIXMAN_TYPE_A); if (n_traps <= 0) return; _pixman_image_validate (src); _pixman_image_validate (dst); if (op == PIXMAN_OP_ADD && (src->common.flags & FAST_PATH_IS_OPAQUE) && (mask_format == dst->common.extended_format_code) && !(dst->common.have_clip_region)) { for (i = 0; i < n_traps; ++i) { const pixman_trapezoid_t *trap = &(traps[i]); if (!pixman_trapezoid_valid (trap)) continue; pixman_rasterize_trapezoid (dst, trap, x_dst, y_dst); } } else { pixman_image_t *tmp; pixman_box32_t box; int i; if (!get_trap_extents (op, dst, traps, n_traps, &box)) return; if (!(tmp = pixman_image_create_bits ( mask_format, box.x2 - box.x1, box.y2 - box.y1, NULL, -1))) return; for (i = 0; i < n_traps; ++i) { const pixman_trapezoid_t *trap = &(traps[i]); if (!pixman_trapezoid_valid (trap)) continue; pixman_rasterize_trapezoid (tmp, trap, - box.x1, - box.y1); } pixman_image_composite (op, src, tmp, dst, x_src + box.x1, y_src + box.y1, 0, 0, x_dst + box.x1, y_dst + box.y1, box.x2 - box.x1, box.y2 - box.y1); pixman_image_unref (tmp); } } static int greater_y (const pixman_point_fixed_t *a, const pixman_point_fixed_t *b) { if (a->y == b->y) return a->x > b->x; return a->y > b->y; } /* * Note that the definition of this function is a bit odd because * of the X coordinate space (y increasing downwards). 
*/ static int clockwise (const pixman_point_fixed_t *ref, const pixman_point_fixed_t *a, const pixman_point_fixed_t *b) { pixman_point_fixed_t ad, bd; ad.x = a->x - ref->x; ad.y = a->y - ref->y; bd.x = b->x - ref->x; bd.y = b->y - ref->y; return ((pixman_fixed_32_32_t) bd.y * ad.x - (pixman_fixed_32_32_t) ad.y * bd.x) < 0; } static void triangle_to_trapezoids (const pixman_triangle_t *tri, pixman_trapezoid_t *traps) { const pixman_point_fixed_t *top, *left, *right, *tmp; top = &tri->p1; left = &tri->p2; right = &tri->p3; if (greater_y (top, left)) { tmp = left; left = top; top = tmp; } if (greater_y (top, right)) { tmp = right; right = top; top = tmp; } if (clockwise (top, right, left)) { tmp = right; right = left; left = tmp; } /* * Two cases: * * + + * / \ / \ * / \ / \ * / + + \ * / -- -- \ * / -- -- \ * / --- --- \ * +-- --+ */ traps->top = top->y; traps->left.p1 = *top; traps->left.p2 = *left; traps->right.p1 = *top; traps->right.p2 = *right; if (right->y < left->y) traps->bottom = right->y; else traps->bottom = left->y; traps++; *traps = *(traps - 1); if (right->y < left->y) { traps->top = right->y; traps->bottom = left->y; traps->right.p1 = *right; traps->right.p2 = *left; } else { traps->top = left->y; traps->bottom = right->y; traps->left.p1 = *left; traps->left.p2 = *right; } } static pixman_trapezoid_t * convert_triangles (int n_tris, const pixman_triangle_t *tris) { pixman_trapezoid_t *traps; int i; if (n_tris <= 0) return NULL; traps = pixman_malloc_ab (n_tris, 2 * sizeof (pixman_trapezoid_t)); if (!traps) return NULL; for (i = 0; i < n_tris; ++i) triangle_to_trapezoids (&(tris[i]), traps + 2 * i); return traps; } PIXMAN_EXPORT void pixman_composite_triangles (pixman_op_t op, pixman_image_t * src, pixman_image_t * dst, pixman_format_code_t mask_format, int x_src, int y_src, int x_dst, int y_dst, int n_tris, const pixman_triangle_t * tris) { pixman_trapezoid_t *traps; if ((traps = convert_triangles (n_tris, tris))) { pixman_composite_trapezoids (op, 
src, dst, mask_format, x_src, y_src, x_dst, y_dst, n_tris * 2, traps); free (traps); } } PIXMAN_EXPORT void pixman_add_triangles (pixman_image_t *image, int32_t x_off, int32_t y_off, int n_tris, const pixman_triangle_t *tris) { pixman_trapezoid_t *traps; if ((traps = convert_triangles (n_tris, tris))) { pixman_add_trapezoids (image, x_off, y_off, n_tris * 2, traps); free (traps); } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-utils.c0000664000175000017500000001756414712446423017225 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 1999 Keith Packard * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Author: Keith Packard, SuSE, Inc. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include "pixman-private.h" pixman_bool_t _pixman_multiply_overflows_size (size_t a, size_t b) { return a >= SIZE_MAX / b; } pixman_bool_t _pixman_multiply_overflows_int (unsigned int a, unsigned int b) { return a >= INT32_MAX / b; } pixman_bool_t _pixman_addition_overflows_int (unsigned int a, unsigned int b) { return a > INT32_MAX - b; } void * pixman_malloc_ab_plus_c (unsigned int a, unsigned int b, unsigned int c) { if (!b || a >= INT32_MAX / b || (a * b) > INT32_MAX - c) return NULL; return malloc (a * b + c); } void * pixman_malloc_ab (unsigned int a, unsigned int b) { if (a >= INT32_MAX / b) return NULL; return malloc (a * b); } void * pixman_malloc_abc (unsigned int a, unsigned int b, unsigned int c) { if (a >= INT32_MAX / b) return NULL; else if (a * b >= INT32_MAX / c) return NULL; else return malloc (a * b * c); } static force_inline uint16_t float_to_unorm (float f, int n_bits) { uint32_t u; if (f > 1.0) f = 1.0; if (f < 0.0) f = 0.0; u = f * (1 << n_bits); u -= (u >> n_bits); return u; } static force_inline float unorm_to_float (uint16_t u, int n_bits) { uint32_t m = ((1 << n_bits) - 1); return (u & m) * (1.f / (float)m); } /* * This function expands images from a8r8g8b8 to argb_t. To preserve * precision, it needs to know from which source format the a8r8g8b8 pixels * originally came. * * For example, if the source was PIXMAN_x1r5g5b5 and the red component * contained bits 12345, then the 8-bit value is 12345123. To correctly * expand this to floating point, it should be 12345 / 31.0 and not * 12345123 / 255.0. 
*/ void pixman_expand_to_float (argb_t *dst, const uint32_t *src, pixman_format_code_t format, int width) { static const float multipliers[16] = { 0.0f, 1.0f / ((1 << 1) - 1), 1.0f / ((1 << 2) - 1), 1.0f / ((1 << 3) - 1), 1.0f / ((1 << 4) - 1), 1.0f / ((1 << 5) - 1), 1.0f / ((1 << 6) - 1), 1.0f / ((1 << 7) - 1), 1.0f / ((1 << 8) - 1), 1.0f / ((1 << 9) - 1), 1.0f / ((1 << 10) - 1), 1.0f / ((1 << 11) - 1), 1.0f / ((1 << 12) - 1), 1.0f / ((1 << 13) - 1), 1.0f / ((1 << 14) - 1), 1.0f / ((1 << 15) - 1), }; int a_size, r_size, g_size, b_size; int a_shift, r_shift, g_shift, b_shift; float a_mul, r_mul, g_mul, b_mul; uint32_t a_mask, r_mask, g_mask, b_mask; int i; if (!PIXMAN_FORMAT_VIS (format)) format = PIXMAN_a8r8g8b8; /* * Determine the sizes of each component and the masks and shifts * required to extract them from the source pixel. */ a_size = PIXMAN_FORMAT_A (format); r_size = PIXMAN_FORMAT_R (format); g_size = PIXMAN_FORMAT_G (format); b_size = PIXMAN_FORMAT_B (format); a_shift = 32 - a_size; r_shift = 24 - r_size; g_shift = 16 - g_size; b_shift = 8 - b_size; a_mask = ((1 << a_size) - 1); r_mask = ((1 << r_size) - 1); g_mask = ((1 << g_size) - 1); b_mask = ((1 << b_size) - 1); a_mul = multipliers[a_size]; r_mul = multipliers[r_size]; g_mul = multipliers[g_size]; b_mul = multipliers[b_size]; /* Start at the end so that we can do the expansion in place * when src == dst */ for (i = width - 1; i >= 0; i--) { const uint32_t pixel = src[i]; dst[i].a = a_mask? 
((pixel >> a_shift) & a_mask) * a_mul : 1.0f; dst[i].r = ((pixel >> r_shift) & r_mask) * r_mul; dst[i].g = ((pixel >> g_shift) & g_mask) * g_mul; dst[i].b = ((pixel >> b_shift) & b_mask) * b_mul; } } uint16_t pixman_float_to_unorm (float f, int n_bits) { return float_to_unorm (f, n_bits); } float pixman_unorm_to_float (uint16_t u, int n_bits) { return unorm_to_float (u, n_bits); } void pixman_contract_from_float (uint32_t *dst, const argb_t *src, int width) { int i; for (i = 0; i < width; ++i) { uint32_t a, r, g, b; a = float_to_unorm (src[i].a, 8); r = float_to_unorm (src[i].r, 8); g = float_to_unorm (src[i].g, 8); b = float_to_unorm (src[i].b, 8); dst[i] = (a << 24) | (r << 16) | (g << 8) | (b << 0); } } uint32_t * _pixman_iter_get_scanline_noop (pixman_iter_t *iter, const uint32_t *mask) { return iter->buffer; } void _pixman_iter_init_bits_stride (pixman_iter_t *iter, const pixman_iter_info_t *info) { pixman_image_t *image = iter->image; uint8_t *b = (uint8_t *)image->bits.bits; int s = image->bits.rowstride * 4; iter->bits = b + s * iter->y + iter->x * PIXMAN_FORMAT_BPP (info->format) / 8; iter->stride = s; } #define N_TMP_BOXES (16) pixman_bool_t pixman_region16_copy_from_region32 (pixman_region16_t *dst, const pixman_region32_t *src) { int n_boxes, i; pixman_box32_t *boxes32; pixman_box16_t *boxes16; pixman_bool_t retval; boxes32 = pixman_region32_rectangles (src, &n_boxes); boxes16 = pixman_malloc_ab (n_boxes, sizeof (pixman_box16_t)); if (!boxes16) return FALSE; for (i = 0; i < n_boxes; ++i) { boxes16[i].x1 = boxes32[i].x1; boxes16[i].y1 = boxes32[i].y1; boxes16[i].x2 = boxes32[i].x2; boxes16[i].y2 = boxes32[i].y2; } pixman_region_fini (dst); retval = pixman_region_init_rects (dst, boxes16, n_boxes); free (boxes16); return retval; } pixman_bool_t pixman_region32_copy_from_region16 (pixman_region32_t *dst, const pixman_region16_t *src) { int n_boxes, i; pixman_box16_t *boxes16; pixman_box32_t *boxes32; pixman_box32_t tmp_boxes[N_TMP_BOXES]; pixman_bool_t 
retval; boxes16 = pixman_region_rectangles (src, &n_boxes); if (n_boxes > N_TMP_BOXES) boxes32 = pixman_malloc_ab (n_boxes, sizeof (pixman_box32_t)); else boxes32 = tmp_boxes; if (!boxes32) return FALSE; for (i = 0; i < n_boxes; ++i) { boxes32[i].x1 = boxes16[i].x1; boxes32[i].y1 = boxes16[i].y1; boxes32[i].x2 = boxes16[i].x2; boxes32[i].y2 = boxes16[i].y2; } pixman_region32_fini (dst); retval = pixman_region32_init_rects (dst, boxes32, n_boxes); if (boxes32 != tmp_boxes) free (boxes32); return retval; } /* This function is exported for the sake of the test suite and not part * of the ABI. */ PIXMAN_EXPORT pixman_implementation_t * _pixman_internal_only_get_implementation (void) { return get_implementation (); } void _pixman_log_error (const char *function, const char *message) { static int n_messages = 0; if (n_messages < 10) { fprintf (stderr, "*** BUG ***\n" "In %s: %s\n" "Set a breakpoint on '_pixman_log_error' to debug\n\n", function, message); n_messages++; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-version.h.in0000664000175000017500000000356614712446423020161 0ustar00mattst88mattst88/* * Copyright Âİ 2008 Red Hat, Inc. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Author: Carl D. Worth */ #ifndef PIXMAN_VERSION_H__ #define PIXMAN_VERSION_H__ #ifndef PIXMAN_H__ # error pixman-version.h should only be included by pixman.h #endif #define PIXMAN_VERSION_MAJOR @PIXMAN_VERSION_MAJOR@ #define PIXMAN_VERSION_MINOR @PIXMAN_VERSION_MINOR@ #define PIXMAN_VERSION_MICRO @PIXMAN_VERSION_MICRO@ #define PIXMAN_VERSION_STRING "@PIXMAN_VERSION_MAJOR@.@PIXMAN_VERSION_MINOR@.@PIXMAN_VERSION_MICRO@" #define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \ ((major) * 10000) \ + ((minor) * 100) \ + ((micro) * 1)) #define PIXMAN_VERSION PIXMAN_VERSION_ENCODE( \ PIXMAN_VERSION_MAJOR, \ PIXMAN_VERSION_MINOR, \ PIXMAN_VERSION_MICRO) #ifndef PIXMAN_API # define PIXMAN_API #endif #endif /* PIXMAN_VERSION_H__ */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-vmx.c0000664000175000017500000021130014712446423016657 0ustar00mattst88mattst88/* * Copyright Âİ 2007 Luca Barbato * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Luca Barbato not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. 
Luca Barbato makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Luca Barbato (lu_zero@gentoo.org) * * Based on fbmmx.c by Owen Taylor, S¸ren Sandmann and Nicholas Miell */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #include "pixman-combine32.h" #include "pixman-inlines.h" #include #define AVV(x...) {x} static vector unsigned int mask_ff000000; static vector unsigned int mask_red; static vector unsigned int mask_green; static vector unsigned int mask_blue; static vector unsigned int mask_565_fix_rb; static vector unsigned int mask_565_fix_g; static force_inline vector unsigned int splat_alpha (vector unsigned int pix) { #ifdef WORDS_BIGENDIAN return vec_perm (pix, pix, (vector unsigned char)AVV ( 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x0C, 0x0C, 0x0C, 0x0C)); #else return vec_perm (pix, pix, (vector unsigned char)AVV ( 0x03, 0x03, 0x03, 0x03, 0x07, 0x07, 0x07, 0x07, 0x0B, 0x0B, 0x0B, 0x0B, 0x0F, 0x0F, 0x0F, 0x0F)); #endif } static force_inline vector unsigned int splat_pixel (vector unsigned int pix) { return vec_perm (pix, pix, (vector unsigned char)AVV ( 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03)); } static force_inline vector unsigned int pix_multiply (vector unsigned int p, vector unsigned int a) { vector unsigned short hi, lo, mod; /* unpack to short */ hi = (vector unsigned 
short) #ifdef WORDS_BIGENDIAN vec_mergeh ((vector unsigned char)AVV (0), (vector unsigned char)p); #else vec_mergeh ((vector unsigned char) p, (vector unsigned char) AVV (0)); #endif mod = (vector unsigned short) #ifdef WORDS_BIGENDIAN vec_mergeh ((vector unsigned char)AVV (0), (vector unsigned char)a); #else vec_mergeh ((vector unsigned char) a, (vector unsigned char) AVV (0)); #endif hi = vec_mladd (hi, mod, (vector unsigned short) AVV (0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080)); hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8))); hi = vec_sr (hi, vec_splat_u16 (8)); /* unpack to short */ lo = (vector unsigned short) #ifdef WORDS_BIGENDIAN vec_mergel ((vector unsigned char)AVV (0), (vector unsigned char)p); #else vec_mergel ((vector unsigned char) p, (vector unsigned char) AVV (0)); #endif mod = (vector unsigned short) #ifdef WORDS_BIGENDIAN vec_mergel ((vector unsigned char)AVV (0), (vector unsigned char)a); #else vec_mergel ((vector unsigned char) a, (vector unsigned char) AVV (0)); #endif lo = vec_mladd (lo, mod, (vector unsigned short) AVV (0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080, 0x0080)); lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8))); lo = vec_sr (lo, vec_splat_u16 (8)); return (vector unsigned int)vec_packsu (hi, lo); } static force_inline vector unsigned int pix_add (vector unsigned int a, vector unsigned int b) { return (vector unsigned int)vec_adds ((vector unsigned char)a, (vector unsigned char)b); } static force_inline vector unsigned int pix_add_mul (vector unsigned int x, vector unsigned int a, vector unsigned int y, vector unsigned int b) { vector unsigned int t1, t2; t1 = pix_multiply (x, a); t2 = pix_multiply (y, b); return pix_add (t1, t2); } static force_inline vector unsigned int negate (vector unsigned int src) { return vec_nor (src, src); } /* dest*~srca + src */ static force_inline vector unsigned int over (vector unsigned int src, vector unsigned int srca, vector unsigned int dest) { vector 
unsigned char tmp = (vector unsigned char)
	pix_multiply (dest, negate (srca));
    tmp = vec_adds ((vector unsigned char)src, tmp);
    return (vector unsigned int)tmp;
}

/* in == pix_multiply */
#define in_over(src, srca, mask, dest)					\
    over (pix_multiply (src, mask),					\
	  pix_multiply (srca, mask), dest)

/*
 * Vector load/store plumbing.
 *
 * On big-endian AltiVec there is no unaligned vector load, so an unaligned
 * read is done the classic way: vec_lvsl computes a permutation from the
 * address's low bits (COMPUTE_SHIFT_MASK*), vec_ld fetches the two aligned
 * quadwords covering the address, and vec_perm merges them (LOAD_VECTOR).
 * The loaded vector for pointer `foo` lands in a local named `vfoo`, which
 * the caller must have declared (see DECLARE_*_MASK_VAR for the shift-mask
 * locals).  `dest` is always loaded aligned because every combiner first
 * runs a scalar loop until `dest` reaches a 16-byte boundary.
 */
#ifdef WORDS_BIGENDIAN

#define COMPUTE_SHIFT_MASK(source)					\
    source ## _mask = vec_lvsl (0, source);

#define COMPUTE_SHIFT_MASKS(dest, source)				\
    source ## _mask = vec_lvsl (0, source);

#define COMPUTE_SHIFT_MASKC(dest, source, mask)				\
    mask ## _mask = vec_lvsl (0, mask);					\
    source ## _mask = vec_lvsl (0, source);

/* Unaligned load of 16 bytes at `source` into `vsource` (two aligned
 * loads + vec_perm with the mask prepared by COMPUTE_SHIFT_MASK*). */
#define LOAD_VECTOR(source)						\
do									\
{									\
    vector unsigned char tmp1, tmp2;					\
    tmp1 = (typeof(tmp1))vec_ld (0, source);				\
    tmp2 = (typeof(tmp2))vec_ld (15, source);				\
    v ## source = (typeof(v ## source))					\
	vec_perm (tmp1, tmp2, source ## _mask);				\
} while (0)

/* Load `vsource` unaligned and `vdest` aligned. */
#define LOAD_VECTORS(dest, source)					\
do									\
{									\
    LOAD_VECTOR(source);						\
    v ## dest = (typeof(v ## dest))vec_ld (0, dest);			\
} while (0)

/* As LOAD_VECTORS, plus an unaligned load of the mask into `vmask`. */
#define LOAD_VECTORSC(dest, source, mask)				\
do									\
{									\
    LOAD_VECTORS(dest, source);						\
    LOAD_VECTOR(mask);							\
} while (0)

#define DECLARE_SRC_MASK_VAR vector unsigned char src_mask
#define DECLARE_MASK_MASK_VAR vector unsigned char mask_mask

#else

/* Now the COMPUTE_SHIFT_{MASK, MASKS, MASKC} below are just no-op.
 * They are defined that way because little endian altivec can do unaligned
 * reads natively (vec_xl) and has no need for constructing the permutation
 * pattern variables.
 */
#define COMPUTE_SHIFT_MASK(source)
#define COMPUTE_SHIFT_MASKS(dest, source)
#define COMPUTE_SHIFT_MASKC(dest, source, mask)

# define LOAD_VECTOR(source)						\
    v ## source = (typeof(v ## source))vec_xl(0, source);

/* NOTE(review): the trailing '\' on the last statement of the two macros
 * below continues each macro onto the following blank line — keep the
 * blank line, or the next #define would be swallowed into the macro. */
# define LOAD_VECTORS(dest, source)					\
    LOAD_VECTOR(source);						\
    LOAD_VECTOR(dest);							\

# define LOAD_VECTORSC(dest, source, mask)				\
    LOAD_VECTORS(dest, source);						\
    LOAD_VECTOR(mask);							\

#define DECLARE_SRC_MASK_VAR
#define DECLARE_MASK_MASK_VAR

#endif /* WORDS_BIGENDIAN */

/* Load dest/src/mask and pre-multiply `vsource` by the mask's alpha
 * channel — the variant used by the *_u_mask combiners. */
#define LOAD_VECTORSM(dest, source, mask)				\
    LOAD_VECTORSC (dest, source, mask);					\
    v ## source = pix_multiply (v ## source,				\
                                splat_alpha (v ## mask));

/* Aligned store of `vdest` back to `dest` (expansion supplies the ';'). */
#define STORE_VECTOR(dest)						\
    vec_st ((vector unsigned int) v ## dest, 0, dest);

/* load 4 pixels from a 16-byte boundary aligned address */
static force_inline vector unsigned int
load_128_aligned (const uint32_t* src)
{
    return *((vector unsigned int *) src);
}

/* load 4 pixels from an unaligned address */
static force_inline vector unsigned int
load_128_unaligned (const uint32_t* src)
{
    vector unsigned int vsrc;
    DECLARE_SRC_MASK_VAR;

    COMPUTE_SHIFT_MASK (src);
    LOAD_VECTOR (src);

    return vsrc;
}

/* save 4 pixels on a 16-byte boundary aligned address */
static force_inline void
save_128_aligned (uint32_t* data,
		  vector unsigned int vdata)
{
    STORE_VECTOR(data)
}

/* Splat a 32-bit value into all four lanes of a vector. */
static force_inline vector unsigned int
create_mask_32_128 (uint32_t mask)
{
    return (vector unsigned int) {mask, mask, mask, mask};
}

/* Interleave the low halves of data1/data2 byte-by-byte (the #ifdef swaps
 * the operand order so the result is the same regardless of endianness). */
static force_inline vector unsigned int
unpacklo_128_16x8 (vector unsigned int data1, vector unsigned int data2)
{
    vector unsigned char lo;

    /* unpack to short */
    lo = (vector unsigned char)
#ifdef WORDS_BIGENDIAN
	vec_mergel ((vector unsigned char) data2,
		    (vector unsigned char) data1);
#else
	vec_mergel ((vector unsigned char) data1,
		    (vector unsigned char) data2);
#endif

    return (vector unsigned int) lo;
}

static force_inline vector unsigned int
unpackhi_128_16x8 (vector unsigned int data1, vector unsigned int data2)
{
    vector unsigned char hi;

    /* unpack to short */
hi = (vector unsigned char)
#ifdef WORDS_BIGENDIAN
	vec_mergeh ((vector unsigned char) data2,
		    (vector unsigned char) data1);
#else
	vec_mergeh ((vector unsigned char) data1,
		    (vector unsigned char) data2);
#endif

    return (vector unsigned int) hi;
}

/* Interleave the low halves of data1/data2 halfword-by-halfword; operand
 * order is swapped per endianness so both layouts produce the same result. */
static force_inline vector unsigned int
unpacklo_128_8x16 (vector unsigned int data1, vector unsigned int data2)
{
    vector unsigned short lo;

    /* unpack to char */
    lo = (vector unsigned short)
#ifdef WORDS_BIGENDIAN
	vec_mergel ((vector unsigned short) data2,
		    (vector unsigned short) data1);
#else
	vec_mergel ((vector unsigned short) data1,
		    (vector unsigned short) data2);
#endif

    return (vector unsigned int) lo;
}

/* High-half counterpart of unpacklo_128_8x16. */
static force_inline vector unsigned int
unpackhi_128_8x16 (vector unsigned int data1, vector unsigned int data2)
{
    vector unsigned short hi;

    /* unpack to char */
    hi = (vector unsigned short)
#ifdef WORDS_BIGENDIAN
	vec_mergeh ((vector unsigned short) data2,
		    (vector unsigned short) data1);
#else
	vec_mergeh ((vector unsigned short) data1,
		    (vector unsigned short) data2);
#endif

    return (vector unsigned int) hi;
}

/* Split one byte-interleave of data1/data2 into its low and high halves. */
static force_inline void
unpack_128_2x128 (vector unsigned int data1, vector unsigned int data2,
		  vector unsigned int* data_lo, vector unsigned int* data_hi)
{
    *data_lo = unpacklo_128_16x8(data1, data2);
    *data_hi = unpackhi_128_16x8(data1, data2);
}

/* Halfword-granularity variant of unpack_128_2x128. */
static force_inline void
unpack_128_2x128_16 (vector unsigned int data1, vector unsigned int data2,
		     vector unsigned int* data_lo, vector unsigned int* data_hi)
{
    *data_lo = unpacklo_128_8x16(data1, data2);
    *data_hi = unpackhi_128_8x16(data1, data2);
}

/* Expand four r5g6b5 pixels to x8r8g8b8: shift each field into its 8-bit
 * slot (<<8 for red, <<5 for green, <<3 for blue), isolate it with the
 * mask_* globals, then replicate the field's top bits into the low bits
 * (>>5 for the 5-bit r/b fields, >>6 for the 6-bit g field) so 0x1f/0x3f
 * map to 0xff.  NOTE(review): mask_red/green/blue and mask_565_fix_* are
 * file-scope vectors initialized elsewhere in this file — presumably in
 * the implementation constructor; verify before reusing this helper. */
static force_inline vector unsigned int
unpack_565_to_8888 (vector unsigned int lo)
{
    vector unsigned int r, g, b, rb, t;

    r = vec_and (vec_sl(lo, create_mask_32_128(8)), mask_red);
    g = vec_and (vec_sl(lo, create_mask_32_128(5)), mask_green);
    b = vec_and (vec_sl(lo, create_mask_32_128(3)), mask_blue);

    rb = vec_or (r, b);
    t  = vec_and (rb, mask_565_fix_rb);
    t  = vec_sr (t, create_mask_32_128(5));
    rb = vec_or (rb, t);

    t  = vec_and
(g, mask_565_fix_g); t = vec_sr (t, create_mask_32_128(6)); g = vec_or (g, t); return vec_or (rb, g); } static force_inline int is_opaque (vector unsigned int x) { uint32_t cmp_result; vector bool int ffs = vec_cmpeq(x, x); cmp_result = vec_all_eq(x, ffs); return (cmp_result & 0x8888) == 0x8888; } static force_inline int is_zero (vector unsigned int x) { uint32_t cmp_result; cmp_result = vec_all_eq(x, (vector unsigned int) AVV(0)); return cmp_result == 0xffff; } static force_inline int is_transparent (vector unsigned int x) { uint32_t cmp_result; cmp_result = vec_all_eq(x, (vector unsigned int) AVV(0)); return (cmp_result & 0x8888) == 0x8888; } static force_inline uint32_t core_combine_over_u_pixel_vmx (uint32_t src, uint32_t dst) { uint32_t a; a = ALPHA_8(src); if (a == 0xff) { return src; } else if (src) { UN8x4_MUL_UN8_ADD_UN8x4(dst, (~a & MASK), src); } return dst; } static force_inline uint32_t combine1 (const uint32_t *ps, const uint32_t *pm) { uint32_t s = *ps; if (pm) UN8x4_MUL_UN8(s, ALPHA_8(*pm)); return s; } static force_inline vector unsigned int combine4 (const uint32_t* ps, const uint32_t* pm) { vector unsigned int src, msk; if (pm) { msk = load_128_unaligned(pm); if (is_transparent(msk)) return (vector unsigned int) AVV(0); } src = load_128_unaligned(ps); if (pm) src = pix_multiply(src, msk); return src; } static void vmx_combine_over_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t d = *dest; uint32_t ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); *dest++ = d; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = over (vsrc, splat_alpha (vsrc), vdest); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t ia = ALPHA_8 (~s); 
UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
	dest[i] = d;
    }
}

/* OVER combiner, unified alpha, with mask: src is pre-multiplied by the
 * mask's alpha (scalar UN8x4_MUL_UN8 / vector LOAD_VECTORSM), then
 * composited as src OVER dest.  Same align/vector/tail structure as the
 * no-mask variant. */
static void
vmx_combine_over_u_mask (uint32_t *      dest,
			 const uint32_t *src,
			 const uint32_t *mask,
			 int             width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    DECLARE_SRC_MASK_VAR;
    DECLARE_MASK_MASK_VAR;

    /* Scalar loop until dest reaches a 16-byte boundary. */
    while (width && ((uintptr_t)dest & 15))
    {
	uint32_t m = ALPHA_8 (*mask++);
	uint32_t s = *src++;
	uint32_t d = *dest;
	uint32_t ia;

	UN8x4_MUL_UN8 (s, m);

	ia = ALPHA_8 (~s);

	UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
	*dest++ = d;
	width--;
    }

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
	LOAD_VECTORSM (dest, src, mask);

	vdest = over (vsrc, splat_alpha (vsrc), vdest);

	STORE_VECTOR (dest);

	src += 4;
	dest += 4;
	mask += 4;
    }

    /* Scalar tail for the last width % 4 pixels. */
    for (i = width % 4; --i >= 0;)
    {
	uint32_t m = ALPHA_8 (mask[i]);
	uint32_t s = src[i];
	uint32_t d = dest[i];
	uint32_t ia;

	UN8x4_MUL_UN8 (s, m);

	ia = ALPHA_8 (~s);

	UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
	dest[i] = d;
    }
}

/* pixman combiner entry point for OVER (unified alpha): dispatch on
 * whether a mask is present.  imp/op are part of the combiner signature
 * and unused here. */
static void
vmx_combine_over_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    if (mask)
	vmx_combine_over_u_mask (dest, src, mask, width);
    else
	vmx_combine_over_u_no_mask (dest, src, width);
}

/* OVER_REVERSE combiner, unified alpha, no mask: dest OVER src, i.e.
 * result = dest + src * (255 - dest_alpha). */
static void
vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
                                    const uint32_t *src,
                                    int             width)
{
    int i;
    vector unsigned int vdest, vsrc;
    DECLARE_SRC_MASK_VAR;

    /* Scalar loop until dest reaches a 16-byte boundary. */
    while (width && ((uintptr_t)dest & 15))
    {
	uint32_t s = *src++;
	uint32_t d = *dest;
	uint32_t ia = ALPHA_8 (~d);

	UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
	*dest++ = s;
	width--;
    }

    COMPUTE_SHIFT_MASKS (dest, src);

    /* printf ("%s\n",__PRETTY_FUNCTION__); */
    for (i = width / 4; i > 0; i--)
    {
	LOAD_VECTORS (dest, src);

	vdest = over (vdest, splat_alpha (vdest), vsrc);

	STORE_VECTOR (dest);

	src += 4;
	dest += 4;
    }

    /* Scalar tail for the last width % 4 pixels. */
    for (i = width % 4; --i >= 0;)
    {
	uint32_t s = src[i];
	uint32_t d = dest[i];
	uint32_t ia = ALPHA_8 (~dest[i]);

	UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
	dest[i] = s;
    }
}

static void
vmx_combine_over_reverse_u_mask (uint32_t *
dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t d = *dest; uint32_t ia = ALPHA_8 (~d); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = over (vdest, splat_alpha (vdest), vsrc); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t ia = ALPHA_8 (~dest[i]); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d); dest[i] = s; } } static void vmx_combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_over_reverse_u_mask (dest, src, mask, width); else vmx_combine_over_reverse_u_no_mask (dest, src, width); } static void vmx_combine_in_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t a = ALPHA_8 (*dest); UN8x4_MUL_UN8 (s, a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_multiply (vsrc, splat_alpha (vdest)); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t a = ALPHA_8 (dest[i]); UN8x4_MUL_UN8 (s, a); dest[i] = s; } } static void vmx_combine_in_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width 
&& ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t a = ALPHA_8 (*dest); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8 (s, a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_multiply (vsrc, splat_alpha (vdest)); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; uint32_t a = ALPHA_8 (dest[i]); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8 (s, a); dest[i] = s; } } static void vmx_combine_in_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_in_u_mask (dest, src, mask, width); else vmx_combine_in_u_no_mask (dest, src, width); } static void vmx_combine_in_reverse_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t d = *dest; uint32_t a = ALPHA_8 (*src++); UN8x4_MUL_UN8 (d, a); *dest++ = d; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_multiply (vdest, splat_alpha (vsrc)); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t d = dest[i]; uint32_t a = ALPHA_8 (src[i]); UN8x4_MUL_UN8 (d, a); dest[i] = d; } } static void vmx_combine_in_reverse_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t d = *dest; uint32_t a = *src++; UN8x4_MUL_UN8 (a, m); a = ALPHA_8 (a); UN8x4_MUL_UN8 (d, a); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf 
("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_multiply (vdest, splat_alpha (vsrc)); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t d = dest[i]; uint32_t a = src[i]; UN8x4_MUL_UN8 (a, m); a = ALPHA_8 (a); UN8x4_MUL_UN8 (d, a); dest[i] = d; } } static void vmx_combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_in_reverse_u_mask (dest, src, mask, width); else vmx_combine_in_reverse_u_no_mask (dest, src, width); } static void vmx_combine_out_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t a = ALPHA_8 (~(*dest)); UN8x4_MUL_UN8 (s, a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_multiply (vsrc, splat_alpha (negate (vdest))); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t a = ALPHA_8 (~dest[i]); UN8x4_MUL_UN8 (s, a); dest[i] = s; } } static void vmx_combine_out_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t a = ALPHA_8 (~(*dest)); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8 (s, a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_multiply (vsrc, splat_alpha (negate (vdest))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for 
(i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; uint32_t a = ALPHA_8 (~dest[i]); UN8x4_MUL_UN8 (s, m); UN8x4_MUL_UN8 (s, a); dest[i] = s; } } static void vmx_combine_out_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_out_u_mask (dest, src, mask, width); else vmx_combine_out_u_no_mask (dest, src, width); } static void vmx_combine_out_reverse_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t d = *dest; uint32_t a = ALPHA_8 (~(*src++)); UN8x4_MUL_UN8 (d, a); *dest++ = d; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_multiply (vdest, splat_alpha (negate (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t d = dest[i]; uint32_t a = ALPHA_8 (~src[i]); UN8x4_MUL_UN8 (d, a); dest[i] = d; } } static void vmx_combine_out_reverse_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t d = *dest; uint32_t a = *src++; UN8x4_MUL_UN8 (a, m); a = ALPHA_8 (~a); UN8x4_MUL_UN8 (d, a); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_multiply (vdest, splat_alpha (negate (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t d = dest[i]; uint32_t a = src[i]; UN8x4_MUL_UN8 (a, m); a = ALPHA_8 (~a); UN8x4_MUL_UN8 (d, a); dest[i] = d; } } static void 
vmx_combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_out_reverse_u_mask (dest, src, mask, width); else vmx_combine_out_reverse_u_no_mask (dest, src, width); } static void vmx_combine_atop_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t d = *dest; uint32_t dest_a = ALPHA_8 (d); uint32_t src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia); *dest++ = s; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_add_mul (vsrc, splat_alpha (vdest), vdest, splat_alpha (negate (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t dest_a = ALPHA_8 (d); uint32_t src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia); dest[i] = s; } } static void vmx_combine_atop_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t d = *dest; uint32_t dest_a = ALPHA_8 (d); uint32_t src_ia; UN8x4_MUL_UN8 (s, m); src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_add_mul (vsrc, splat_alpha (vdest), vdest, splat_alpha (negate (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; 
uint32_t d = dest[i]; uint32_t dest_a = ALPHA_8 (d); uint32_t src_ia; UN8x4_MUL_UN8 (s, m); src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia); dest[i] = s; } } static void vmx_combine_atop_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_atop_u_mask (dest, src, mask, width); else vmx_combine_atop_u_no_mask (dest, src, width); } static void vmx_combine_atop_reverse_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t d = *dest; uint32_t src_a = ALPHA_8 (s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_add_mul (vdest, splat_alpha (vsrc), vsrc, splat_alpha (negate (vdest))); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t src_a = ALPHA_8 (s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a); dest[i] = s; } } static void vmx_combine_atop_reverse_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t d = *dest; uint32_t src_a; uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8 (s, m); src_a = ALPHA_8 (s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_add_mul (vdest, 
splat_alpha (vsrc), vsrc, splat_alpha (negate (vdest))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t src_a; uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8 (s, m); src_a = ALPHA_8 (s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a); dest[i] = s; } } static void vmx_combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_atop_reverse_u_mask (dest, src, mask, width); else vmx_combine_atop_reverse_u_no_mask (dest, src, width); } static void vmx_combine_xor_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t d = *dest; uint32_t src_ia = ALPHA_8 (~s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia); *dest++ = s; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)), vdest, splat_alpha (negate (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t src_ia = ALPHA_8 (~s); uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia); dest[i] = s; } } static void vmx_combine_xor_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t d = *dest; uint32_t src_ia; uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8 (s, m); src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, 
dest_ia, d, src_ia); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSM (dest, src, mask); vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)), vdest, splat_alpha (negate (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t src_ia; uint32_t dest_ia = ALPHA_8 (~d); UN8x4_MUL_UN8 (s, m); src_ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia); dest[i] = s; } } static void vmx_combine_xor_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_xor_u_mask (dest, src, mask, width); else vmx_combine_xor_u_no_mask (dest, src, width); } static void vmx_combine_add_u_no_mask (uint32_t * dest, const uint32_t *src, int width) { int i; vector unsigned int vdest, vsrc; DECLARE_SRC_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t s = *src++; uint32_t d = *dest; UN8x4_ADD_UN8x4 (d, s); *dest++ = d; width--; } COMPUTE_SHIFT_MASKS (dest, src); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORS (dest, src); vdest = pix_add (vsrc, vdest); STORE_VECTOR (dest); src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t s = src[i]; uint32_t d = dest[i]; UN8x4_ADD_UN8x4 (d, s); dest[i] = d; } } static void vmx_combine_add_u_mask (uint32_t * dest, const uint32_t *src, const uint32_t *mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t m = ALPHA_8 (*mask++); uint32_t s = *src++; uint32_t d = *dest; UN8x4_MUL_UN8 (s, m); UN8x4_ADD_UN8x4 (d, s); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { 
LOAD_VECTORSM (dest, src, mask); vdest = pix_add (vsrc, vdest); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t m = ALPHA_8 (mask[i]); uint32_t s = src[i]; uint32_t d = dest[i]; UN8x4_MUL_UN8 (s, m); UN8x4_ADD_UN8x4 (d, s); dest[i] = d; } } static void vmx_combine_add_u (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { if (mask) vmx_combine_add_u_mask (dest, src, mask, width); else vmx_combine_add_u_no_mask (dest, src, width); } static void vmx_combine_src_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; UN8x4_MUL_UN8x4 (s, a); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_multiply (vsrc, vmask); STORE_VECTOR (dest); mask += 4; src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; UN8x4_MUL_UN8x4 (s, a); dest[i] = s; } } static void vmx_combine_over_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t sa = ALPHA_8 (s); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ~a, s); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = in_over (vsrc, splat_alpha (vsrc), vmask, vdest); STORE_VECTOR 
(dest); mask += 4; src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t sa = ALPHA_8 (s); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ~a, s); dest[i] = d; } } static void vmx_combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t ida = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8_ADD_UN8x4 (s, ida, d); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = over (vdest, splat_alpha (vdest), pix_multiply (vsrc, vmask)); STORE_VECTOR (dest); mask += 4; src += 4; dest += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t ida = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8_ADD_UN8x4 (s, ida, d); dest[i] = s; } } static void vmx_combine_in_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t da = ALPHA_8 (*dest); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (s, da); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_multiply (pix_multiply (vsrc, vmask), splat_alpha (vdest)); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = 
src[i]; uint32_t da = ALPHA_8 (dest[i]); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (s, da); dest[i] = s; } } static void vmx_combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t d = *dest; uint32_t sa = ALPHA_8 (*src++); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4 (d, a); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_multiply (vdest, pix_multiply (vmask, splat_alpha (vsrc))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t d = dest[i]; uint32_t sa = ALPHA_8 (src[i]); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4 (d, a); dest[i] = d; } } static void vmx_combine_out_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t da = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (s, da); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_multiply ( pix_multiply (vsrc, vmask), splat_alpha (negate (vdest))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t da = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (s, da); dest[i] = s; } } static void vmx_combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t 
op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t sa = ALPHA_8 (s); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4 (d, ~a); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_multiply ( vdest, negate (pix_multiply (vmask, splat_alpha (vsrc)))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t sa = ALPHA_8 (s); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4 (d, ~a); dest[i] = d; } } static void vmx_combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask, vsrca; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t sa = ALPHA_8 (s); uint32_t da = ALPHA_8 (d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vsrca = splat_alpha (vsrc); vsrc = pix_multiply (vsrc, vmask); vmask = pix_multiply (vmask, vsrca); vdest = pix_add_mul (vsrc, splat_alpha (vdest), negate (vmask), vdest); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t sa = ALPHA_8 (s); uint32_t da = ALPHA_8 (d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, 
~a, s, da); dest[i] = d; } } static void vmx_combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t sa = ALPHA_8 (s); uint32_t da = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, a, s, da); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_add_mul (vdest, pix_multiply (vmask, splat_alpha (vsrc)), pix_multiply (vsrc, vmask), negate (splat_alpha (vdest))); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t sa = ALPHA_8 (s); uint32_t da = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, a, s, da); dest[i] = d; } } static void vmx_combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; uint32_t sa = ALPHA_8 (s); uint32_t da = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da); *dest++ = d; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_add_mul (vdest, negate (pix_multiply (vmask, splat_alpha (vsrc))), pix_multiply (vsrc, vmask), negate (splat_alpha (vdest))); STORE_VECTOR (dest); src 
+= 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; uint32_t sa = ALPHA_8 (s); uint32_t da = ALPHA_8 (~d); UN8x4_MUL_UN8x4 (s, a); UN8x4_MUL_UN8 (a, sa); UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da); dest[i] = d; } } static void vmx_combine_add_ca (pixman_implementation_t *imp, pixman_op_t op, uint32_t * dest, const uint32_t * src, const uint32_t * mask, int width) { int i; vector unsigned int vdest, vsrc, vmask; DECLARE_SRC_MASK_VAR; DECLARE_MASK_MASK_VAR; while (width && ((uintptr_t)dest & 15)) { uint32_t a = *mask++; uint32_t s = *src++; uint32_t d = *dest; UN8x4_MUL_UN8x4 (s, a); UN8x4_ADD_UN8x4 (s, d); *dest++ = s; width--; } COMPUTE_SHIFT_MASKC (dest, src, mask); /* printf ("%s\n",__PRETTY_FUNCTION__); */ for (i = width / 4; i > 0; i--) { LOAD_VECTORSC (dest, src, mask); vdest = pix_add (pix_multiply (vsrc, vmask), vdest); STORE_VECTOR (dest); src += 4; dest += 4; mask += 4; } for (i = width % 4; --i >= 0;) { uint32_t a = mask[i]; uint32_t s = src[i]; uint32_t d = dest[i]; UN8x4_MUL_UN8x4 (s, a); UN8x4_ADD_UN8x4 (s, d); dest[i] = s; } } static void vmx_composite_over_n_8_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, srca; uint32_t *dst_line, *dst; uint8_t *mask_line; int dst_stride, mask_stride; int32_t w; uint32_t m, d, s, ia; vector unsigned int vsrc, valpha, vmask, vdst; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); srca = ALPHA_8(src); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); vsrc = (vector unsigned int) {src, src, src, src}; valpha = splat_alpha(vsrc); while (height--) { const uint8_t *pm = mask_line; dst = dst_line; dst_line += dst_stride; mask_line += mask_stride; w = width; while (w && (uintptr_t)dst & 15) { s = src; m = 
*pm++; if (m) { d = *dst; UN8x4_MUL_UN8 (s, m); ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); *dst = d; } w--; dst++; } while (w >= 4) { m = *((uint32_t*)pm); if (srca == 0xff && m == 0xffffffff) { save_128_aligned(dst, vsrc); } else if (m) { vmask = splat_pixel((vector unsigned int) {m, m, m, m}); /* dst is 16-byte aligned */ vdst = in_over (vsrc, valpha, vmask, load_128_aligned (dst)); save_128_aligned(dst, vdst); } w -= 4; dst += 4; pm += 4; } while (w) { s = src; m = *pm++; if (m) { d = *dst; UN8x4_MUL_UN8 (s, m); ia = ALPHA_8 (~s); UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); *dst = d; } w--; dst++; } } } static pixman_bool_t vmx_fill (pixman_implementation_t *imp, uint32_t * bits, int stride, int bpp, int x, int y, int width, int height, uint32_t filler) { uint32_t byte_width; uint8_t *byte_line; vector unsigned int vfiller; if (bpp == 8) { uint8_t b; uint16_t w; stride = stride * (int) sizeof (uint32_t) / 1; byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x); byte_width = width; stride *= 1; b = filler & 0xff; w = (b << 8) | b; filler = (w << 16) | w; } else if (bpp == 16) { stride = stride * (int) sizeof (uint32_t) / 2; byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x); byte_width = 2 * width; stride *= 2; filler = (filler & 0xffff) * 0x00010001; } else if (bpp == 32) { stride = stride * (int) sizeof (uint32_t) / 4; byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x); byte_width = 4 * width; stride *= 4; } else { return FALSE; } vfiller = create_mask_32_128(filler); while (height--) { int w; uint8_t *d = byte_line; byte_line += stride; w = byte_width; if (w >= 1 && ((uintptr_t)d & 1)) { *(uint8_t *)d = filler; w -= 1; d += 1; } while (w >= 2 && ((uintptr_t)d & 3)) { *(uint16_t *)d = filler; w -= 2; d += 2; } while (w >= 4 && ((uintptr_t)d & 15)) { *(uint32_t *)d = filler; w -= 4; d += 4; } while (w >= 128) { vec_st(vfiller, 0, (uint32_t *) d); vec_st(vfiller, 0, (uint32_t *) d + 4); vec_st(vfiller, 0, (uint32_t *) d + 8); 
vec_st(vfiller, 0, (uint32_t *) d + 12); vec_st(vfiller, 0, (uint32_t *) d + 16); vec_st(vfiller, 0, (uint32_t *) d + 20); vec_st(vfiller, 0, (uint32_t *) d + 24); vec_st(vfiller, 0, (uint32_t *) d + 28); d += 128; w -= 128; } if (w >= 64) { vec_st(vfiller, 0, (uint32_t *) d); vec_st(vfiller, 0, (uint32_t *) d + 4); vec_st(vfiller, 0, (uint32_t *) d + 8); vec_st(vfiller, 0, (uint32_t *) d + 12); d += 64; w -= 64; } if (w >= 32) { vec_st(vfiller, 0, (uint32_t *) d); vec_st(vfiller, 0, (uint32_t *) d + 4); d += 32; w -= 32; } if (w >= 16) { vec_st(vfiller, 0, (uint32_t *) d); d += 16; w -= 16; } while (w >= 4) { *(uint32_t *)d = filler; w -= 4; d += 4; } if (w >= 2) { *(uint16_t *)d = filler; w -= 2; d += 2; } if (w >= 1) { *(uint8_t *)d = filler; w -= 1; d += 1; } } return TRUE; } static void vmx_composite_src_x888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int32_t w; int dst_stride, src_stride; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; w = width; while (w && (uintptr_t)dst & 15) { *dst++ = *src++ | 0xff000000; w--; } while (w >= 16) { vector unsigned int vmx_src1, vmx_src2, vmx_src3, vmx_src4; vmx_src1 = load_128_unaligned (src); vmx_src2 = load_128_unaligned (src + 4); vmx_src3 = load_128_unaligned (src + 8); vmx_src4 = load_128_unaligned (src + 12); save_128_aligned (dst, vec_or (vmx_src1, mask_ff000000)); save_128_aligned (dst + 4, vec_or (vmx_src2, mask_ff000000)); save_128_aligned (dst + 8, vec_or (vmx_src3, mask_ff000000)); save_128_aligned (dst + 12, vec_or (vmx_src4, mask_ff000000)); dst += 16; src += 16; w -= 16; } while (w) { *dst++ = *src++ | 0xff000000; w--; } } } static void vmx_composite_over_n_8888 
(pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t *dst_line, *dst; uint32_t src, ia; int i, w, dst_stride; vector unsigned int vdst, vsrc, via; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); vsrc = (vector unsigned int){src, src, src, src}; via = negate (splat_alpha (vsrc)); ia = ALPHA_8 (~src); while (height--) { dst = dst_line; dst_line += dst_stride; w = width; while (w && ((uintptr_t)dst & 15)) { uint32_t d = *dst; UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, src); *dst++ = d; w--; } for (i = w / 4; i > 0; i--) { vdst = pix_multiply (load_128_aligned (dst), via); save_128_aligned (dst, pix_add (vsrc, vdst)); dst += 4; } for (i = w % 4; --i >= 0;) { uint32_t d = dst[i]; UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, src); dst[i] = d; } } } static void vmx_composite_over_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); int dst_stride, src_stride; uint32_t *dst_line, *dst; uint32_t *src_line, *src; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); dst = dst_line; src = src_line; while (height--) { vmx_combine_over_u (imp, op, dst, src, NULL, width); dst += dst_stride; src += src_stride; } } static void vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint32_t src, ia; uint32_t *dst_line, d; uint32_t *mask_line, m; uint32_t pack_cmp; int dst_stride, mask_stride; vector unsigned int vsrc, valpha, vmask, vdest; src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); if (src == 0) return; PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); PIXMAN_IMAGE_GET_LINE ( mask_image, mask_x, mask_y, uint32_t, 
mask_stride, mask_line, 1); vsrc = (vector unsigned int) {src, src, src, src}; valpha = splat_alpha(vsrc); ia = ALPHA_8 (src); while (height--) { int w = width; const uint32_t *pm = (uint32_t *)mask_line; uint32_t *pd = (uint32_t *)dst_line; uint32_t s; dst_line += dst_stride; mask_line += mask_stride; while (w && (uintptr_t)pd & 15) { s = src; m = *pm++; if (m) { d = *pd; UN8x4_MUL_UN8x4 (s, m); UN8x4_MUL_UN8 (m, ia); m = ~m; UN8x4_MUL_UN8x4_ADD_UN8x4 (d, m, s); *pd = d; } pd++; w--; } while (w >= 4) { /* pm is NOT necessarily 16-byte aligned */ vmask = load_128_unaligned (pm); pack_cmp = vec_all_eq(vmask, (vector unsigned int) AVV(0)); /* if all bits in mask are zero, pack_cmp is not 0 */ if (pack_cmp == 0) { /* pd is 16-byte aligned */ vdest = in_over (vsrc, valpha, vmask, load_128_aligned (pd)); save_128_aligned(pd, vdest); } pd += 4; pm += 4; w -= 4; } while (w) { s = src; m = *pm++; if (m) { d = *pd; UN8x4_MUL_UN8x4 (s, m); UN8x4_MUL_UN8 (m, ia); m = ~m; UN8x4_MUL_UN8x4_ADD_UN8x4 (d, m, s); *pd = d; } pd++; w--; } } } static void vmx_composite_add_8_8 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS (info); uint8_t *dst_line, *dst; uint8_t *src_line, *src; int dst_stride, src_stride; int32_t w; uint16_t t; PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; src = src_line; dst_line += dst_stride; src_line += src_stride; w = width; /* Small head */ while (w && (uintptr_t)dst & 3) { t = (*dst) + (*src++); *dst++ = t | (0 - (t >> 8)); w--; } vmx_combine_add_u (imp, op, (uint32_t*)dst, (uint32_t*)src, NULL, w >> 2); /* Small tail */ dst += w & 0xfffc; src += w & 0xfffc; w &= 3; while (w) { t = (*dst) + (*src++); *dst++ = t | (0 - (t >> 8)); w--; } } } static void vmx_composite_add_8888_8888 (pixman_implementation_t *imp, pixman_composite_info_t *info) { PIXMAN_COMPOSITE_ARGS 
(info); uint32_t *dst_line, *dst; uint32_t *src_line, *src; int dst_stride, src_stride; PIXMAN_IMAGE_GET_LINE ( src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); PIXMAN_IMAGE_GET_LINE ( dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); while (height--) { dst = dst_line; dst_line += dst_stride; src = src_line; src_line += src_stride; vmx_combine_add_u (imp, op, dst, src, NULL, width); } } static force_inline void scaled_nearest_scanline_vmx_8888_8888_OVER (uint32_t* pd, const uint32_t* ps, int32_t w, pixman_fixed_t vx, pixman_fixed_t unit_x, pixman_fixed_t src_width_fixed, pixman_bool_t fully_transparent_src) { uint32_t s, d; const uint32_t* pm = NULL; vector unsigned int vsrc, vdst; if (fully_transparent_src) return; /* Align dst on a 16-byte boundary */ while (w && ((uintptr_t)pd & 15)) { d = *pd; s = combine1 (ps + pixman_fixed_to_int (vx), pm); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; *pd++ = core_combine_over_u_pixel_vmx (s, d); if (pm) pm++; w--; } while (w >= 4) { uint32_t tmp[4]; tmp[0] = *(ps + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; tmp[1] = *(ps + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; tmp[2] = *(ps + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; tmp[3] = *(ps + pixman_fixed_to_int (vx)); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; vsrc = combine4 (tmp, pm); if (is_opaque (vsrc)) { save_128_aligned (pd, vsrc); } else if (!is_zero (vsrc)) { vdst = over(vsrc, splat_alpha(vsrc), load_128_aligned (pd)); save_128_aligned (pd, vdst); } w -= 4; pd += 4; if (pm) pm += 4; } while (w) { d = *pd; s = combine1 (ps + pixman_fixed_to_int (vx), pm); vx += unit_x; while (vx >= 0) vx -= src_width_fixed; *pd++ = core_combine_over_u_pixel_vmx (s, d); if (pm) pm++; w--; } } FAST_NEAREST_MAINLOOP (vmx_8888_8888_cover_OVER, scaled_nearest_scanline_vmx_8888_8888_OVER, uint32_t, uint32_t, COVER) FAST_NEAREST_MAINLOOP 
(vmx_8888_8888_none_OVER, scaled_nearest_scanline_vmx_8888_8888_OVER, uint32_t, uint32_t, NONE) FAST_NEAREST_MAINLOOP (vmx_8888_8888_pad_OVER, scaled_nearest_scanline_vmx_8888_8888_OVER, uint32_t, uint32_t, PAD) FAST_NEAREST_MAINLOOP (vmx_8888_8888_normal_OVER, scaled_nearest_scanline_vmx_8888_8888_OVER, uint32_t, uint32_t, NORMAL) static const pixman_fast_path_t vmx_fast_paths[] = { PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, vmx_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, vmx_composite_over_n_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, vmx_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, vmx_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, vmx_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, vmx_composite_over_8888_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, vmx_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, vmx_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, vmx_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, vmx_composite_over_n_8_8888), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, vmx_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, vmx_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, vmx_composite_over_n_8888_8888_ca), PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, vmx_composite_over_n_8888_8888_ca), /* PIXMAN_OP_ADD */ PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, vmx_composite_add_8_8), PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, vmx_composite_add_8888_8888), PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, vmx_composite_add_8888_8888), /* PIXMAN_OP_SRC */ PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, vmx_composite_src_x888_8888), PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, 
vmx_composite_src_x888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, vmx_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, vmx_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, vmx_8888_8888), SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, vmx_8888_8888), { PIXMAN_OP_NONE }, }; static uint32_t * vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; vector unsigned int ff000000 = mask_ff000000; uint32_t *dst = iter->buffer; uint32_t *src = (uint32_t *)iter->bits; iter->bits += iter->stride; while (w && ((uintptr_t)dst) & 0x0f) { *dst++ = (*src++) | 0xff000000; w--; } while (w >= 4) { save_128_aligned(dst, vec_or(load_128_unaligned(src), ff000000)); dst += 4; src += 4; w -= 4; } while (w) { *dst++ = (*src++) | 0xff000000; w--; } return iter->buffer; } static uint32_t * vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask) { int w = iter->width; uint32_t *dst = iter->buffer; uint8_t *src = iter->bits; vector unsigned int vmx0, vmx1, vmx2, vmx3, vmx4, vmx5, vmx6; iter->bits += iter->stride; while (w && (((uintptr_t)dst) & 15)) { *dst++ = *(src++) << 24; w--; } while (w >= 16) { vmx0 = load_128_unaligned((uint32_t *) src); unpack_128_2x128((vector unsigned int) AVV(0), vmx0, &vmx1, &vmx2); unpack_128_2x128_16((vector unsigned int) AVV(0), vmx1, &vmx3, &vmx4); unpack_128_2x128_16((vector unsigned int) AVV(0), vmx2, &vmx5, &vmx6); save_128_aligned(dst, vmx6); save_128_aligned((dst + 4), vmx5); save_128_aligned((dst + 8), vmx4); save_128_aligned((dst + 12), vmx3); dst += 16; src += 16; w -= 16; } while (w) { *dst++ = *(src++) << 24; w--; } return iter->buffer; } #define IMAGE_FLAGS \ (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \ FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST) static const pixman_iter_info_t vmx_iters[] = { { PIXMAN_x8r8g8b8, IMAGE_FLAGS, ITER_NARROW, _pixman_iter_init_bits_stride, vmx_fetch_x8r8g8b8, NULL }, { PIXMAN_a8, IMAGE_FLAGS, ITER_NARROW, 
_pixman_iter_init_bits_stride, vmx_fetch_a8, NULL }, { PIXMAN_null }, }; pixman_implementation_t * _pixman_implementation_create_vmx (pixman_implementation_t *fallback) { pixman_implementation_t *imp = _pixman_implementation_create (fallback, vmx_fast_paths); /* VMX constants */ mask_ff000000 = create_mask_32_128 (0xff000000); mask_red = create_mask_32_128 (0x00f80000); mask_green = create_mask_32_128 (0x0000fc00); mask_blue = create_mask_32_128 (0x000000f8); mask_565_fix_rb = create_mask_32_128 (0x00e000e0); mask_565_fix_g = create_mask_32_128 (0x0000c000); /* Set up function pointers */ imp->combine_32[PIXMAN_OP_OVER] = vmx_combine_over_u; imp->combine_32[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_u; imp->combine_32[PIXMAN_OP_IN] = vmx_combine_in_u; imp->combine_32[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_u; imp->combine_32[PIXMAN_OP_OUT] = vmx_combine_out_u; imp->combine_32[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_u; imp->combine_32[PIXMAN_OP_ATOP] = vmx_combine_atop_u; imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_u; imp->combine_32[PIXMAN_OP_XOR] = vmx_combine_xor_u; imp->combine_32[PIXMAN_OP_ADD] = vmx_combine_add_u; imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_ca; imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_ca; imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_ca; imp->combine_32_ca[PIXMAN_OP_IN] = vmx_combine_in_ca; imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_ca; imp->combine_32_ca[PIXMAN_OP_OUT] = vmx_combine_out_ca; imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_ca; imp->combine_32_ca[PIXMAN_OP_ATOP] = vmx_combine_atop_ca; imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_ca; imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_ca; imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_ca; imp->fill = vmx_fill; imp->iter_info = vmx_iters; return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 
xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman-x86.c0000664000175000017500000001136614712446423016504 0ustar00mattst88mattst88/* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. * * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #if defined(USE_X86_MMX) || defined (USE_SSE2) || defined (USE_SSSE3) /* The CPU detection code needs to be in a file not compiled with * "-mmmx -msse", as gcc would generate CMOV instructions otherwise * that would lead to SIGILL instructions on old CPUs that don't have * it. 
*/ typedef enum { X86_MMX = (1 << 0), X86_MMX_EXTENSIONS = (1 << 1), X86_SSE = (1 << 2) | X86_MMX_EXTENSIONS, X86_SSE2 = (1 << 3), X86_CMOV = (1 << 4), X86_SSSE3 = (1 << 5) } cpu_features_t; #ifdef HAVE_GETISAX #include static cpu_features_t detect_cpu_features (void) { cpu_features_t features = 0; unsigned int result = 0; if (getisax (&result, 1)) { if (result & AV_386_CMOV) features |= X86_CMOV; if (result & AV_386_MMX) features |= X86_MMX; if (result & AV_386_AMD_MMX) features |= X86_MMX_EXTENSIONS; if (result & AV_386_SSE) features |= X86_SSE; if (result & AV_386_SSE2) features |= X86_SSE2; if (result & AV_386_SSSE3) features |= X86_SSSE3; } return features; } #else #if defined (__GNUC__) #include #elif defined(_MSC_VER) #include #endif static void pixman_cpuid (uint32_t feature, uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) { #if defined (__GNUC__) *a = *b = *c = *d = 0; __get_cpuid(feature, a, b, c, d); #elif defined (_MSC_VER) int info[4]; __cpuid (info, feature); *a = info[0]; *b = info[1]; *c = info[2]; *d = info[3]; #else #error Unknown compiler #endif } static cpu_features_t detect_cpu_features (void) { uint32_t a, b, c, d; cpu_features_t features = 0; /* Get feature bits */ pixman_cpuid (0x01, &a, &b, &c, &d); if (d & (1 << 15)) features |= X86_CMOV; if (d & (1 << 23)) features |= X86_MMX; if (d & (1 << 25)) features |= X86_SSE; if (d & (1 << 26)) features |= X86_SSE2; if (c & (1 << 9)) features |= X86_SSSE3; /* Check for AMD specific features */ if ((features & X86_MMX) && !(features & X86_SSE)) { char vendor[13]; /* Get vendor string */ memset (vendor, 0, sizeof vendor); pixman_cpuid (0x00, &a, &b, &c, &d); memcpy (vendor + 0, &b, 4); memcpy (vendor + 4, &d, 4); memcpy (vendor + 8, &c, 4); if (strcmp (vendor, "AuthenticAMD") == 0 || strcmp (vendor, "HygonGenuine") == 0 || strcmp (vendor, "Geode by NSC") == 0) { pixman_cpuid (0x80000000, &a, &b, &c, &d); if (a >= 0x80000001) { pixman_cpuid (0x80000001, &a, &b, &c, &d); if (d & (1 << 22)) features 
|= X86_MMX_EXTENSIONS; } } } return features; } #endif static pixman_bool_t have_feature (cpu_features_t feature) { static pixman_bool_t initialized; static cpu_features_t features; if (!initialized) { features = detect_cpu_features(); initialized = TRUE; } return (features & feature) == feature; } #endif pixman_implementation_t * _pixman_x86_get_implementations (pixman_implementation_t *imp) { #define MMX_BITS (X86_MMX | X86_MMX_EXTENSIONS) #define SSE2_BITS (X86_MMX | X86_MMX_EXTENSIONS | X86_SSE | X86_SSE2) #define SSSE3_BITS (X86_SSE | X86_SSE2 | X86_SSSE3) #ifdef USE_X86_MMX if (!_pixman_disabled ("mmx") && have_feature (MMX_BITS)) imp = _pixman_implementation_create_mmx (imp); #endif #ifdef USE_SSE2 if (!_pixman_disabled ("sse2") && have_feature (SSE2_BITS)) imp = _pixman_implementation_create_sse2 (imp); #endif #ifdef USE_SSSE3 if (!_pixman_disabled ("ssse3") && have_feature (SSSE3_BITS)) imp = _pixman_implementation_create_ssse3 (imp); #endif return imp; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman.c0000664000175000017500000010661514712446423016063 0ustar00mattst88mattst88/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ /* * Copyright Âİ 2000 SuSE, Inc. * Copyright Âİ 2007 Red Hat, Inc. * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of SuSE not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. SuSE makes no representations about the * suitability of this software for any purpose. It is provided "as is" * without express or implied warranty. 
* * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * Author: Keith Packard, SuSE, Inc. */ #ifdef HAVE_CONFIG_H #include #endif #include "pixman-private.h" #include pixman_implementation_t *global_implementation; #ifdef TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR static void __attribute__((constructor)) pixman_constructor (void) { global_implementation = _pixman_choose_implementation (); } #endif #ifdef TOOLCHAIN_SUPPORTS_ATTRIBUTE_DESTRUCTOR static void __attribute__((destructor)) pixman_destructor (void) { pixman_implementation_t *imp = global_implementation; while (imp) { pixman_implementation_t *cur = imp; imp = imp->fallback; free (cur); } } #endif typedef struct operator_info_t operator_info_t; struct operator_info_t { uint8_t opaque_info[4]; }; #define PACK(neither, src, dest, both) \ {{ (uint8_t)PIXMAN_OP_ ## neither, \ (uint8_t)PIXMAN_OP_ ## src, \ (uint8_t)PIXMAN_OP_ ## dest, \ (uint8_t)PIXMAN_OP_ ## both }} static const operator_info_t operator_table[] = { /* Neither Opaque Src Opaque Dst Opaque Both Opaque */ PACK (CLEAR, CLEAR, CLEAR, CLEAR), PACK (SRC, SRC, SRC, SRC), PACK (DST, DST, DST, DST), PACK (OVER, SRC, OVER, SRC), PACK (OVER_REVERSE, OVER_REVERSE, DST, DST), PACK (IN, IN, SRC, SRC), PACK (IN_REVERSE, DST, IN_REVERSE, DST), PACK (OUT, OUT, CLEAR, CLEAR), PACK (OUT_REVERSE, CLEAR, OUT_REVERSE, CLEAR), PACK (ATOP, IN, OVER, SRC), PACK (ATOP_REVERSE, OVER_REVERSE, IN_REVERSE, DST), PACK (XOR, OUT, OUT_REVERSE, CLEAR), PACK (ADD, ADD, ADD, ADD), PACK (SATURATE, OVER_REVERSE, DST, DST), {{ 0 /* 0x0e */ }}, {{ 0 /* 0x0f */ }}, PACK (CLEAR, CLEAR, CLEAR, CLEAR), PACK 
(SRC, SRC, SRC, SRC), PACK (DST, DST, DST, DST), PACK (DISJOINT_OVER, DISJOINT_OVER, DISJOINT_OVER, DISJOINT_OVER), PACK (DISJOINT_OVER_REVERSE, DISJOINT_OVER_REVERSE, DISJOINT_OVER_REVERSE, DISJOINT_OVER_REVERSE), PACK (DISJOINT_IN, DISJOINT_IN, DISJOINT_IN, DISJOINT_IN), PACK (DISJOINT_IN_REVERSE, DISJOINT_IN_REVERSE, DISJOINT_IN_REVERSE, DISJOINT_IN_REVERSE), PACK (DISJOINT_OUT, DISJOINT_OUT, DISJOINT_OUT, DISJOINT_OUT), PACK (DISJOINT_OUT_REVERSE, DISJOINT_OUT_REVERSE, DISJOINT_OUT_REVERSE, DISJOINT_OUT_REVERSE), PACK (DISJOINT_ATOP, DISJOINT_ATOP, DISJOINT_ATOP, DISJOINT_ATOP), PACK (DISJOINT_ATOP_REVERSE, DISJOINT_ATOP_REVERSE, DISJOINT_ATOP_REVERSE, DISJOINT_ATOP_REVERSE), PACK (DISJOINT_XOR, DISJOINT_XOR, DISJOINT_XOR, DISJOINT_XOR), {{ 0 /* 0x1c */ }}, {{ 0 /* 0x1d */ }}, {{ 0 /* 0x1e */ }}, {{ 0 /* 0x1f */ }}, PACK (CLEAR, CLEAR, CLEAR, CLEAR), PACK (SRC, SRC, SRC, SRC), PACK (DST, DST, DST, DST), PACK (CONJOINT_OVER, CONJOINT_OVER, CONJOINT_OVER, CONJOINT_OVER), PACK (CONJOINT_OVER_REVERSE, CONJOINT_OVER_REVERSE, CONJOINT_OVER_REVERSE, CONJOINT_OVER_REVERSE), PACK (CONJOINT_IN, CONJOINT_IN, CONJOINT_IN, CONJOINT_IN), PACK (CONJOINT_IN_REVERSE, CONJOINT_IN_REVERSE, CONJOINT_IN_REVERSE, CONJOINT_IN_REVERSE), PACK (CONJOINT_OUT, CONJOINT_OUT, CONJOINT_OUT, CONJOINT_OUT), PACK (CONJOINT_OUT_REVERSE, CONJOINT_OUT_REVERSE, CONJOINT_OUT_REVERSE, CONJOINT_OUT_REVERSE), PACK (CONJOINT_ATOP, CONJOINT_ATOP, CONJOINT_ATOP, CONJOINT_ATOP), PACK (CONJOINT_ATOP_REVERSE, CONJOINT_ATOP_REVERSE, CONJOINT_ATOP_REVERSE, CONJOINT_ATOP_REVERSE), PACK (CONJOINT_XOR, CONJOINT_XOR, CONJOINT_XOR, CONJOINT_XOR), {{ 0 /* 0x2c */ }}, {{ 0 /* 0x2d */ }}, {{ 0 /* 0x2e */ }}, {{ 0 /* 0x2f */ }}, PACK (MULTIPLY, MULTIPLY, MULTIPLY, MULTIPLY), PACK (SCREEN, SCREEN, SCREEN, SCREEN), PACK (OVERLAY, OVERLAY, OVERLAY, OVERLAY), PACK (DARKEN, DARKEN, DARKEN, DARKEN), PACK (LIGHTEN, LIGHTEN, LIGHTEN, LIGHTEN), PACK (COLOR_DODGE, COLOR_DODGE, COLOR_DODGE, COLOR_DODGE), PACK (COLOR_BURN, 
COLOR_BURN, COLOR_BURN, COLOR_BURN), PACK (HARD_LIGHT, HARD_LIGHT, HARD_LIGHT, HARD_LIGHT), PACK (SOFT_LIGHT, SOFT_LIGHT, SOFT_LIGHT, SOFT_LIGHT), PACK (DIFFERENCE, DIFFERENCE, DIFFERENCE, DIFFERENCE), PACK (EXCLUSION, EXCLUSION, EXCLUSION, EXCLUSION), PACK (HSL_HUE, HSL_HUE, HSL_HUE, HSL_HUE), PACK (HSL_SATURATION, HSL_SATURATION, HSL_SATURATION, HSL_SATURATION), PACK (HSL_COLOR, HSL_COLOR, HSL_COLOR, HSL_COLOR), PACK (HSL_LUMINOSITY, HSL_LUMINOSITY, HSL_LUMINOSITY, HSL_LUMINOSITY), }; /* * Optimize the current operator based on opacity of source or destination * The output operator should be mathematically equivalent to the source. */ static pixman_op_t optimize_operator (pixman_op_t op, uint32_t src_flags, uint32_t mask_flags, uint32_t dst_flags) { pixman_bool_t is_source_opaque, is_dest_opaque; #define OPAQUE_SHIFT 13 COMPILE_TIME_ASSERT (FAST_PATH_IS_OPAQUE == (1 << OPAQUE_SHIFT)); is_dest_opaque = (dst_flags & FAST_PATH_IS_OPAQUE); is_source_opaque = ((src_flags & mask_flags) & FAST_PATH_IS_OPAQUE); is_dest_opaque >>= OPAQUE_SHIFT - 1; is_source_opaque >>= OPAQUE_SHIFT; return operator_table[op].opaque_info[is_dest_opaque | is_source_opaque]; } /* * Computing composite region */ static inline pixman_bool_t clip_general_image (pixman_region32_t * region, pixman_region32_t * clip, int dx, int dy) { if (pixman_region32_n_rects (region) == 1 && pixman_region32_n_rects (clip) == 1) { pixman_box32_t * rbox = pixman_region32_rectangles (region, NULL); pixman_box32_t * cbox = pixman_region32_rectangles (clip, NULL); int v; if (rbox->x1 < (v = cbox->x1 + dx)) rbox->x1 = v; if (rbox->x2 > (v = cbox->x2 + dx)) rbox->x2 = v; if (rbox->y1 < (v = cbox->y1 + dy)) rbox->y1 = v; if (rbox->y2 > (v = cbox->y2 + dy)) rbox->y2 = v; if (rbox->x1 >= rbox->x2 || rbox->y1 >= rbox->y2) { pixman_region32_init (region); return FALSE; } } else if (pixman_region32_empty (clip)) { return FALSE; } else { if (dx || dy) pixman_region32_translate (region, -dx, -dy); if 
	(!pixman_region32_intersect (region, region, clip))
	    return FALSE;

	if (dx || dy)
	    pixman_region32_translate (region, dx, dy);
    }

    return pixman_region32_not_empty (region);
}

/* Clip @region against @image's clip region, offset by (@dx, @dy).
 * Returns TRUE (leaving the region untouched) when the source clip
 * does not apply — see the comment below for when that is.
 */
static inline pixman_bool_t
clip_source_image (pixman_region32_t * region,
                   pixman_image_t *    image,
                   int                 dx,
                   int                 dy)
{
    /* Source clips are ignored, unless they are explicitly turned on
     * and the clip in question was set by an X client. (Because if
     * the clip was not set by a client, then it is a hierarchy
     * clip and those should always be ignored for sources).
     */
    if (!image->common.clip_sources || !image->common.client_clip)
	return TRUE;

    return clip_general_image (region, &image->common.clip_region, dx, dy);
}

/*
 * Computes, in destination coordinates, the region actually affected by a
 * composite of the given geometry: the destination rectangle clipped to the
 * destination image bounds, its clip region and alpha map, then further
 * clipped by any client-set source/mask clips (and their alpha maps).
 *
 * returns FALSE if the final region is empty.  Indistinguishable from
 * an allocation failure, but rendering ignores those anyways.
 */
pixman_bool_t
_pixman_compute_composite_region32 (pixman_region32_t * region,
                                    pixman_image_t *    src_image,
                                    pixman_image_t *    mask_image,
                                    pixman_image_t *    dest_image,
                                    int32_t             src_x,
                                    int32_t             src_y,
                                    int32_t             mask_x,
                                    int32_t             mask_y,
                                    int32_t             dest_x,
                                    int32_t             dest_y,
                                    int32_t             width,
                                    int32_t             height)
{
    /* Start from the requested destination rectangle... */
    region->extents.x1 = dest_x;
    region->extents.x2 = dest_x + width;
    region->extents.y1 = dest_y;
    region->extents.y2 = dest_y + height;

    /* ...and clamp it to the destination image bounds. */
    region->extents.x1 = MAX (region->extents.x1, 0);
    region->extents.y1 = MAX (region->extents.y1, 0);
    region->extents.x2 = MIN (region->extents.x2, dest_image->bits.width);
    region->extents.y2 = MIN (region->extents.y2, dest_image->bits.height);

    region->data = 0;

    /* Check for empty operation */
    if (region->extents.x1 >= region->extents.x2 ||
        region->extents.y1 >= region->extents.y2)
    {
	region->extents.x1 = 0;
	region->extents.x2 = 0;
	region->extents.y1 = 0;
	region->extents.y2 = 0;
	return FALSE;
    }

    if (dest_image->common.have_clip_region)
    {
	if (!clip_general_image (region, &dest_image->common.clip_region, 0, 0))
	    return FALSE;
    }

    if (dest_image->common.alpha_map)
    {
	/* Writing also goes through the alpha map, so clip to it as well. */
	if (!pixman_region32_intersect_rect (region, region,
	                                     dest_image->common.alpha_origin_x,
	                                     dest_image->common.alpha_origin_y,
	                                     dest_image->common.alpha_map->width,
	                                     dest_image->common.alpha_map->height))
	{
	    return FALSE;
	}
	if (pixman_region32_empty (region))
	    return FALSE;
	if (dest_image->common.alpha_map->common.have_clip_region)
	{
	    if (!clip_general_image (region,
	                             &dest_image->common.alpha_map->common.clip_region,
	                             -dest_image->common.alpha_origin_x,
	                             -dest_image->common.alpha_origin_y))
	    {
		return FALSE;
	    }
	}
    }

    /* clip against src */
    if (src_image->common.have_clip_region)
    {
	if (!clip_source_image (region, src_image,
	                        dest_x - src_x, dest_y - src_y))
	    return FALSE;
    }
    if (src_image->common.alpha_map &&
        src_image->common.alpha_map->common.have_clip_region)
    {
	if (!clip_source_image (region,
	                        (pixman_image_t *)src_image->common.alpha_map,
	                        dest_x - (src_x - src_image->common.alpha_origin_x),
	                        dest_y - (src_y - src_image->common.alpha_origin_y)))
	{
	    return FALSE;
	}
    }
    /* clip against mask */
    if (mask_image && mask_image->common.have_clip_region)
    {
	if (!clip_source_image (region, mask_image,
	                        dest_x - mask_x, dest_y - mask_y))
	    return FALSE;
	if (mask_image->common.alpha_map &&
	    mask_image->common.alpha_map->common.have_clip_region)
	{
	    if (!clip_source_image (region,
	                            (pixman_image_t *)mask_image->common.alpha_map,
	                            dest_x - (mask_x - mask_image->common.alpha_origin_x),
	                            dest_y - (mask_y - mask_image->common.alpha_origin_y)))
	    {
		return FALSE;
	    }
	}
    }

    return TRUE;
}

/* A box with 48.16 fixed-point coordinates; wide enough to hold the
 * result of transforming 16.16 coordinates without overflow. */
typedef struct box_48_16 box_48_16_t;

struct box_48_16
{
    pixman_fixed_48_16_t x1;
    pixman_fixed_48_16_t y1;
    pixman_fixed_48_16_t x2;
    pixman_fixed_48_16_t y2;
};

/* Map the pixel-center bounding box of integer @extents through
 * @transform (identity when @transform is NULL), writing the axis-
 * aligned bounding box of the four transformed corners to @transformed.
 * Returns FALSE when pixman_transform_point() fails on a corner.
 */
static pixman_bool_t
compute_transformed_extents (pixman_transform_t   *transform,
                             const pixman_box32_t *extents,
                             box_48_16_t          *transformed)
{
    pixman_fixed_48_16_t tx1, ty1, tx2, ty2;
    pixman_fixed_t x1, y1, x2, y2;
    int i;

    /* Use the centers of the boundary pixels, not the box edges. */
    x1 = pixman_int_to_fixed (extents->x1) + pixman_fixed_1 / 2;
    y1 = pixman_int_to_fixed (extents->y1) + pixman_fixed_1 / 2;
    x2 = pixman_int_to_fixed (extents->x2) - pixman_fixed_1 / 2;
    y2 = pixman_int_to_fixed (extents->y2) - pixman_fixed_1 / 2;

    if (!transform)
    {
	transformed->x1 = x1;
	transformed->y1 = y1;
	transformed->x2 = x2;
	transformed->y2 = y2;

	return TRUE;
    }

    tx1 = ty1 = INT64_MAX;
    tx2 = ty2 = INT64_MIN;

    /* Transform all four corners and accumulate their bounding box. */
    for (i = 0; i < 4; ++i)
    {
	pixman_fixed_48_16_t tx, ty;
	pixman_vector_t v;

	v.vector[0] = (i & 0x01)? x1 : x2;
	v.vector[1] = (i & 0x02)? y1 : y2;
	v.vector[2] = pixman_fixed_1;

	if (!pixman_transform_point (transform, &v))
	    return FALSE;

	tx = (pixman_fixed_48_16_t)v.vector[0];
	ty = (pixman_fixed_48_16_t)v.vector[1];

	if (tx < tx1)
	    tx1 = tx;
	if (ty < ty1)
	    ty1 = ty;
	if (tx > tx2)
	    tx2 = tx;
	if (ty > ty2)
	    ty2 = ty;
    }

    transformed->x1 = tx1;
    transformed->y1 = ty1;
    transformed->x2 = tx2;
    transformed->y2 = ty2;

    return TRUE;
}

#define IS_16BIT(x) (((x) >= INT16_MIN) && ((x) <= INT16_MAX))
#define ABS(f)      (((f) < 0)?  (-(f)) : (f))
#define IS_16_16(f) (((f) >= pixman_min_fixed_48_16 && ((f) <= pixman_max_fixed_48_16)))

/* Inspect the source/mask @image against the composite @extents (given in
 * the image's own coordinate space) and OR fast-path bits into @flags when
 * the samples needed are guaranteed to lie inside the image.  Returns FALSE
 * when coordinates could overflow the 16.16 arithmetic used by the
 * compositing routines, forcing the caller to bail out.
 */
static pixman_bool_t
analyze_extent (pixman_image_t       *image,
                const pixman_box32_t *extents,
                uint32_t             *flags)
{
    pixman_transform_t *transform;
    pixman_fixed_t x_off, y_off;
    pixman_fixed_t width, height;
    pixman_fixed_t *params;
    box_48_16_t transformed;
    pixman_box32_t exp_extents;

    if (!image)
	return TRUE;

    /* Some compositing functions walk one step
     * outside the destination rectangle, so we
     * check here that the expanded-by-one source
     * extents in destination space fits in 16 bits
     */
    if (!IS_16BIT (extents->x1 - 1) ||
	!IS_16BIT (extents->y1 - 1) ||
	!IS_16BIT (extents->x2 + 1) ||
	!IS_16BIT (extents->y2 + 1))
    {
	return FALSE;
    }

    transform = image->common.transform;
    if (image->common.type == BITS)
    {
	/* During repeat mode calculations we might convert the
	 * width/height of an image to fixed 16.16, so we need
	 * them to be smaller than 16 bits.
	 */
	if (image->bits.width >= 0x7fff || image->bits.height >= 0x7fff)
	    return FALSE;

	/* Identity transform with extents fully inside the image:
	 * samples trivially cover the clip. */
	if ((image->common.flags & FAST_PATH_ID_TRANSFORM) == FAST_PATH_ID_TRANSFORM &&
	    extents->x1 >= 0 &&
	    extents->y1 >= 0 &&
	    extents->x2 <= image->bits.width &&
	    extents->y2 <= image->bits.height)
	{
	    *flags |= FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;
	    return TRUE;
	}

	/* Per-filter sampling offset and footprint size (16.16). */
	switch (image->common.filter)
	{
	case PIXMAN_FILTER_CONVOLUTION:
	    params = image->common.filter_params;
	    x_off = - pixman_fixed_e - ((params[0] - pixman_fixed_1) >> 1);
	    y_off = - pixman_fixed_e - ((params[1] - pixman_fixed_1) >> 1);
	    width = params[0];
	    height = params[1];
	    break;

	case PIXMAN_FILTER_SEPARABLE_CONVOLUTION:
	    params = image->common.filter_params;
	    x_off = - pixman_fixed_e - ((params[0] - pixman_fixed_1) >> 1);
	    y_off = - pixman_fixed_e - ((params[1] - pixman_fixed_1) >> 1);
	    width = params[0];
	    height = params[1];
	    break;

	case PIXMAN_FILTER_GOOD:
	case PIXMAN_FILTER_BEST:
	case PIXMAN_FILTER_BILINEAR:
	    x_off = - pixman_fixed_1 / 2;
	    y_off = - pixman_fixed_1 / 2;
	    width = pixman_fixed_1;
	    height = pixman_fixed_1;
	    break;

	case PIXMAN_FILTER_FAST:
	case PIXMAN_FILTER_NEAREST:
	    x_off = - pixman_fixed_e;
	    y_off = - pixman_fixed_e;
	    width = 0;
	    height = 0;
	    break;

	default:
	    return FALSE;
	}
    }
    else
    {
	x_off = 0;
	y_off = 0;
	width = 0;
	height = 0;
    }

    if (!compute_transformed_extents (transform, extents, &transformed))
	return FALSE;

    if (image->common.type == BITS)
    {
	/* Do the transformed sample positions stay inside the image for
	 * nearest (and, with the wider margin, bilinear) sampling? */
	if (pixman_fixed_to_int (transformed.x1 - pixman_fixed_e) >= 0 &&
	    pixman_fixed_to_int (transformed.y1 - pixman_fixed_e) >= 0 &&
	    pixman_fixed_to_int (transformed.x2 - pixman_fixed_e) < image->bits.width &&
	    pixman_fixed_to_int (transformed.y2 - pixman_fixed_e) < image->bits.height)
	{
	    *flags |= FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;
	}

	if (pixman_fixed_to_int (transformed.x1 - pixman_fixed_1 / 2) >= 0 &&
	    pixman_fixed_to_int (transformed.y1 - pixman_fixed_1 / 2) >= 0 &&
	    pixman_fixed_to_int (transformed.x2 + pixman_fixed_1 / 2) < image->bits.width &&
	    pixman_fixed_to_int (transformed.y2 + pixman_fixed_1 / 2) < image->bits.height)
	{
	    *flags |= FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR;
	}
    }

    /* Check we don't overflow when the destination extents are expanded by one.
     * This ensures that compositing functions can simply walk the source space
     * using 16.16 variables without worrying about overflow.
     */
    exp_extents = *extents;
    exp_extents.x1 -= 1;
    exp_extents.y1 -= 1;
    exp_extents.x2 += 1;
    exp_extents.y2 += 1;

    if (!compute_transformed_extents (transform, &exp_extents, &transformed))
	return FALSE;

    if (!IS_16_16 (transformed.x1 + x_off - 8 * pixman_fixed_e) ||
	!IS_16_16 (transformed.y1 + y_off - 8 * pixman_fixed_e) ||
	!IS_16_16 (transformed.x2 + x_off + 8 * pixman_fixed_e + width) ||
	!IS_16_16 (transformed.y2 + y_off + 8 * pixman_fixed_e + height))
    {
	return FALSE;
    }

    return TRUE;
}

/*
 * Work around GCC bug causing crashes in Mozilla with SSE2
 *
 * When using -msse, gcc generates movdqa instructions assuming that
 * the stack is 16 byte aligned. Unfortunately some applications, such
 * as Mozilla and Mono, end up aligning the stack to 4 bytes, which
 * causes the movdqa instructions to fail.
 *
 * The __force_align_arg_pointer__ makes gcc generate a prologue that
 * realigns the stack pointer to 16 bytes.
 *
 * On x86-64 this is not necessary because the standard ABI already
 * calls for a 16 byte aligned stack.
 *
 * See https://bugs.freedesktop.org/show_bug.cgi?id=15693
 */
#if defined (USE_SSE2) && defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
__attribute__((__force_align_arg_pointer__))
#endif
/* Composite @src (optionally through @mask) onto @dest with operator @op.
 * This is the main entry point: it validates the images, computes the
 * affected region, derives fast-path flags, looks up the best composite
 * implementation, and runs it on each rectangle of the region.
 */
PIXMAN_EXPORT void
pixman_image_composite32 (pixman_op_t      op,
                          pixman_image_t * src,
                          pixman_image_t * mask,
                          pixman_image_t * dest,
                          int32_t          src_x,
                          int32_t          src_y,
                          int32_t          mask_x,
                          int32_t          mask_y,
                          int32_t          dest_x,
                          int32_t          dest_y,
                          int32_t          width,
                          int32_t          height)
{
    pixman_format_code_t src_format, mask_format, dest_format;
    pixman_region32_t region;
    pixman_box32_t extents;
    pixman_implementation_t *imp;
    pixman_composite_func_t func;
    pixman_composite_info_t info;
    const pixman_box32_t *pbox;
    int n;

    _pixman_image_validate (src);
    if (mask)
	_pixman_image_validate (mask);
    _pixman_image_validate (dest);

    src_format = src->common.extended_format_code;
    info.src_flags = src->common.flags;

    /* An opaque mask contributes nothing; treat it as absent. */
    if (mask && !(mask->common.flags & FAST_PATH_IS_OPAQUE))
    {
	mask_format = mask->common.extended_format_code;
	info.mask_flags = mask->common.flags;
    }
    else
    {
	mask_format = PIXMAN_null;
	info.mask_flags = FAST_PATH_IS_OPAQUE | FAST_PATH_NO_ALPHA_MAP;
    }

    dest_format = dest->common.extended_format_code;
    info.dest_flags = dest->common.flags;

    /* Check for pixbufs */
    if ((mask_format == PIXMAN_a8r8g8b8 || mask_format == PIXMAN_a8b8g8r8) &&
	(src->type == BITS && src->bits.bits == mask->bits.bits)	   &&
	(src->common.repeat == mask->common.repeat)			   &&
	(info.src_flags & info.mask_flags & FAST_PATH_ID_TRANSFORM)	   &&
	(src_x == mask_x && src_y == mask_y))
    {
	if (src_format == PIXMAN_x8b8g8r8)
	    src_format = mask_format = PIXMAN_pixbuf;
	else if (src_format == PIXMAN_x8r8g8b8)
	    src_format = mask_format = PIXMAN_rpixbuf;
    }

    pixman_region32_init (&region);

    if (!_pixman_compute_composite_region32 (
	    &region, src, mask, dest,
	    src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
    {
	goto out;
    }

    extents = *pixman_region32_extents (&region);

    /* Translate the extents from destination space into source space... */
    extents.x1 -= dest_x - src_x;
    extents.y1 -= dest_y - src_y;
    extents.x2 -= dest_x - src_x;
    extents.y2 -= dest_y - src_y;

    if (!analyze_extent (src, &extents, &info.src_flags))
	goto out;

    /* ...and from source space into mask space. */
    extents.x1 -= src_x - mask_x;
    extents.y1 -= src_y - mask_y;
    extents.x2 -= src_x - mask_x;
    extents.y2 -= src_y - mask_y;

    if (!analyze_extent (mask, &extents, &info.mask_flags))
	goto out;

    /* If the clip is within the source samples, and the samples are
     * opaque, then the source is effectively opaque.
     */
#define NEAREST_OPAQUE	(FAST_PATH_SAMPLES_OPAQUE |			\
			 FAST_PATH_NEAREST_FILTER |			\
			 FAST_PATH_SAMPLES_COVER_CLIP_NEAREST)
#define BILINEAR_OPAQUE	(FAST_PATH_SAMPLES_OPAQUE |			\
			 FAST_PATH_BILINEAR_FILTER |			\
			 FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR)

    if ((info.src_flags & NEAREST_OPAQUE) == NEAREST_OPAQUE ||
	(info.src_flags & BILINEAR_OPAQUE) == BILINEAR_OPAQUE)
    {
	info.src_flags |= FAST_PATH_IS_OPAQUE;
    }

    if ((info.mask_flags & NEAREST_OPAQUE) == NEAREST_OPAQUE ||
	(info.mask_flags & BILINEAR_OPAQUE) == BILINEAR_OPAQUE)
    {
	info.mask_flags |= FAST_PATH_IS_OPAQUE;
    }

    /*
     * Check if we can replace our operator by a simpler one
     * if the src or dest are opaque. The output operator should be
     * mathematically equivalent to the source.
     */
    info.op = optimize_operator (op, info.src_flags, info.mask_flags, info.dest_flags);

    _pixman_implementation_lookup_composite (
	get_implementation (), info.op,
	src_format, info.src_flags,
	mask_format, info.mask_flags,
	dest_format, info.dest_flags,
	&imp, &func);

    info.src_image = src;
    info.mask_image = mask;
    info.dest_image = dest;

    /* Run the chosen composite function on every rectangle. */
    pbox = pixman_region32_rectangles (&region, &n);

    while (n--)
    {
	info.src_x = pbox->x1 + src_x - dest_x;
	info.src_y = pbox->y1 + src_y - dest_y;
	info.mask_x = pbox->x1 + mask_x - dest_x;
	info.mask_y = pbox->y1 + mask_y - dest_y;
	info.dest_x = pbox->x1;
	info.dest_y = pbox->y1;
	info.width = pbox->x2 - pbox->x1;
	info.height = pbox->y2 - pbox->y1;

	func (imp, &info);

	pbox++;
    }

out:
    pixman_region32_fini (&region);
}

/* Legacy 16-bit-coordinate wrapper around pixman_image_composite32(). */
PIXMAN_EXPORT void
pixman_image_composite (pixman_op_t      op,
                        pixman_image_t * src,
                        pixman_image_t * mask,
                        pixman_image_t * dest,
                        int16_t          src_x,
                        int16_t          src_y,
                        int16_t          mask_x,
                        int16_t          mask_y,
                        int16_t          dest_x,
                        int16_t          dest_y,
                        uint16_t         width,
                        uint16_t         height)
{
    pixman_image_composite32 (op, src, mask, dest,
                              src_x, src_y,
                              mask_x, mask_y,
                              dest_x, dest_y,
                              width, height);
}

/* Raw rectangle copy between two pixel buffers; dispatched to the best
 * available implementation. */
PIXMAN_EXPORT pixman_bool_t
pixman_blt (uint32_t *src_bits,
            uint32_t *dst_bits,
            int       src_stride,
            int       dst_stride,
            int       src_bpp,
            int       dst_bpp,
            int       src_x,
            int       src_y,
            int       dest_x,
            int       dest_y,
            int       width,
            int       height)
{
    return _pixman_implementation_blt (get_implementation(),
                                       src_bits, dst_bits,
                                       src_stride, dst_stride,
                                       src_bpp, dst_bpp,
                                       src_x, src_y,
                                       dest_x, dest_y,
                                       width, height);
}

/* Fill a rectangle of a raw pixel buffer with @filler; dispatched to the
 * best available implementation. */
PIXMAN_EXPORT pixman_bool_t
pixman_fill (uint32_t *bits,
             int       stride,
             int       bpp,
             int       x,
             int       y,
             int       width,
             int       height,
             uint32_t  filler)
{
    return _pixman_implementation_fill (
	get_implementation(), bits, stride, bpp, x, y, width, height, filler);
}

/* Pack a 16-bit-per-channel pixman_color_t into 8-bit a8r8g8b8. */
static uint32_t
color_to_uint32 (const pixman_color_t *color)
{
    return
        (color->alpha >> 8 << 24) |
        (color->red >> 8 << 16) |
        (color->green & 0xff00) |
        (color->blue >> 8);
}

/* Convert @color to a single pixel value in @format, when @format is one of
 * the directly fillable formats handled below.  Returns FALSE for any other
 * format, in which case the caller must fall back to compositing.
 */
static pixman_bool_t
color_to_pixel (const pixman_color_t *color,
                uint32_t *            pixel,
 *
 * Return value: the encoded version.
 **/
PIXMAN_EXPORT int
pixman_version (void)
{
    return PIXMAN_VERSION;
}

/**
 * pixman_version_string:
 *
 * Returns the version of the pixman library as a human-readable string
 * of the form "X.Y.Z".
 *
 * See also pixman_version() as well as the compile-time equivalents
 * %PIXMAN_VERSION_STRING and %PIXMAN_VERSION.
 *
 * Return value: a string containing the version.
 **/
PIXMAN_EXPORT const char*
pixman_version_string (void)
{
    return PIXMAN_VERSION_STRING;
}

/**
 * pixman_format_supported_source:
 * @format: A pixman_format_code_t format
 *
 * Return value: whether the provided format code is a supported
 * format for a pixman surface used as a source in
 * rendering.
 *
 * Currently, all pixman_format_code_t values are supported.
 **/
PIXMAN_EXPORT pixman_bool_t
pixman_format_supported_source (pixman_format_code_t format)
{
    switch (format)
    {
    /* 32 bpp formats */
    case PIXMAN_a2b10g10r10:
    case PIXMAN_x2b10g10r10:
    case PIXMAN_a2r10g10b10:
    case PIXMAN_x2r10g10b10:
    case PIXMAN_a8r8g8b8:
    case PIXMAN_a8r8g8b8_sRGB:
    case PIXMAN_r8g8b8_sRGB:
    case PIXMAN_x8r8g8b8:
    case PIXMAN_a8b8g8r8:
    case PIXMAN_x8b8g8r8:
    case PIXMAN_b8g8r8a8:
    case PIXMAN_b8g8r8x8:
    case PIXMAN_r8g8b8a8:
    case PIXMAN_r8g8b8x8:
    case PIXMAN_r8g8b8:
    case PIXMAN_b8g8r8:
    case PIXMAN_r5g6b5:
    case PIXMAN_b5g6r5:
    case PIXMAN_x14r6g6b6:
    /* 16 bpp formats */
    case PIXMAN_a1r5g5b5:
    case PIXMAN_x1r5g5b5:
    case PIXMAN_a1b5g5r5:
    case PIXMAN_x1b5g5r5:
    case PIXMAN_a4r4g4b4:
    case PIXMAN_x4r4g4b4:
    case PIXMAN_a4b4g4r4:
    case PIXMAN_x4b4g4r4:
    /* 8bpp formats */
    case PIXMAN_a8:
    case PIXMAN_r3g3b2:
    case PIXMAN_b2g3r3:
    case PIXMAN_a2r2g2b2:
    case PIXMAN_a2b2g2r2:
    case PIXMAN_c8:
    case PIXMAN_g8:
    case PIXMAN_x4a4:
    /* Collides with PIXMAN_c8
       case PIXMAN_x4c4:
     */
    /* Collides with PIXMAN_g8
       case PIXMAN_x4g4:
     */
    /* 4bpp formats */
    case PIXMAN_a4:
    case PIXMAN_r1g2b1:
    case PIXMAN_b1g2r1:
    case PIXMAN_a1r1g1b1:
    case PIXMAN_a1b1g1r1:
    case PIXMAN_c4:
    case PIXMAN_g4:
    /* 1bpp formats */
    case PIXMAN_a1:
    case PIXMAN_g1:
    /* YUV formats */
    case PIXMAN_yuy2:
    case PIXMAN_yv12:
	return TRUE;

    default:
	return FALSE;
    }
}

/**
 * pixman_format_supported_destination:
 * @format: A pixman_format_code_t format
 *
 * Return value: whether the provided format code is a supported
 * format for a pixman surface used as a destination in
 * rendering.
 *
 * Currently, all pixman_format_code_t values are supported
 * except for the YUV formats.
 **/
PIXMAN_EXPORT pixman_bool_t
pixman_format_supported_destination (pixman_format_code_t format)
{
    /* YUV formats cannot be written to at the moment */
    if (format == PIXMAN_yuy2 || format == PIXMAN_yv12)
	return FALSE;

    return pixman_format_supported_source (format);
}

/* Legacy 16-bit region variant: computes the composite region with the
 * 32-bit implementation, then converts the result to a 16-bit region.
 * Returns FALSE when the region is empty or the conversion fails.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_compute_composite_region (pixman_region16_t * region,
                                 pixman_image_t *    src_image,
                                 pixman_image_t *    mask_image,
                                 pixman_image_t *    dest_image,
                                 int16_t             src_x,
                                 int16_t             src_y,
                                 int16_t             mask_x,
                                 int16_t             mask_y,
                                 int16_t             dest_x,
                                 int16_t             dest_y,
                                 uint16_t            width,
                                 uint16_t            height)
{
    pixman_region32_t r32;
    pixman_bool_t retval;

    pixman_region32_init (&r32);
    retval = _pixman_compute_composite_region32 (
	&r32, src_image, mask_image, dest_image,
	src_x, src_y, mask_x, mask_y, dest_x, dest_y,
	width, height);

    if (retval)
    {
	if (!pixman_region16_copy_from_region32 (region, &r32))
	    retval = FALSE;
    }

    pixman_region32_fini (&r32);
    return retval;
}
././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/pixman.h0000664000175000017500000013661514712446423016073 0ustar00mattst88mattst88/***********************************************************

Copyright 1987, 1998  The Open Group

Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.

Copyright 1987 by Digital Equipment Corporation, Maynard, Massachusetts.

                        All Rights Reserved

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Digital not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.

DIGITAL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.

******************************************************************/
/*
 * Copyright © 1998, 2004 Keith Packard
 * Copyright 2007 Red Hat, Inc.
* * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Keith Packard not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Keith Packard makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef PIXMAN_H__ #define PIXMAN_H__ #include #ifdef __cplusplus #define PIXMAN_BEGIN_DECLS extern "C" { #define PIXMAN_END_DECLS } #else #define PIXMAN_BEGIN_DECLS #define PIXMAN_END_DECLS #endif PIXMAN_BEGIN_DECLS /* * Standard integers */ #if !defined (PIXMAN_DONT_DEFINE_STDINT) #if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__) || defined (__HP_cc) # include /* VS 2010 (_MSC_VER 1600) has stdint.h */ #elif defined (_MSC_VER) && _MSC_VER < 1600 typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; #elif defined (_AIX) # include #else # include #endif #endif /* * Boolean */ typedef int pixman_bool_t; /* * Fixpoint numbers */ typedef int64_t pixman_fixed_32_32_t; typedef pixman_fixed_32_32_t pixman_fixed_48_16_t; typedef uint32_t pixman_fixed_1_31_t; typedef uint32_t pixman_fixed_1_16_t; typedef int32_t pixman_fixed_16_16_t; typedef pixman_fixed_16_16_t pixman_fixed_t; #define pixman_fixed_e ((pixman_fixed_t) 1) #define pixman_fixed_1 (pixman_int_to_fixed(1)) #define pixman_fixed_1_minus_e (pixman_fixed_1 - pixman_fixed_e) #define pixman_fixed_minus_1 (pixman_int_to_fixed(-1)) #define pixman_fixed_to_int(f) ((int) ((f) >> 16)) #define pixman_int_to_fixed(i) ((pixman_fixed_t) ((uint32_t) (i) << 16)) #define pixman_fixed_to_double(f) (double) ((f) / (double) pixman_fixed_1) #define pixman_double_to_fixed(d) ((pixman_fixed_t) ((d) * 65536.0)) #define pixman_fixed_frac(f) ((f) & pixman_fixed_1_minus_e) #define pixman_fixed_floor(f) ((f) & ~pixman_fixed_1_minus_e) #define pixman_fixed_ceil(f) pixman_fixed_floor ((f) + pixman_fixed_1_minus_e) #define pixman_fixed_fraction(f) ((f) & pixman_fixed_1_minus_e) #define pixman_fixed_mod_2(f) ((f) & (pixman_fixed1 | pixman_fixed_1_minus_e)) #define 
pixman_max_fixed_48_16 ((pixman_fixed_48_16_t) 0x7fffffff) #define pixman_min_fixed_48_16 (-((pixman_fixed_48_16_t) 1 << 31)) /* * Misc structs */ typedef struct pixman_color pixman_color_t; typedef struct pixman_point_fixed pixman_point_fixed_t; typedef struct pixman_line_fixed pixman_line_fixed_t; typedef struct pixman_vector pixman_vector_t; typedef struct pixman_transform pixman_transform_t; struct pixman_color { uint16_t red; uint16_t green; uint16_t blue; uint16_t alpha; }; struct pixman_point_fixed { pixman_fixed_t x; pixman_fixed_t y; }; struct pixman_line_fixed { pixman_point_fixed_t p1, p2; }; /* * Fixed point matrices */ struct pixman_vector { pixman_fixed_t vector[3]; }; struct pixman_transform { pixman_fixed_t matrix[3][3]; }; /* forward declaration (sorry) */ struct pixman_box16; typedef union pixman_image pixman_image_t; PIXMAN_API void pixman_transform_init_identity (struct pixman_transform *matrix); PIXMAN_API pixman_bool_t pixman_transform_point_3d (const struct pixman_transform *transform, struct pixman_vector *vector); PIXMAN_API pixman_bool_t pixman_transform_point (const struct pixman_transform *transform, struct pixman_vector *vector); PIXMAN_API pixman_bool_t pixman_transform_multiply (struct pixman_transform *dst, const struct pixman_transform *l, const struct pixman_transform *r); PIXMAN_API void pixman_transform_init_scale (struct pixman_transform *t, pixman_fixed_t sx, pixman_fixed_t sy); PIXMAN_API pixman_bool_t pixman_transform_scale (struct pixman_transform *forward, struct pixman_transform *reverse, pixman_fixed_t sx, pixman_fixed_t sy); PIXMAN_API void pixman_transform_init_rotate (struct pixman_transform *t, pixman_fixed_t cos, pixman_fixed_t sin); PIXMAN_API pixman_bool_t pixman_transform_rotate (struct pixman_transform *forward, struct pixman_transform *reverse, pixman_fixed_t c, pixman_fixed_t s); PIXMAN_API void pixman_transform_init_translate (struct pixman_transform *t, pixman_fixed_t tx, pixman_fixed_t ty); PIXMAN_API 
pixman_bool_t pixman_transform_translate (struct pixman_transform *forward, struct pixman_transform *reverse, pixman_fixed_t tx, pixman_fixed_t ty); PIXMAN_API pixman_bool_t pixman_transform_bounds (const struct pixman_transform *matrix, struct pixman_box16 *b); PIXMAN_API pixman_bool_t pixman_transform_invert (struct pixman_transform *dst, const struct pixman_transform *src); PIXMAN_API pixman_bool_t pixman_transform_is_identity (const struct pixman_transform *t); PIXMAN_API pixman_bool_t pixman_transform_is_scale (const struct pixman_transform *t); PIXMAN_API pixman_bool_t pixman_transform_is_int_translate (const struct pixman_transform *t); PIXMAN_API pixman_bool_t pixman_transform_is_inverse (const struct pixman_transform *a, const struct pixman_transform *b); /* * Floating point matrices */ typedef struct pixman_f_transform pixman_f_transform_t; typedef struct pixman_f_vector pixman_f_vector_t; struct pixman_f_vector { double v[3]; }; struct pixman_f_transform { double m[3][3]; }; PIXMAN_API pixman_bool_t pixman_transform_from_pixman_f_transform (struct pixman_transform *t, const struct pixman_f_transform *ft); PIXMAN_API void pixman_f_transform_from_pixman_transform (struct pixman_f_transform *ft, const struct pixman_transform *t); PIXMAN_API pixman_bool_t pixman_f_transform_invert (struct pixman_f_transform *dst, const struct pixman_f_transform *src); PIXMAN_API pixman_bool_t pixman_f_transform_point (const struct pixman_f_transform *t, struct pixman_f_vector *v); PIXMAN_API void pixman_f_transform_point_3d (const struct pixman_f_transform *t, struct pixman_f_vector *v); PIXMAN_API void pixman_f_transform_multiply (struct pixman_f_transform *dst, const struct pixman_f_transform *l, const struct pixman_f_transform *r); PIXMAN_API void pixman_f_transform_init_scale (struct pixman_f_transform *t, double sx, double sy); PIXMAN_API pixman_bool_t pixman_f_transform_scale (struct pixman_f_transform *forward, struct pixman_f_transform *reverse, double sx, double 
sy); PIXMAN_API void pixman_f_transform_init_rotate (struct pixman_f_transform *t, double cos, double sin); PIXMAN_API pixman_bool_t pixman_f_transform_rotate (struct pixman_f_transform *forward, struct pixman_f_transform *reverse, double c, double s); PIXMAN_API void pixman_f_transform_init_translate (struct pixman_f_transform *t, double tx, double ty); PIXMAN_API pixman_bool_t pixman_f_transform_translate (struct pixman_f_transform *forward, struct pixman_f_transform *reverse, double tx, double ty); PIXMAN_API pixman_bool_t pixman_f_transform_bounds (const struct pixman_f_transform *t, struct pixman_box16 *b); PIXMAN_API void pixman_f_transform_init_identity (struct pixman_f_transform *t); typedef enum { PIXMAN_REPEAT_NONE, PIXMAN_REPEAT_NORMAL, PIXMAN_REPEAT_PAD, PIXMAN_REPEAT_REFLECT } pixman_repeat_t; typedef enum { PIXMAN_DITHER_NONE, PIXMAN_DITHER_FAST, PIXMAN_DITHER_GOOD, PIXMAN_DITHER_BEST, PIXMAN_DITHER_ORDERED_BAYER_8, PIXMAN_DITHER_ORDERED_BLUE_NOISE_64, } pixman_dither_t; typedef enum { PIXMAN_FILTER_FAST, PIXMAN_FILTER_GOOD, PIXMAN_FILTER_BEST, PIXMAN_FILTER_NEAREST, PIXMAN_FILTER_BILINEAR, PIXMAN_FILTER_CONVOLUTION, /* The SEPARABLE_CONVOLUTION filter takes the following parameters: * * width: integer given as 16.16 fixpoint number * height: integer given as 16.16 fixpoint number * x_phase_bits: integer given as 16.16 fixpoint * y_phase_bits: integer given as 16.16 fixpoint * xtables: (1 << x_phase_bits) tables of size width * ytables: (1 << y_phase_bits) tables of size height * * When sampling at (x, y), the location is first rounded to one of * n_x_phases * n_y_phases subpixel positions. These subpixel positions * determine an xtable and a ytable to use. * * Conceptually a width x height matrix is then formed in which each entry * is the product of the corresponding entries in the x and y tables. * This matrix is then aligned with the image pixels such that its center * is as close as possible to the subpixel location chosen earlier. 
Then * the image is convolved with the matrix and the resulting pixel returned. */ PIXMAN_FILTER_SEPARABLE_CONVOLUTION } pixman_filter_t; typedef enum { PIXMAN_OP_CLEAR = 0x00, PIXMAN_OP_SRC = 0x01, PIXMAN_OP_DST = 0x02, PIXMAN_OP_OVER = 0x03, PIXMAN_OP_OVER_REVERSE = 0x04, PIXMAN_OP_IN = 0x05, PIXMAN_OP_IN_REVERSE = 0x06, PIXMAN_OP_OUT = 0x07, PIXMAN_OP_OUT_REVERSE = 0x08, PIXMAN_OP_ATOP = 0x09, PIXMAN_OP_ATOP_REVERSE = 0x0a, PIXMAN_OP_XOR = 0x0b, PIXMAN_OP_ADD = 0x0c, PIXMAN_OP_SATURATE = 0x0d, PIXMAN_OP_DISJOINT_CLEAR = 0x10, PIXMAN_OP_DISJOINT_SRC = 0x11, PIXMAN_OP_DISJOINT_DST = 0x12, PIXMAN_OP_DISJOINT_OVER = 0x13, PIXMAN_OP_DISJOINT_OVER_REVERSE = 0x14, PIXMAN_OP_DISJOINT_IN = 0x15, PIXMAN_OP_DISJOINT_IN_REVERSE = 0x16, PIXMAN_OP_DISJOINT_OUT = 0x17, PIXMAN_OP_DISJOINT_OUT_REVERSE = 0x18, PIXMAN_OP_DISJOINT_ATOP = 0x19, PIXMAN_OP_DISJOINT_ATOP_REVERSE = 0x1a, PIXMAN_OP_DISJOINT_XOR = 0x1b, PIXMAN_OP_CONJOINT_CLEAR = 0x20, PIXMAN_OP_CONJOINT_SRC = 0x21, PIXMAN_OP_CONJOINT_DST = 0x22, PIXMAN_OP_CONJOINT_OVER = 0x23, PIXMAN_OP_CONJOINT_OVER_REVERSE = 0x24, PIXMAN_OP_CONJOINT_IN = 0x25, PIXMAN_OP_CONJOINT_IN_REVERSE = 0x26, PIXMAN_OP_CONJOINT_OUT = 0x27, PIXMAN_OP_CONJOINT_OUT_REVERSE = 0x28, PIXMAN_OP_CONJOINT_ATOP = 0x29, PIXMAN_OP_CONJOINT_ATOP_REVERSE = 0x2a, PIXMAN_OP_CONJOINT_XOR = 0x2b, PIXMAN_OP_MULTIPLY = 0x30, PIXMAN_OP_SCREEN = 0x31, PIXMAN_OP_OVERLAY = 0x32, PIXMAN_OP_DARKEN = 0x33, PIXMAN_OP_LIGHTEN = 0x34, PIXMAN_OP_COLOR_DODGE = 0x35, PIXMAN_OP_COLOR_BURN = 0x36, PIXMAN_OP_HARD_LIGHT = 0x37, PIXMAN_OP_SOFT_LIGHT = 0x38, PIXMAN_OP_DIFFERENCE = 0x39, PIXMAN_OP_EXCLUSION = 0x3a, PIXMAN_OP_HSL_HUE = 0x3b, PIXMAN_OP_HSL_SATURATION = 0x3c, PIXMAN_OP_HSL_COLOR = 0x3d, PIXMAN_OP_HSL_LUMINOSITY = 0x3e #ifdef PIXMAN_USE_INTERNAL_API , PIXMAN_N_OPERATORS, PIXMAN_OP_NONE = PIXMAN_N_OPERATORS #endif } pixman_op_t; /* * Regions */ typedef struct pixman_region16_data pixman_region16_data_t; typedef struct pixman_box16 pixman_box16_t; typedef struct 
pixman_rectangle16 pixman_rectangle16_t; typedef struct pixman_region16 pixman_region16_t; struct pixman_region16_data { long size; long numRects; /* pixman_box16_t rects[size]; in memory but not explicitly declared */ }; struct pixman_rectangle16 { int16_t x, y; uint16_t width, height; }; struct pixman_box16 { int16_t x1, y1, x2, y2; }; struct pixman_region16 { pixman_box16_t extents; pixman_region16_data_t *data; }; typedef enum { PIXMAN_REGION_OUT, PIXMAN_REGION_IN, PIXMAN_REGION_PART } pixman_region_overlap_t; /* This function exists only to make it possible to preserve * the X ABI - it should go away at first opportunity. */ PIXMAN_API void pixman_region_set_static_pointers (pixman_box16_t *empty_box, pixman_region16_data_t *empty_data, pixman_region16_data_t *broken_data); /* creation/destruction */ PIXMAN_API void pixman_region_init (pixman_region16_t *region); PIXMAN_API void pixman_region_init_rect (pixman_region16_t *region, int x, int y, unsigned int width, unsigned int height); PIXMAN_API pixman_bool_t pixman_region_init_rects (pixman_region16_t *region, const pixman_box16_t *boxes, int count); PIXMAN_API void pixman_region_init_with_extents (pixman_region16_t *region, const pixman_box16_t *extents); PIXMAN_API void pixman_region_init_from_image (pixman_region16_t *region, pixman_image_t *image); PIXMAN_API void pixman_region_fini (pixman_region16_t *region); /* manipulation */ PIXMAN_API void pixman_region_translate (pixman_region16_t *region, int x, int y); PIXMAN_API pixman_bool_t pixman_region_copy (pixman_region16_t *dest, const pixman_region16_t *source); PIXMAN_API pixman_bool_t pixman_region_intersect (pixman_region16_t *new_reg, const pixman_region16_t *reg1, const pixman_region16_t *reg2); PIXMAN_API pixman_bool_t pixman_region_union (pixman_region16_t *new_reg, const pixman_region16_t *reg1, const pixman_region16_t *reg2); PIXMAN_API pixman_bool_t pixman_region_union_rect (pixman_region16_t *dest, const pixman_region16_t *source, int x, int 
y, unsigned int width, unsigned int height); PIXMAN_API pixman_bool_t pixman_region_intersect_rect (pixman_region16_t *dest, const pixman_region16_t *source, int x, int y, unsigned int width, unsigned int height); PIXMAN_API pixman_bool_t pixman_region_subtract (pixman_region16_t *reg_d, const pixman_region16_t *reg_m, const pixman_region16_t *reg_s); PIXMAN_API pixman_bool_t pixman_region_inverse (pixman_region16_t *new_reg, const pixman_region16_t *reg1, const pixman_box16_t *inv_rect); PIXMAN_API pixman_bool_t pixman_region_contains_point (const pixman_region16_t *region, int x, int y, pixman_box16_t *box); PIXMAN_API pixman_region_overlap_t pixman_region_contains_rectangle (const pixman_region16_t *region, const pixman_box16_t *prect); PIXMAN_API pixman_bool_t pixman_region_empty (const pixman_region16_t *region); PIXMAN_API pixman_bool_t pixman_region_not_empty (const pixman_region16_t *region); PIXMAN_API pixman_box16_t * pixman_region_extents (const pixman_region16_t *region); PIXMAN_API int pixman_region_n_rects (const pixman_region16_t *region); PIXMAN_API pixman_box16_t * pixman_region_rectangles (const pixman_region16_t *region, int *n_rects); PIXMAN_API pixman_bool_t pixman_region_equal (const pixman_region16_t *region1, const pixman_region16_t *region2); PIXMAN_API pixman_bool_t pixman_region_selfcheck (pixman_region16_t *region); PIXMAN_API void pixman_region_reset (pixman_region16_t *region, const pixman_box16_t *box); PIXMAN_API void pixman_region_clear (pixman_region16_t *region); /* * 32 bit regions */ typedef struct pixman_region32_data pixman_region32_data_t; typedef struct pixman_box32 pixman_box32_t; typedef struct pixman_rectangle32 pixman_rectangle32_t; typedef struct pixman_region32 pixman_region32_t; struct pixman_region32_data { long size; long numRects; /* pixman_box32_t rects[size]; in memory but not explicitly declared */ }; struct pixman_rectangle32 { int32_t x, y; uint32_t width, height; }; struct pixman_box32 { int32_t x1, y1, x2, 
y2; }; struct pixman_region32 { pixman_box32_t extents; pixman_region32_data_t *data; }; /* creation/destruction */ PIXMAN_API void pixman_region32_init (pixman_region32_t *region); PIXMAN_API void pixman_region32_init_rect (pixman_region32_t *region, int x, int y, unsigned int width, unsigned int height); PIXMAN_API pixman_bool_t pixman_region32_init_rects (pixman_region32_t *region, const pixman_box32_t *boxes, int count); PIXMAN_API void pixman_region32_init_with_extents (pixman_region32_t *region, const pixman_box32_t *extents); PIXMAN_API void pixman_region32_init_from_image (pixman_region32_t *region, pixman_image_t *image); PIXMAN_API void pixman_region32_fini (pixman_region32_t *region); /* manipulation */ PIXMAN_API void pixman_region32_translate (pixman_region32_t *region, int x, int y); PIXMAN_API pixman_bool_t pixman_region32_copy (pixman_region32_t *dest, const pixman_region32_t *source); PIXMAN_API pixman_bool_t pixman_region32_intersect (pixman_region32_t *new_reg, const pixman_region32_t *reg1, const pixman_region32_t *reg2); PIXMAN_API pixman_bool_t pixman_region32_union (pixman_region32_t *new_reg, const pixman_region32_t *reg1, const pixman_region32_t *reg2); PIXMAN_API pixman_bool_t pixman_region32_intersect_rect (pixman_region32_t *dest, const pixman_region32_t *source, int x, int y, unsigned int width, unsigned int height); PIXMAN_API pixman_bool_t pixman_region32_union_rect (pixman_region32_t *dest, const pixman_region32_t *source, int x, int y, unsigned int width, unsigned int height); PIXMAN_API pixman_bool_t pixman_region32_subtract (pixman_region32_t *reg_d, const pixman_region32_t *reg_m, const pixman_region32_t *reg_s); PIXMAN_API pixman_bool_t pixman_region32_inverse (pixman_region32_t *new_reg, const pixman_region32_t *reg1, const pixman_box32_t *inv_rect); PIXMAN_API pixman_bool_t pixman_region32_contains_point (const pixman_region32_t *region, int x, int y, pixman_box32_t *box); PIXMAN_API pixman_region_overlap_t 
pixman_region32_contains_rectangle (const pixman_region32_t *region, const pixman_box32_t *prect); PIXMAN_API pixman_bool_t pixman_region32_empty (const pixman_region32_t *region); PIXMAN_API pixman_bool_t pixman_region32_not_empty (const pixman_region32_t *region); PIXMAN_API pixman_box32_t * pixman_region32_extents (const pixman_region32_t *region); PIXMAN_API int pixman_region32_n_rects (const pixman_region32_t *region); PIXMAN_API pixman_box32_t * pixman_region32_rectangles (const pixman_region32_t *region, int *n_rects); PIXMAN_API pixman_bool_t pixman_region32_equal (const pixman_region32_t *region1, const pixman_region32_t *region2); PIXMAN_API pixman_bool_t pixman_region32_selfcheck (pixman_region32_t *region); PIXMAN_API void pixman_region32_reset (pixman_region32_t *region, const pixman_box32_t *box); PIXMAN_API void pixman_region32_clear (pixman_region32_t *region); /* Copy / Fill / Misc */ PIXMAN_API pixman_bool_t pixman_blt (uint32_t *src_bits, uint32_t *dst_bits, int src_stride, int dst_stride, int src_bpp, int dst_bpp, int src_x, int src_y, int dest_x, int dest_y, int width, int height); PIXMAN_API pixman_bool_t pixman_fill (uint32_t *bits, int stride, int bpp, int x, int y, int width, int height, uint32_t _xor); PIXMAN_API int pixman_version (void); PIXMAN_API const char* pixman_version_string (void); /* * Images */ typedef struct pixman_indexed pixman_indexed_t; typedef struct pixman_gradient_stop pixman_gradient_stop_t; typedef uint32_t (* pixman_read_memory_func_t) (const void *src, int size); typedef void (* pixman_write_memory_func_t) (void *dst, uint32_t value, int size); typedef void (* pixman_image_destroy_func_t) (pixman_image_t *image, void *data); struct pixman_gradient_stop { pixman_fixed_t x; pixman_color_t color; }; #define PIXMAN_MAX_INDEXED 256 /* XXX depth must be <= 8 */ #if PIXMAN_MAX_INDEXED <= 256 typedef uint8_t pixman_index_type; #endif struct pixman_indexed { pixman_bool_t color; uint32_t rgba[PIXMAN_MAX_INDEXED]; 
pixman_index_type ent[32768]; }; /* * While the protocol is generous in format support, the * sample implementation allows only packed RGB and GBR * representations for data to simplify software rendering, */ #define PIXMAN_FORMAT(bpp,type,a,r,g,b) (((bpp) << 24) | \ ((type) << 16) | \ ((a) << 12) | \ ((r) << 8) | \ ((g) << 4) | \ ((b))) #define PIXMAN_FORMAT_BYTE(bpp,type,a,r,g,b) \ (((bpp >> 3) << 24) | \ (3 << 22) | ((type) << 16) | \ ((a >> 3) << 12) | \ ((r >> 3) << 8) | \ ((g >> 3) << 4) | \ ((b >> 3))) #define PIXMAN_FORMAT_RESHIFT(val, ofs, num) \ (((val >> (ofs)) & ((1 << (num)) - 1)) << ((val >> 22) & 3)) #define PIXMAN_FORMAT_BPP(f) PIXMAN_FORMAT_RESHIFT(f, 24, 8) #define PIXMAN_FORMAT_SHIFT(f) ((uint32_t)((f >> 22) & 3)) #define PIXMAN_FORMAT_TYPE(f) (((f) >> 16) & 0x3f) #define PIXMAN_FORMAT_A(f) PIXMAN_FORMAT_RESHIFT(f, 12, 4) #define PIXMAN_FORMAT_R(f) PIXMAN_FORMAT_RESHIFT(f, 8, 4) #define PIXMAN_FORMAT_G(f) PIXMAN_FORMAT_RESHIFT(f, 4, 4) #define PIXMAN_FORMAT_B(f) PIXMAN_FORMAT_RESHIFT(f, 0, 4) #define PIXMAN_FORMAT_RGB(f) (((f) ) & 0xfff) #define PIXMAN_FORMAT_VIS(f) (((f) ) & 0xffff) #define PIXMAN_FORMAT_DEPTH(f) (PIXMAN_FORMAT_A(f) + \ PIXMAN_FORMAT_R(f) + \ PIXMAN_FORMAT_G(f) + \ PIXMAN_FORMAT_B(f)) #define PIXMAN_TYPE_OTHER 0 #define PIXMAN_TYPE_A 1 #define PIXMAN_TYPE_ARGB 2 #define PIXMAN_TYPE_ABGR 3 #define PIXMAN_TYPE_COLOR 4 #define PIXMAN_TYPE_GRAY 5 #define PIXMAN_TYPE_YUY2 6 #define PIXMAN_TYPE_YV12 7 #define PIXMAN_TYPE_BGRA 8 #define PIXMAN_TYPE_RGBA 9 #define PIXMAN_TYPE_ARGB_SRGB 10 #define PIXMAN_TYPE_RGBA_FLOAT 11 #define PIXMAN_FORMAT_COLOR(f) \ (PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ARGB || \ PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ABGR || \ PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_BGRA || \ PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_RGBA || \ PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_RGBA_FLOAT) typedef enum { /* 128bpp formats */ PIXMAN_rgba_float = PIXMAN_FORMAT_BYTE(128,PIXMAN_TYPE_RGBA_FLOAT,32,32,32,32), /* 96bpp formats */ 
PIXMAN_rgb_float = PIXMAN_FORMAT_BYTE(96,PIXMAN_TYPE_RGBA_FLOAT,0,32,32,32), /* 32bpp formats */ PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,8,8,8,8), PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,8,8,8), PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,8,8,8,8), PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,8,8,8), PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,8,8,8,8), PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,0,8,8,8), PIXMAN_r8g8b8a8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_RGBA,8,8,8,8), PIXMAN_r8g8b8x8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_RGBA,0,8,8,8), PIXMAN_x14r6g6b6 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,6,6,6), PIXMAN_x2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,10,10,10), PIXMAN_a2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,2,10,10,10), PIXMAN_x2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,10,10,10), PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10), /* sRGB formats */ PIXMAN_a8r8g8b8_sRGB = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB_SRGB,8,8,8,8), PIXMAN_r8g8b8_sRGB = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB_SRGB,0,8,8,8), /* 24bpp formats */ PIXMAN_r8g8b8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB,0,8,8,8), PIXMAN_b8g8r8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ABGR,0,8,8,8), /* 16bpp formats */ PIXMAN_r5g6b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,6,5), PIXMAN_b5g6r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,6,5), PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,1,5,5,5), PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,5,5), PIXMAN_a1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,1,5,5,5), PIXMAN_x1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,5,5), PIXMAN_a4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,4,4,4,4), PIXMAN_x4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,4,4,4), PIXMAN_a4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,4,4,4,4), PIXMAN_x4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,4,4,4), /* 8bpp formats */ PIXMAN_a8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,8,0,0,0), PIXMAN_r3g3b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,0,3,3,2), 
PIXMAN_b2g3r3 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,0,3,3,2), PIXMAN_a2r2g2b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,2,2,2,2), PIXMAN_a2b2g2r2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,2,2,2,2), PIXMAN_c8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0), PIXMAN_g8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0), PIXMAN_x4a4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,4,0,0,0), PIXMAN_x4c4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0), PIXMAN_x4g4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0), /* 4bpp formats */ PIXMAN_a4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_A,4,0,0,0), PIXMAN_r1g2b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,0,1,2,1), PIXMAN_b1g2r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,0,1,2,1), PIXMAN_a1r1g1b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,1,1,1,1), PIXMAN_a1b1g1r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,1,1,1,1), PIXMAN_c4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_COLOR,0,0,0,0), PIXMAN_g4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_GRAY,0,0,0,0), /* 1bpp formats */ PIXMAN_a1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_A,1,0,0,0), PIXMAN_g1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_GRAY,0,0,0,0), /* YUV formats */ PIXMAN_yuy2 = PIXMAN_FORMAT(16,PIXMAN_TYPE_YUY2,0,0,0,0), PIXMAN_yv12 = PIXMAN_FORMAT(12,PIXMAN_TYPE_YV12,0,0,0,0) } pixman_format_code_t; /* Querying supported format values. 
*/ PIXMAN_API pixman_bool_t pixman_format_supported_destination (pixman_format_code_t format); PIXMAN_API pixman_bool_t pixman_format_supported_source (pixman_format_code_t format); /* Constructors */ PIXMAN_API pixman_image_t *pixman_image_create_solid_fill (const pixman_color_t *color); PIXMAN_API pixman_image_t *pixman_image_create_linear_gradient (const pixman_point_fixed_t *p1, const pixman_point_fixed_t *p2, const pixman_gradient_stop_t *stops, int n_stops); PIXMAN_API pixman_image_t *pixman_image_create_radial_gradient (const pixman_point_fixed_t *inner, const pixman_point_fixed_t *outer, pixman_fixed_t inner_radius, pixman_fixed_t outer_radius, const pixman_gradient_stop_t *stops, int n_stops); PIXMAN_API pixman_image_t *pixman_image_create_conical_gradient (const pixman_point_fixed_t *center, pixman_fixed_t angle, const pixman_gradient_stop_t *stops, int n_stops); PIXMAN_API pixman_image_t *pixman_image_create_bits (pixman_format_code_t format, int width, int height, uint32_t *bits, int rowstride_bytes); PIXMAN_API pixman_image_t *pixman_image_create_bits_no_clear (pixman_format_code_t format, int width, int height, uint32_t * bits, int rowstride_bytes); /* Destructor */ PIXMAN_API pixman_image_t *pixman_image_ref (pixman_image_t *image); PIXMAN_API pixman_bool_t pixman_image_unref (pixman_image_t *image); PIXMAN_API void pixman_image_set_destroy_function (pixman_image_t *image, pixman_image_destroy_func_t function, void *data); PIXMAN_API void * pixman_image_get_destroy_data (pixman_image_t *image); /* Set properties */ PIXMAN_API pixman_bool_t pixman_image_set_clip_region (pixman_image_t *image, const pixman_region16_t *region); PIXMAN_API pixman_bool_t pixman_image_set_clip_region32 (pixman_image_t *image, const pixman_region32_t *region); PIXMAN_API void pixman_image_set_has_client_clip (pixman_image_t *image, pixman_bool_t clien_clip); PIXMAN_API pixman_bool_t pixman_image_set_transform (pixman_image_t *image, const pixman_transform_t *transform); 
PIXMAN_API void pixman_image_set_repeat (pixman_image_t *image, pixman_repeat_t repeat); PIXMAN_API void pixman_image_set_dither (pixman_image_t *image, pixman_dither_t dither); PIXMAN_API void pixman_image_set_dither_offset (pixman_image_t *image, int offset_x, int offset_y); PIXMAN_API pixman_bool_t pixman_image_set_filter (pixman_image_t *image, pixman_filter_t filter, const pixman_fixed_t *filter_params, int n_filter_params); PIXMAN_API void pixman_image_set_source_clipping (pixman_image_t *image, pixman_bool_t source_clipping); PIXMAN_API void pixman_image_set_alpha_map (pixman_image_t *image, pixman_image_t *alpha_map, int16_t x, int16_t y); PIXMAN_API void pixman_image_set_component_alpha (pixman_image_t *image, pixman_bool_t component_alpha); PIXMAN_API pixman_bool_t pixman_image_get_component_alpha (pixman_image_t *image); PIXMAN_API void pixman_image_set_accessors (pixman_image_t *image, pixman_read_memory_func_t read_func, pixman_write_memory_func_t write_func); PIXMAN_API void pixman_image_set_indexed (pixman_image_t *image, const pixman_indexed_t *indexed); PIXMAN_API uint32_t *pixman_image_get_data (pixman_image_t *image); PIXMAN_API int pixman_image_get_width (pixman_image_t *image); PIXMAN_API int pixman_image_get_height (pixman_image_t *image); PIXMAN_API int pixman_image_get_stride (pixman_image_t *image); /* in bytes */ PIXMAN_API int pixman_image_get_depth (pixman_image_t *image); PIXMAN_API pixman_format_code_t pixman_image_get_format (pixman_image_t *image); typedef enum { PIXMAN_KERNEL_IMPULSE, PIXMAN_KERNEL_BOX, PIXMAN_KERNEL_LINEAR, PIXMAN_KERNEL_CUBIC, PIXMAN_KERNEL_GAUSSIAN, PIXMAN_KERNEL_LANCZOS2, PIXMAN_KERNEL_LANCZOS3, PIXMAN_KERNEL_LANCZOS3_STRETCHED /* Jim Blinn's 'nice' filter */ } pixman_kernel_t; /* Create the parameter list for a SEPARABLE_CONVOLUTION filter * with the given kernels and scale parameters. 
*/ PIXMAN_API pixman_fixed_t * pixman_filter_create_separable_convolution (int *n_values, pixman_fixed_t scale_x, pixman_fixed_t scale_y, pixman_kernel_t reconstruct_x, pixman_kernel_t reconstruct_y, pixman_kernel_t sample_x, pixman_kernel_t sample_y, int subsample_bits_x, int subsample_bits_y); PIXMAN_API pixman_bool_t pixman_image_fill_rectangles (pixman_op_t op, pixman_image_t *image, const pixman_color_t *color, int n_rects, const pixman_rectangle16_t *rects); PIXMAN_API pixman_bool_t pixman_image_fill_boxes (pixman_op_t op, pixman_image_t *dest, const pixman_color_t *color, int n_boxes, const pixman_box32_t *boxes); /* Composite */ PIXMAN_API pixman_bool_t pixman_compute_composite_region (pixman_region16_t *region, pixman_image_t *src_image, pixman_image_t *mask_image, pixman_image_t *dest_image, int16_t src_x, int16_t src_y, int16_t mask_x, int16_t mask_y, int16_t dest_x, int16_t dest_y, uint16_t width, uint16_t height); PIXMAN_API void pixman_image_composite (pixman_op_t op, pixman_image_t *src, pixman_image_t *mask, pixman_image_t *dest, int16_t src_x, int16_t src_y, int16_t mask_x, int16_t mask_y, int16_t dest_x, int16_t dest_y, uint16_t width, uint16_t height); PIXMAN_API void pixman_image_composite32 (pixman_op_t op, pixman_image_t *src, pixman_image_t *mask, pixman_image_t *dest, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y, int32_t dest_x, int32_t dest_y, int32_t width, int32_t height); /* Executive Summary: This function is a no-op that only exists * for historical reasons. * * There used to be a bug in the X server where it would rely on * out-of-bounds accesses when it was asked to composite with a * window as the source. It would create a pixman image pointing * to some bogus position in memory, but then set a clip region * to the position where the actual bits were. * * Due to a bug in old versions of pixman, where it would not clip * against the image bounds when a clip region was set, this would * actually work. 
So when the pixman bug was fixed, a workaround was * added to allow certain out-of-bound accesses. This function disabled * those workarounds. * * Since 0.21.2, pixman doesn't do these workarounds anymore, so now this * function is a no-op. */ PIXMAN_API void pixman_disable_out_of_bounds_workaround (void); /* * Glyphs */ typedef struct pixman_glyph_cache_t pixman_glyph_cache_t; typedef struct { int x, y; const void *glyph; } pixman_glyph_t; PIXMAN_API pixman_glyph_cache_t *pixman_glyph_cache_create (void); PIXMAN_API void pixman_glyph_cache_destroy (pixman_glyph_cache_t *cache); PIXMAN_API void pixman_glyph_cache_freeze (pixman_glyph_cache_t *cache); PIXMAN_API void pixman_glyph_cache_thaw (pixman_glyph_cache_t *cache); PIXMAN_API const void * pixman_glyph_cache_lookup (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key); PIXMAN_API const void * pixman_glyph_cache_insert (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key, int origin_x, int origin_y, pixman_image_t *glyph_image); PIXMAN_API void pixman_glyph_cache_remove (pixman_glyph_cache_t *cache, void *font_key, void *glyph_key); PIXMAN_API void pixman_glyph_get_extents (pixman_glyph_cache_t *cache, int n_glyphs, pixman_glyph_t *glyphs, pixman_box32_t *extents); PIXMAN_API pixman_format_code_t pixman_glyph_get_mask_format (pixman_glyph_cache_t *cache, int n_glyphs, const pixman_glyph_t *glyphs); PIXMAN_API void pixman_composite_glyphs (pixman_op_t op, pixman_image_t *src, pixman_image_t *dest, pixman_format_code_t mask_format, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y, int32_t dest_x, int32_t dest_y, int32_t width, int32_t height, pixman_glyph_cache_t *cache, int n_glyphs, const pixman_glyph_t *glyphs); PIXMAN_API void pixman_composite_glyphs_no_mask (pixman_op_t op, pixman_image_t *src, pixman_image_t *dest, int32_t src_x, int32_t src_y, int32_t dest_x, int32_t dest_y, pixman_glyph_cache_t *cache, int n_glyphs, const pixman_glyph_t *glyphs); /* * Trapezoids */ typedef 
struct pixman_edge pixman_edge_t; typedef struct pixman_trapezoid pixman_trapezoid_t; typedef struct pixman_trap pixman_trap_t; typedef struct pixman_span_fix pixman_span_fix_t; typedef struct pixman_triangle pixman_triangle_t; /* * An edge structure. This represents a single polygon edge * and can be quickly stepped across small or large gaps in the * sample grid */ struct pixman_edge { pixman_fixed_t x; pixman_fixed_t e; pixman_fixed_t stepx; pixman_fixed_t signdx; pixman_fixed_t dy; pixman_fixed_t dx; pixman_fixed_t stepx_small; pixman_fixed_t stepx_big; pixman_fixed_t dx_small; pixman_fixed_t dx_big; }; struct pixman_trapezoid { pixman_fixed_t top, bottom; pixman_line_fixed_t left, right; }; struct pixman_triangle { pixman_point_fixed_t p1, p2, p3; }; /* whether 't' is a well defined not obviously empty trapezoid */ #define pixman_trapezoid_valid(t) \ ((t)->left.p1.y != (t)->left.p2.y && \ (t)->right.p1.y != (t)->right.p2.y && \ ((t)->bottom > (t)->top)) struct pixman_span_fix { pixman_fixed_t l, r, y; }; struct pixman_trap { pixman_span_fix_t top, bot; }; PIXMAN_API pixman_fixed_t pixman_sample_ceil_y (pixman_fixed_t y, int bpp); PIXMAN_API pixman_fixed_t pixman_sample_floor_y (pixman_fixed_t y, int bpp); PIXMAN_API void pixman_edge_step (pixman_edge_t *e, int n); PIXMAN_API void pixman_edge_init (pixman_edge_t *e, int bpp, pixman_fixed_t y_start, pixman_fixed_t x_top, pixman_fixed_t y_top, pixman_fixed_t x_bot, pixman_fixed_t y_bot); PIXMAN_API void pixman_line_fixed_edge_init (pixman_edge_t *e, int bpp, pixman_fixed_t y, const pixman_line_fixed_t *line, int x_off, int y_off); PIXMAN_API void pixman_rasterize_edges (pixman_image_t *image, pixman_edge_t *l, pixman_edge_t *r, pixman_fixed_t t, pixman_fixed_t b); PIXMAN_API void pixman_add_traps (pixman_image_t *image, int16_t x_off, int16_t y_off, int ntrap, const pixman_trap_t *traps); PIXMAN_API void pixman_add_trapezoids (pixman_image_t *image, int16_t x_off, int y_off, int ntraps, const pixman_trapezoid_t 
*traps); PIXMAN_API void pixman_rasterize_trapezoid (pixman_image_t *image, const pixman_trapezoid_t *trap, int x_off, int y_off); PIXMAN_API void pixman_composite_trapezoids (pixman_op_t op, pixman_image_t * src, pixman_image_t * dst, pixman_format_code_t mask_format, int x_src, int y_src, int x_dst, int y_dst, int n_traps, const pixman_trapezoid_t * traps); PIXMAN_API void pixman_composite_triangles (pixman_op_t op, pixman_image_t * src, pixman_image_t * dst, pixman_format_code_t mask_format, int x_src, int y_src, int x_dst, int y_dst, int n_tris, const pixman_triangle_t * tris); PIXMAN_API void pixman_add_triangles (pixman_image_t *image, int32_t x_off, int32_t y_off, int n_tris, const pixman_triangle_t *tris); PIXMAN_END_DECLS #endif /* PIXMAN_H__ */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/rounding.txt0000664000175000017500000001127414712446423017005 0ustar00mattst88mattst88*** General notes about rounding Suppose a function is sampled at positions [k + o] where k is an integer and o is a fractional offset 0 <= o < 1. To round a value to the nearest sample, breaking ties by rounding up, we can do this: round(x) = floor(x - o + 0.5) + o That is, first subtract o to let us pretend that the samples are at integer coordinates, then add 0.5 and floor to round to nearest integer, then add the offset back in. To break ties by rounding down: round(x) = ceil(x - o - 0.5) + o or if we have an epsilon value: round(x) = floor(x - o + 0.5 - e) + o To always round *up* to the next sample: round_up(x) = ceil(x - o) + o To always round *down* to the previous sample: round_down(x) = floor(x - o) + o If a set of samples is stored in an array, you get from the sample position to an index by subtracting the position of the first sample in the array: index(s) = s - first_sample *** Application to pixman In pixman, images are sampled with o = 0.5, that is, pixels are located midways between integers. 
We usually break ties by rounding down (i.e., "round towards north-west"). -- NEAREST filtering: The NEAREST filter simply picks the closest pixel to the given position: round(x) = floor(x - 0.5 + 0.5 - e) + 0.5 = floor (x - e) + 0.5 The first sample of a pixman image has position 0.5, so to find the index in the pixel array, we have to subtract 0.5: floor (x - e) + 0.5 - 0.5 = floor (x - e). Therefore a 16.16 fixed-point image location is turned into a pixel value with NEAREST filtering by doing this: pixels[((y - e) >> 16) * stride + ((x - e) >> 16)] where stride is the number of pixels allocated per scanline and e = 0x0001. -- CONVOLUTION filtering: A convolution matrix is considered a sampling of a function f at values surrounding 0. For example, this convolution matrix: [a, b, c, d] is interpreted as the values of a function f: a = f(-1.5) b = f(-0.5) c = f(0.5) d = f(1.5) The sample offset in this case is o = 0.5 and the first sample has position s0 = -1.5. If the matrix is: [a, b, c, d, e] the sample offset is o = 0 and the first sample has position s0 = -2.0. In general we have s0 = (- width / 2.0 + 0.5). and o = frac (s0) To evaluate f at a position between the samples, we round to the closest sample, and then we subtract the position of the first sample to get the index in the matrix: f(t) = matrix[floor(t - o + 0.5) + o - s0] Note that in this case we break ties by rounding up. If we write s0 = m + o, where m is an integer, this is equivalent to f(t) = matrix[floor(t - o + 0.5) + o - (m + o)] = matrix[floor(t - o + 0.5 - m) + o - o] = matrix[floor(t - s0 + 0.5)] The convolution filter in pixman positions f such that 0 aligns with the given position x. For a given pixel x0 in the image, the closest sample of f is then computed by taking (x - x0) and rounding that to the closest index: i = floor ((x0 - x) - s0 + 0.5) To perform the convolution, we have to find the first pixel x0 whose corresponding sample has index 0. 
We can write x0 = k + 0.5, where k is an integer: 0 = floor(k + 0.5 - x - s0 + 0.5) = k + floor(1 - x - s0) = k - ceil(x + s0 - 1) = k - floor(x + s0 - e) = k - floor(x - (width - 1) / 2.0 - e) And so the final formula for the index k of x0 in the image is: k = floor(x - (width - 1) / 2.0 - e) Computing the result is then simply a matter of convolving all the pixels starting at k with all the samples in the matrix. --- SEPARABLE_CONVOLUTION For this filter, x is first rounded to one of n regularly spaced subpixel positions. This subpixel position determines which of n convolution matrices is being used. Then, as in a regular convolution filter, the first pixel to be used is determined: k = floor (x - (width - 1) / 2.0 - e) and then the image pixels starting there are convolved with the chosen matrix. If we write x = xi + frac, where xi is an integer, we get k = xi + floor (frac - (width - 1) / 2.0 - e) so the location of k relative to x is given by: (k + 0.5 - x) = xi + floor (frac - (width - 1) / 2.0 - e) + 0.5 - x = floor (frac - (width - 1) / 2.0 - e) + 0.5 - frac which means the contents of the matrix corresponding to (frac) should contain width samplings of the function, with the first sample at: floor (frac - (width - 1) / 2.0 - e) + 0.5 - frac = ceil (frac - width / 2.0 - 0.5) + 0.5 - frac This filter is called separable because each of the k x k convolution matrices is specified with two k-wide vectors, one for each dimension, where each entry in the matrix is computed as the product of the corresponding entries in the vectors. ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/pixman/solaris-hwcap.mapfile0000664000175000017500000000302714712446423020527 0ustar00mattst88mattst88############################################################################### # # Copyright 2009, Oracle and/or its affiliates. All rights reserved. 
# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the next # paragraph) shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# ############################################################################### # # Override the linker's detection of CMOV/MMX/SSE instructions so this # library isn't flagged as only usable on CPU's with those ISA's, since it # checks at runtime for availability before calling them hwcap_1 = V0x0 FPU OVERRIDE; ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/0000775000175000017500000000000014712446423014075 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/a1-trap-test.c0000664000175000017500000000357014712446423016470 0ustar00mattst88mattst88#include #include #include #include #include "utils.h" int main (int argc, char **argv) { #define WIDTH 20 #define HEIGHT 20 pixman_image_t *src_img; pixman_image_t *mask_img; pixman_image_t *dest_img; pixman_trap_t trap; pixman_color_t red = { 0xffff, 0x0000, 0x0000, 0xffff }; uint32_t *bits = malloc (WIDTH * HEIGHT * 4); uint32_t *mbits = malloc (WIDTH * HEIGHT); memset (mbits, 0, WIDTH * HEIGHT); memset (bits, 0xff, WIDTH * HEIGHT * 4); trap.top.l = pixman_double_to_fixed (0.5); trap.top.r = pixman_double_to_fixed (1.5); trap.top.y = pixman_double_to_fixed (0.5); trap.bot.l = pixman_double_to_fixed (0.5); trap.bot.r = pixman_double_to_fixed (1.5); trap.bot.y = pixman_double_to_fixed (1.5); mask_img = pixman_image_create_bits ( PIXMAN_a1, WIDTH, HEIGHT, mbits, WIDTH); src_img = pixman_image_create_solid_fill (&red); dest_img = pixman_image_create_bits ( PIXMAN_a8r8g8b8, WIDTH, HEIGHT, bits, WIDTH * 4); pixman_add_traps (mask_img, 0, 0, 1, &trap); pixman_image_composite (PIXMAN_OP_OVER, src_img, mask_img, dest_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); assert (bits[0] == 0xffff0000); assert (bits[1] == 0xffffffff); assert (bits[1 * WIDTH + 0] == 0xffffffff); assert (bits[1 * WIDTH + 1] == 0xffffffff); /* The check-formats test depends on operator_name() and format_name() 
returning * these precise formats, so if those change, check-formats.c must be updated too. */ assert ( strcmp (operator_name (PIXMAN_OP_DISJOINT_OVER), "PIXMAN_OP_DISJOINT_OVER") == 0); assert ( strcmp (format_name (PIXMAN_r5g6b5), "r5g6b5") == 0); free (bits); free (mbits); pixman_image_unref (mask_img); pixman_image_unref (dest_img); pixman_image_unref (src_img); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/affine-bench.c0000664000175000017500000003165414712446423016557 0ustar00mattst88mattst88/* * Copyright Âİ 2014 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
* * Author: Ben Avison (bavison@riscosopen.org) */ #include #include #include #include #include #include "utils.h" #ifdef HAVE_GETTIMEOFDAY #include #else #include #endif #define WIDTH 1920 #define HEIGHT 1080 /* How much data to read to flush all cached data to RAM */ #define MAX_L2CACHE_SIZE (8 * 1024 * 1024) #define PAGE_SIZE (4 * 1024) struct bench_info { pixman_op_t op; pixman_transform_t transform; pixman_image_t *src_image; pixman_image_t *mask_image; pixman_image_t *dest_image; int32_t src_x; int32_t src_y; }; typedef struct bench_info bench_info_t; struct box_48_16 { pixman_fixed_48_16_t x1; pixman_fixed_48_16_t y1; pixman_fixed_48_16_t x2; pixman_fixed_48_16_t y2; }; typedef struct box_48_16 box_48_16_t; /* This function is copied verbatim from pixman.c. */ static pixman_bool_t compute_transformed_extents (pixman_transform_t *transform, const pixman_box32_t *extents, box_48_16_t *transformed) { pixman_fixed_48_16_t tx1, ty1, tx2, ty2; pixman_fixed_t x1, y1, x2, y2; int i; x1 = pixman_int_to_fixed (extents->x1) + pixman_fixed_1 / 2; y1 = pixman_int_to_fixed (extents->y1) + pixman_fixed_1 / 2; x2 = pixman_int_to_fixed (extents->x2) - pixman_fixed_1 / 2; y2 = pixman_int_to_fixed (extents->y2) - pixman_fixed_1 / 2; if (!transform) { transformed->x1 = x1; transformed->y1 = y1; transformed->x2 = x2; transformed->y2 = y2; return TRUE; } tx1 = ty1 = INT64_MAX; tx2 = ty2 = INT64_MIN; for (i = 0; i < 4; ++i) { pixman_fixed_48_16_t tx, ty; pixman_vector_t v; v.vector[0] = (i & 0x01)? x1 : x2; v.vector[1] = (i & 0x02)? 
y1 : y2; v.vector[2] = pixman_fixed_1; if (!pixman_transform_point (transform, &v)) return FALSE; tx = (pixman_fixed_48_16_t)v.vector[0]; ty = (pixman_fixed_48_16_t)v.vector[1]; if (tx < tx1) tx1 = tx; if (ty < ty1) ty1 = ty; if (tx > tx2) tx2 = tx; if (ty > ty2) ty2 = ty; } transformed->x1 = tx1; transformed->y1 = ty1; transformed->x2 = tx2; transformed->y2 = ty2; return TRUE; } static void create_image (uint32_t width, uint32_t height, pixman_format_code_t format, pixman_filter_t filter, uint32_t **bits, pixman_image_t **image) { uint32_t stride = (width * PIXMAN_FORMAT_BPP (format) + 31) / 32 * 4; *bits = aligned_malloc (PAGE_SIZE, stride * height); memset (*bits, 0xCC, stride * height); *image = pixman_image_create_bits (format, width, height, *bits, stride); pixman_image_set_repeat (*image, PIXMAN_REPEAT_NORMAL); pixman_image_set_filter (*image, filter, NULL, 0); } /* This needs to match the shortest cacheline length we expect to encounter */ #define CACHE_CLEAN_INCREMENT 32 static void flush_cache (void) { static const char clean_space[MAX_L2CACHE_SIZE]; volatile const char *x = clean_space; const char *clean_end = clean_space + sizeof clean_space; while (x < clean_end) { (void) *x; x += CACHE_CLEAN_INCREMENT; } } /* Obtain current time in microseconds modulo 2^32 */ uint32_t gettimei (void) { #ifdef HAVE_GETTIMEOFDAY struct timeval tv; gettimeofday (&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; #else return (uint64_t) clock () * 1000000 / CLOCKS_PER_SEC; #endif } static void pixman_image_composite_wrapper (const pixman_composite_info_t *info) { pixman_image_composite (info->op, info->src_image, info->mask_image, info->dest_image, info->src_x, info->src_y, info->mask_x, info->mask_y, info->dest_x, info->dest_y, info->width, info->height); } static void pixman_image_composite_empty (const pixman_composite_info_t *info) { pixman_image_composite (info->op, info->src_image, info->mask_image, info->dest_image, info->src_x, info->src_y, info->mask_x, 
info->mask_y, info->dest_x, info->dest_y, 1, 1); } static void bench (const bench_info_t *bi, uint32_t max_n, uint32_t max_time, uint32_t *ret_n, uint32_t *ret_time, void (*func) (const pixman_composite_info_t *info)) { uint32_t n = 0; uint32_t t0; uint32_t t1; uint32_t x = 0; pixman_transform_t t; pixman_composite_info_t info; t = bi->transform; info.op = bi->op; info.src_image = bi->src_image; info.mask_image = bi->mask_image; info.dest_image = bi->dest_image; info.src_x = 0; info.src_y = 0; info.mask_x = 0; info.mask_y = 0; /* info.dest_x set below */ info.dest_y = 0; info.width = WIDTH; info.height = HEIGHT; t0 = gettimei (); do { if (++x >= 64) x = 0; info.dest_x = 63 - x; t.matrix[0][2] = pixman_int_to_fixed (bi->src_x + x); t.matrix[1][2] = pixman_int_to_fixed (bi->src_y); pixman_image_set_transform (bi->src_image, &t); if (bi->mask_image) pixman_image_set_transform (bi->mask_image, &t); func (&info); t1 = gettimei (); } while (++n < max_n && (t1 - t0) < max_time); if (ret_n) *ret_n = n; *ret_time = t1 - t0; } int parse_fixed_argument (char *arg, pixman_fixed_t *value) { char *tailptr; *value = pixman_double_to_fixed (strtod (arg, &tailptr)); return *tailptr == '\0'; } int parse_arguments (int argc, char *argv[], pixman_transform_t *t, pixman_op_t *op, pixman_format_code_t *src_format, pixman_format_code_t *mask_format, pixman_format_code_t *dest_format) { if (!parse_fixed_argument (*argv, &t->matrix[0][0])) return 0; if (*++argv == NULL) return 1; if (!parse_fixed_argument (*argv, &t->matrix[0][1])) return 0; if (*++argv == NULL) return 1; if (!parse_fixed_argument (*argv, &t->matrix[1][0])) return 0; if (*++argv == NULL) return 1; if (!parse_fixed_argument (*argv, &t->matrix[1][1])) return 0; if (*++argv == NULL) return 1; *op = operator_from_string (*argv); if (*op == PIXMAN_OP_NONE) return 0; if (*++argv == NULL) return 1; *src_format = format_from_string (*argv); if (*src_format == PIXMAN_null) return 0; ++argv; if (argv[0] && argv[1]) { *mask_format = 
format_from_string (*argv); if (*mask_format == PIXMAN_null) return 0; ++argv; } if (*argv) { *dest_format = format_from_string (*argv); if (*dest_format == PIXMAN_null) return 0; } return 1; } static void run_benchmark (const bench_info_t *bi) { uint32_t n; /* number of iterations in at least 5 seconds */ uint32_t t1; /* time taken to do n iterations, microseconds */ uint32_t t2; /* calling overhead for n iterations, microseconds */ flush_cache (); bench (bi, UINT32_MAX, 5000000, &n, &t1, pixman_image_composite_wrapper); bench (bi, n, UINT32_MAX, NULL, &t2, pixman_image_composite_empty); /* The result indicates the output rate in megapixels/second */ printf ("%6.2f\n", (double) n * WIDTH * HEIGHT / (t1 - t2)); } int main (int argc, char *argv[]) { bench_info_t binfo; pixman_filter_t filter = PIXMAN_FILTER_NEAREST; pixman_format_code_t src_format = PIXMAN_a8r8g8b8; pixman_format_code_t mask_format = 0; pixman_format_code_t dest_format = PIXMAN_a8r8g8b8; pixman_box32_t dest_box = { 0, 0, WIDTH, HEIGHT }; box_48_16_t transformed = { 0 }; int32_t xmin, ymin, xmax, ymax; uint32_t *src, *mask, *dest; binfo.op = PIXMAN_OP_SRC; binfo.mask_image = NULL; pixman_transform_init_identity (&binfo.transform); ++argv; if (*argv && (*argv)[0] == '-' && (*argv)[1] == 'n') { filter = PIXMAN_FILTER_NEAREST; ++argv; --argc; } if (*argv && (*argv)[0] == '-' && (*argv)[1] == 'b') { filter = PIXMAN_FILTER_BILINEAR; ++argv; --argc; } if (argc == 1 || !parse_arguments (argc, argv, &binfo.transform, &binfo.op, &src_format, &mask_format, &dest_format)) { printf ("Usage: affine-bench [-n] [-b] axx [axy] [ayx] [ayy] [combine type]\n"); printf (" [src format] [mask format] [dest format]\n"); printf (" -n : nearest scaling (default)\n"); printf (" -b : bilinear scaling\n"); printf (" axx : x_out:x_in factor\n"); printf (" axy : x_out:y_in factor (default 0)\n"); printf (" ayx : y_out:x_in factor (default 0)\n"); printf (" ayy : y_out:y_in factor (default 1)\n"); printf (" combine type : src, 
over, in etc (default src)\n"); printf (" src format : a8r8g8b8, r5g6b5 etc (default a8r8g8b8)\n"); printf (" mask format : as for src format, but no mask used if omitted\n"); printf (" dest format : as for src format (default a8r8g8b8)\n"); printf ("The output is a single number in megapixels/second.\n"); return EXIT_FAILURE; } /* Compute required extents for source and mask image so they qualify * for COVER fast paths and get the flags in pixman.c:analyze_extent(). * These computations are for FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR, * but at the same time they also allow COVER_CLIP_NEAREST. */ compute_transformed_extents (&binfo.transform, &dest_box, &transformed); xmin = pixman_fixed_to_int (transformed.x1 - pixman_fixed_1 / 2); ymin = pixman_fixed_to_int (transformed.y1 - pixman_fixed_1 / 2); xmax = pixman_fixed_to_int (transformed.x2 + pixman_fixed_1 / 2); ymax = pixman_fixed_to_int (transformed.y2 + pixman_fixed_1 / 2); /* Note: * The upper limits can be reduced to the following when fetchers * are guaranteed to not access pixels with zero weight. This concerns * particularly all bilinear samplers. * * xmax = pixman_fixed_to_int (transformed.x2 + pixman_fixed_1 / 2 - pixman_fixed_e); * ymax = pixman_fixed_to_int (transformed.y2 + pixman_fixed_1 / 2 - pixman_fixed_e); * This is equivalent to subtracting 0.5 and rounding up, rather than * subtracting 0.5, rounding down and adding 1. */ binfo.src_x = -xmin; binfo.src_y = -ymin; /* Always over-allocate width by 64 pixels for all src, mask and dst, * so that we can iterate over an x-offset 0..63 in bench (). * This is similar to lowlevel-blt-bench, which uses the same method * to hit different cacheline misalignments. 
*/ create_image (xmax - xmin + 64, ymax - ymin + 1, src_format, filter, &src, &binfo.src_image); if (mask_format) { create_image (xmax - xmin + 64, ymax - ymin + 1, mask_format, filter, &mask, &binfo.mask_image); if ((PIXMAN_FORMAT_R(mask_format) || PIXMAN_FORMAT_G(mask_format) || PIXMAN_FORMAT_B(mask_format))) { pixman_image_set_component_alpha (binfo.mask_image, 1); } } create_image (WIDTH + 64, HEIGHT, dest_format, filter, &dest, &binfo.dest_image); run_benchmark (&binfo); return EXIT_SUCCESS; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/affine-test.c0000664000175000017500000002130314712446423016445 0ustar00mattst88mattst88/* * Test program, which can detect some problems with affine transformations * in pixman. Testing is done by running lots of random SRC and OVER * compositing operations a8r8g8b8, x8a8r8g8b8, r5g6b5 and a8 color formats * with random scaled, rotated and translated transforms. * * Script 'fuzzer-find-diff.pl' can be used to narrow down the problem in * the case of test failure. */ #include #include #include #include "utils.h" #define MAX_SRC_WIDTH 16 #define MAX_SRC_HEIGHT 16 #define MAX_DST_WIDTH 16 #define MAX_DST_HEIGHT 16 #define MAX_STRIDE 4 /* * Composite operation with pseudorandom images */ uint32_t test_composite (int testnum, int verbose) { int i; pixman_image_t * src_img; pixman_image_t * dst_img; pixman_transform_t transform; pixman_region16_t clip; int src_width, src_height; int dst_width, dst_height; int src_stride, dst_stride; int src_x, src_y; int dst_x, dst_y; int src_bpp; int dst_bpp; int w, h; pixman_fixed_t scale_x = 65536, scale_y = 65536; pixman_fixed_t translate_x = 0, translate_y = 0; pixman_op_t op; pixman_repeat_t repeat = PIXMAN_REPEAT_NONE; pixman_format_code_t src_fmt, dst_fmt; uint32_t * srcbuf; uint32_t * dstbuf; uint32_t crc32; FLOAT_REGS_CORRUPTION_DETECTOR_START (); prng_srand (testnum); src_bpp = (prng_rand_n (2) == 0) ? 
2 : 4; dst_bpp = (prng_rand_n (2) == 0) ? 2 : 4; op = (prng_rand_n (2) == 0) ? PIXMAN_OP_SRC : PIXMAN_OP_OVER; src_width = prng_rand_n (MAX_SRC_WIDTH) + 1; src_height = prng_rand_n (MAX_SRC_HEIGHT) + 1; dst_width = prng_rand_n (MAX_DST_WIDTH) + 1; dst_height = prng_rand_n (MAX_DST_HEIGHT) + 1; src_stride = src_width * src_bpp + prng_rand_n (MAX_STRIDE) * src_bpp; dst_stride = dst_width * dst_bpp + prng_rand_n (MAX_STRIDE) * dst_bpp; if (src_stride & 3) src_stride += 2; if (dst_stride & 3) dst_stride += 2; src_x = -(src_width / 4) + prng_rand_n (src_width * 3 / 2); src_y = -(src_height / 4) + prng_rand_n (src_height * 3 / 2); dst_x = -(dst_width / 4) + prng_rand_n (dst_width * 3 / 2); dst_y = -(dst_height / 4) + prng_rand_n (dst_height * 3 / 2); w = prng_rand_n (dst_width * 3 / 2 - dst_x); h = prng_rand_n (dst_height * 3 / 2 - dst_y); srcbuf = (uint32_t *)malloc (src_stride * src_height); dstbuf = (uint32_t *)malloc (dst_stride * dst_height); prng_randmemset (srcbuf, src_stride * src_height, 0); prng_randmemset (dstbuf, dst_stride * dst_height, 0); if (prng_rand_n (2) == 0) { srcbuf += (src_stride / 4) * (src_height - 1); src_stride = - src_stride; } if (prng_rand_n (2) == 0) { dstbuf += (dst_stride / 4) * (dst_height - 1); dst_stride = - dst_stride; } src_fmt = src_bpp == 4 ? (prng_rand_n (2) == 0 ? PIXMAN_a8r8g8b8 : PIXMAN_x8r8g8b8) : PIXMAN_r5g6b5; dst_fmt = dst_bpp == 4 ? (prng_rand_n (2) == 0 ? 
PIXMAN_a8r8g8b8 : PIXMAN_x8r8g8b8) : PIXMAN_r5g6b5; src_img = pixman_image_create_bits ( src_fmt, src_width, src_height, srcbuf, src_stride); dst_img = pixman_image_create_bits ( dst_fmt, dst_width, dst_height, dstbuf, dst_stride); image_endian_swap (src_img); image_endian_swap (dst_img); pixman_transform_init_identity (&transform); if (prng_rand_n (3) > 0) { scale_x = -65536 * 3 + prng_rand_n (65536 * 6); if (prng_rand_n (2)) scale_y = -65536 * 3 + prng_rand_n (65536 * 6); else scale_y = scale_x; pixman_transform_init_scale (&transform, scale_x, scale_y); } if (prng_rand_n (3) > 0) { translate_x = -65536 * 3 + prng_rand_n (6 * 65536); if (prng_rand_n (2)) translate_y = -65536 * 3 + prng_rand_n (6 * 65536); else translate_y = translate_x; pixman_transform_translate (&transform, NULL, translate_x, translate_y); } if (prng_rand_n (4) > 0) { int c, s, tx = 0, ty = 0; switch (prng_rand_n (4)) { case 0: /* 90 degrees */ c = 0; s = pixman_fixed_1; tx = pixman_int_to_fixed (MAX_SRC_HEIGHT); break; case 1: /* 180 degrees */ c = -pixman_fixed_1; s = 0; tx = pixman_int_to_fixed (MAX_SRC_WIDTH); ty = pixman_int_to_fixed (MAX_SRC_HEIGHT); break; case 2: /* 270 degrees */ c = 0; s = -pixman_fixed_1; ty = pixman_int_to_fixed (MAX_SRC_WIDTH); break; default: /* arbitrary rotation */ c = prng_rand_n (2 * 65536) - 65536; s = prng_rand_n (2 * 65536) - 65536; break; } pixman_transform_rotate (&transform, NULL, c, s); pixman_transform_translate (&transform, NULL, tx, ty); } if (prng_rand_n (8) == 0) { /* Flip random bits */ int maxflipcount = 8; while (maxflipcount--) { int i = prng_rand_n (2); int j = prng_rand_n (3); int bitnum = prng_rand_n (32); transform.matrix[i][j] ^= 1U << bitnum; if (prng_rand_n (2)) break; } } pixman_image_set_transform (src_img, &transform); switch (prng_rand_n (4)) { case 0: repeat = PIXMAN_REPEAT_NONE; break; case 1: repeat = PIXMAN_REPEAT_NORMAL; break; case 2: repeat = PIXMAN_REPEAT_PAD; break; case 3: repeat = PIXMAN_REPEAT_REFLECT; break; default: 
break; } pixman_image_set_repeat (src_img, repeat); if (prng_rand_n (2)) pixman_image_set_filter (src_img, PIXMAN_FILTER_NEAREST, NULL, 0); else pixman_image_set_filter (src_img, PIXMAN_FILTER_BILINEAR, NULL, 0); if (verbose) { #define M(r,c) \ transform.matrix[r][c] printf ("src_fmt=%s, dst_fmt=%s\n", format_name (src_fmt), format_name (dst_fmt)); printf ("op=%s, repeat=%d, transform=\n", operator_name (op), repeat); printf (" { { { 0x%08x, 0x%08x, 0x%08x },\n" " { 0x%08x, 0x%08x, 0x%08x },\n" " { 0x%08x, 0x%08x, 0x%08x },\n" " } };\n", M(0,0), M(0,1), M(0,2), M(1,0), M(1,1), M(1,2), M(2,0), M(2,1), M(2,2)); printf ("src_width=%d, src_height=%d, dst_width=%d, dst_height=%d\n", src_width, src_height, dst_width, dst_height); printf ("src_x=%d, src_y=%d, dst_x=%d, dst_y=%d\n", src_x, src_y, dst_x, dst_y); printf ("w=%d, h=%d\n", w, h); } if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (src_width); clip_boxes[i].y1 = prng_rand_n (src_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (src_width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (src_height - clip_boxes[i].y1); if (verbose) { printf ("source clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (src_img, &clip); pixman_image_set_source_clipping (src_img, 1); pixman_region_fini (&clip); } if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (dst_width); clip_boxes[i].y1 = prng_rand_n (dst_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (dst_width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (dst_height - clip_boxes[i].y1); if (verbose) { printf ("destination clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, 
clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (dst_img, &clip); pixman_region_fini (&clip); } pixman_image_composite (op, src_img, NULL, dst_img, src_x, src_y, 0, 0, dst_x, dst_y, w, h); crc32 = compute_crc32_for_image (0, dst_img); if (verbose) print_image (dst_img); pixman_image_unref (src_img); pixman_image_unref (dst_img); if (src_stride < 0) srcbuf += (src_stride / 4) * (src_height - 1); if (dst_stride < 0) dstbuf += (dst_stride / 4) * (dst_height - 1); free (srcbuf); free (dstbuf); FLOAT_REGS_CORRUPTION_DETECTOR_FINISH (); return crc32; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM 0xBE724CFE #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM 0x79BBE501 #else #define CHECKSUM 0x00000000 #endif int main (int argc, const char *argv[]) { pixman_disable_out_of_bounds_workaround (); return fuzzer_test_main ("affine", 8000000, CHECKSUM, test_composite, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/alpha-loop.c0000664000175000017500000000177614712446423016310 0ustar00mattst88mattst88#include #include #include "utils.h" #define WIDTH 400 #define HEIGHT 200 int main (int argc, char **argv) { pixman_image_t *a, *d, *s; uint8_t *alpha; uint32_t *src, *dest; prng_srand (0); alpha = make_random_bytes (WIDTH * HEIGHT); src = (uint32_t *)make_random_bytes (WIDTH * HEIGHT * 4); dest = (uint32_t *)make_random_bytes (WIDTH * HEIGHT * 4); a = pixman_image_create_bits (PIXMAN_a8, WIDTH, HEIGHT, (uint32_t *)alpha, WIDTH); d = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, dest, WIDTH * 4); s = pixman_image_create_bits (PIXMAN_a2r10g10b10, WIDTH, HEIGHT, src, WIDTH * 4); fail_after (5, "Infinite loop detected: 5 seconds without progress\n"); pixman_image_set_alpha_map (s, a, 0, 0); pixman_image_set_alpha_map (a, s, 0, 0); pixman_image_composite (PIXMAN_OP_SRC, s, NULL, d, 0, 0, 0, 0, 0, 0, WIDTH, 
HEIGHT); pixman_image_unref (a); pixman_image_unref (d); pixman_image_unref (s); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/alphamap.c0000664000175000017500000002014114712446423016022 0ustar00mattst88mattst88#include #include #include "utils.h" #define WIDTH 48 #define HEIGHT 48 static const pixman_format_code_t formats[] = { PIXMAN_a8r8g8b8, PIXMAN_a2r10g10b10, PIXMAN_a4r4g4b4, PIXMAN_a8, PIXMAN_rgba_float, }; static const pixman_format_code_t alpha_formats[] = { PIXMAN_null, PIXMAN_a8, PIXMAN_a2r10g10b10, PIXMAN_a4r4g4b4, PIXMAN_rgba_float, }; static const int origins[] = { 0, 10, -100 }; static void on_destroy (pixman_image_t *image, void *data) { uint32_t *bits = pixman_image_get_data (image); fence_free (bits); } static pixman_image_t * make_image (pixman_format_code_t format) { uint32_t *bits; uint8_t bpp = PIXMAN_FORMAT_BPP (format) / 8; pixman_image_t *image; if (format != PIXMAN_rgba_float) bits = (uint32_t *)make_random_bytes (WIDTH * HEIGHT * bpp); else bits = (uint32_t *)make_random_floats (WIDTH * HEIGHT * bpp); image = pixman_image_create_bits (format, WIDTH, HEIGHT, bits, WIDTH * bpp); if (image && bits) pixman_image_set_destroy_function (image, on_destroy, NULL); return image; } static float get_alpha (pixman_image_t *image, int x, int y, int orig_x, int orig_y) { uint8_t *bits; uint32_t r; if (image->common.alpha_map) { if (x - orig_x >= 0 && x - orig_x < WIDTH && y - orig_y >= 0 && y - orig_y < HEIGHT) { image = (pixman_image_t *)image->common.alpha_map; x -= orig_x; y -= orig_y; } else { return 0.f; } } bits = (uint8_t *)image->bits.bits; if (image->bits.format == PIXMAN_a8) { r = bits[y * WIDTH + x]; return r / 255.f; } else if (image->bits.format == PIXMAN_a2r10g10b10) { r = ((uint32_t *)bits)[y * WIDTH + x] >> 30; return r / 3.f; } else if (image->bits.format == PIXMAN_a8r8g8b8) { r = ((uint32_t *)bits)[y * WIDTH + x] >> 24; return r / 255.f; } else if 
(image->bits.format == PIXMAN_a4r4g4b4) { r = ((uint16_t *)bits)[y * WIDTH + x] >> 12; return r / 15.f; } else if (image->bits.format == PIXMAN_rgba_float) { return ((float *)bits)[y * WIDTH * 4 + x * 4 + 3]; } else { assert (0); return 0.f; } } static uint16_t get_red (pixman_image_t *image, int x, int y, int orig_x, int orig_y) { uint8_t *bits; uint16_t r; bits = (uint8_t *)image->bits.bits; if (image->bits.format == PIXMAN_a8) { r = 0x00; } else if (image->bits.format == PIXMAN_a2r10g10b10) { r = ((uint32_t *)bits)[y * WIDTH + x] >> 14; r &= 0xffc0; r |= (r >> 10); } else if (image->bits.format == PIXMAN_a8r8g8b8) { r = ((uint32_t *)bits)[y * WIDTH + x] >> 16; r &= 0xff; r |= r << 8; } else if (image->bits.format == PIXMAN_a4r4g4b4) { r = ((uint16_t *)bits)[y * WIDTH + x] >> 8; r &= 0xf; r |= r << 4; r |= r << 8; } else if (image->bits.format == PIXMAN_rgba_float) { double tmp = ((float *)bits)[y * WIDTH * 4 + x * 4]; return tmp * 65535.; } else { assert (0); } return r; } static float get_alpha_err(pixman_format_code_t sf, pixman_format_code_t saf, pixman_format_code_t df, pixman_format_code_t daf) { pixman_format_code_t s = saf != PIXMAN_null ? saf : sf; pixman_format_code_t d = daf != PIXMAN_null ? daf : df; /* There are cases where we go through the 8 bit compositing * path even with 10bpc and higher formats. 
*/ if (PIXMAN_FORMAT_A(s) == PIXMAN_FORMAT_A(d)) return 1.f / 255.f; else if (PIXMAN_FORMAT_A(s) > PIXMAN_FORMAT_A(d)) return 1.f / ((1 << PIXMAN_FORMAT_A(d)) - 1); else return 1.f / ((1 << PIXMAN_FORMAT_A(s)) - 1); } static int run_test (int s, int d, int sa, int da, int soff, int doff) { pixman_format_code_t sf = formats[s]; pixman_format_code_t df = formats[d]; pixman_format_code_t saf = alpha_formats[sa]; pixman_format_code_t daf = alpha_formats[da]; pixman_image_t *src, *dst, *orig_dst, *alpha, *orig_alpha; pixman_transform_t t1; int j, k; int n_red_bits; soff = origins[soff]; doff = origins[doff]; n_red_bits = PIXMAN_FORMAT_R (df); /* Source */ src = make_image (sf); if (saf != PIXMAN_null) { alpha = make_image (saf); pixman_image_set_alpha_map (src, alpha, soff, soff); pixman_image_unref (alpha); } /* Destination */ orig_dst = make_image (df); dst = make_image (df); pixman_image_composite (PIXMAN_OP_SRC, orig_dst, NULL, dst, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); if (daf != PIXMAN_null) { orig_alpha = make_image (daf); alpha = make_image (daf); pixman_image_composite (PIXMAN_OP_SRC, orig_alpha, NULL, alpha, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); pixman_image_set_alpha_map (orig_dst, orig_alpha, doff, doff); pixman_image_set_alpha_map (dst, alpha, doff, doff); pixman_image_unref (orig_alpha); pixman_image_unref (alpha); } /* Transformations, repeats and filters on destinations should be ignored, * so just set some random ones. 
*/ pixman_transform_init_identity (&t1); pixman_transform_scale (&t1, NULL, pixman_int_to_fixed (100), pixman_int_to_fixed (11)); pixman_transform_rotate (&t1, NULL, pixman_double_to_fixed (0.5), pixman_double_to_fixed (0.11)); pixman_transform_translate (&t1, NULL, pixman_int_to_fixed (11), pixman_int_to_fixed (17)); pixman_image_set_transform (dst, &t1); pixman_image_set_filter (dst, PIXMAN_FILTER_BILINEAR, NULL, 0); pixman_image_set_repeat (dst, PIXMAN_REPEAT_REFLECT); pixman_image_composite (PIXMAN_OP_ADD, src, NULL, dst, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); for (j = MAX (doff, 0); j < MIN (HEIGHT, HEIGHT + doff); ++j) { for (k = MAX (doff, 0); k < MIN (WIDTH, WIDTH + doff); ++k) { float sa, da, oda, refa; uint16_t sr, dr, odr, refr; float err; err = get_alpha_err(sf, saf, df, daf); sa = get_alpha (src, k, j, soff, soff); da = get_alpha (dst, k, j, doff, doff); oda = get_alpha (orig_dst, k, j, doff, doff); if (sa + oda > 1.f) refa = 1.f; else refa = sa + oda; if (da - err > refa || da + err < refa) { printf ("\nWrong alpha value at (%d, %d). Should be %g; got %g. Source was %g, original dest was %g\n", k, j, refa, da, sa, oda); printf ("src: %s, alpha: %s, origin %d %d\ndst: %s, alpha: %s, origin: %d %d\n\n", format_name (sf), format_name (saf), soff, soff, format_name (df), format_name (daf), doff, doff); return 1; } /* There are cases where we go through the 8 bit compositing * path even with 10bpc formats. This results in incorrect * results here, so only do the red check for narrow formats */ if (n_red_bits <= 8) { sr = get_red (src, k, j, soff, soff); dr = get_red (dst, k, j, doff, doff); odr = get_red (orig_dst, k, j, doff, doff); if (sr + odr > 0xffff) refr = 0xffff; else refr = sr + odr; if (abs ((dr >> (16 - n_red_bits)) - (refr >> (16 - n_red_bits))) > 1) { printf ("%d red bits\n", n_red_bits); printf ("\nWrong red value at (%d, %d). Should be 0x%x; got 0x%x. 
Source was 0x%x, original dest was 0x%x\n", k, j, refr, dr, sr, odr); printf ("src: %s, alpha: %s, origin %d %d\ndst: %s, alpha: %s, origin: %d %d\n\n", format_name (sf), format_name (saf), soff, soff, format_name (df), format_name (daf), doff, doff); return 1; } } } } pixman_image_set_alpha_map (src, NULL, 0, 0); pixman_image_set_alpha_map (dst, NULL, 0, 0); pixman_image_set_alpha_map (orig_dst, NULL, 0, 0); pixman_image_unref (src); pixman_image_unref (dst); pixman_image_unref (orig_dst); return 0; } int main (int argc, char **argv) { int i, j, a, b, x, y; prng_srand (0); for (i = 0; i < ARRAY_LENGTH (formats); ++i) { for (j = 0; j < ARRAY_LENGTH (formats); ++j) { for (a = 0; a < ARRAY_LENGTH (alpha_formats); ++a) { for (b = 0; b < ARRAY_LENGTH (alpha_formats); ++b) { for (x = 0; x < ARRAY_LENGTH (origins); ++x) { for (y = 0; y < ARRAY_LENGTH (origins); ++y) { if (run_test (i, j, a, b, x, y) != 0) return 1; } } } } } } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/blitters-test.c0000664000175000017500000002355314712446423017056 0ustar00mattst88mattst88/* * Test program, which stresses the use of different color formats and * compositing operations. * * Script 'fuzzer-find-diff.pl' can be used to narrow down the problem in * the case of test failure. 
*/ #include #include #include "utils.h" static pixman_indexed_t rgb_palette[9]; static pixman_indexed_t y_palette[9]; /* The first eight format in the list are by far the most widely * used formats, so we test those more than the others */ #define N_MOST_LIKELY_FORMATS 8 /* Create random image for testing purposes */ static pixman_image_t * create_random_image (pixman_format_code_t *allowed_formats, int max_width, int max_height, int max_extra_stride, pixman_format_code_t *used_fmt) { int n = 0, width, height, stride; pixman_format_code_t fmt; uint32_t *buf; pixman_image_t *img; while (allowed_formats[n] != PIXMAN_null) n++; if (n > N_MOST_LIKELY_FORMATS && prng_rand_n (4) != 0) n = N_MOST_LIKELY_FORMATS; fmt = allowed_formats[prng_rand_n (n)]; width = prng_rand_n (max_width) + 1; height = prng_rand_n (max_height) + 1; stride = (width * PIXMAN_FORMAT_BPP (fmt) + 7) / 8 + prng_rand_n (max_extra_stride + 1); stride = (stride + 3) & ~3; /* do the allocation */ buf = aligned_malloc (64, stride * height); if (prng_rand_n (4) == 0) { /* uniform distribution */ prng_randmemset (buf, stride * height, 0); } else { /* significantly increased probability for 0x00 and 0xFF */ prng_randmemset (buf, stride * height, RANDMEMSET_MORE_00_AND_FF); } /* test negative stride */ if (prng_rand_n (4) == 0) { buf += (stride / 4) * (height - 1); stride = - stride; } img = pixman_image_create_bits (fmt, width, height, buf, stride); if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_COLOR) { pixman_image_set_indexed (img, &(rgb_palette[PIXMAN_FORMAT_BPP (fmt)])); } else if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_GRAY) { pixman_image_set_indexed (img, &(y_palette[PIXMAN_FORMAT_BPP (fmt)])); } if (prng_rand_n (16) == 0) pixman_image_set_filter (img, PIXMAN_FILTER_BILINEAR, NULL, 0); image_endian_swap (img); if (used_fmt) *used_fmt = fmt; return img; } /* Free random image, and optionally update crc32 based on its data */ static uint32_t free_random_image (uint32_t initcrc, pixman_image_t *img, 
pixman_format_code_t fmt) { uint32_t crc32 = 0; uint32_t *data = pixman_image_get_data (img); if (fmt != PIXMAN_null) crc32 = compute_crc32_for_image (initcrc, img); if (img->bits.rowstride < 0) data += img->bits.rowstride * (img->bits.height - 1); pixman_image_unref (img); free (data); return crc32; } static pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, #if 0 /* these use floating point math and are not always bitexact on different platforms */ PIXMAN_OP_SATURATE, PIXMAN_OP_DISJOINT_CLEAR, PIXMAN_OP_DISJOINT_SRC, PIXMAN_OP_DISJOINT_DST, PIXMAN_OP_DISJOINT_OVER, PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_OP_DISJOINT_IN, PIXMAN_OP_DISJOINT_IN_REVERSE, PIXMAN_OP_DISJOINT_OUT, PIXMAN_OP_DISJOINT_OUT_REVERSE, PIXMAN_OP_DISJOINT_ATOP, PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_OP_DISJOINT_XOR, PIXMAN_OP_CONJOINT_CLEAR, PIXMAN_OP_CONJOINT_SRC, PIXMAN_OP_CONJOINT_DST, PIXMAN_OP_CONJOINT_OVER, PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_OP_CONJOINT_IN, PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_OP_CONJOINT_OUT, PIXMAN_OP_CONJOINT_OUT_REVERSE, PIXMAN_OP_CONJOINT_ATOP, PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_OP_CONJOINT_XOR, PIXMAN_OP_COLOR_DODGE, PIXMAN_OP_COLOR_BURN, PIXMAN_OP_SOFT_LIGHT, PIXMAN_OP_HSL_HUE, PIXMAN_OP_HSL_SATURATION, PIXMAN_OP_HSL_COLOR, PIXMAN_OP_HSL_LUMINOSITY, #endif }; static pixman_format_code_t img_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, PIXMAN_x8b8g8r8, PIXMAN_r5g6b5, PIXMAN_b5g6r5, PIXMAN_a8, PIXMAN_a1, PIXMAN_r3g3b2, PIXMAN_b8g8r8a8, PIXMAN_b8g8r8x8, PIXMAN_r8g8b8a8, PIXMAN_r8g8b8x8, PIXMAN_x14r6g6b6, PIXMAN_r8g8b8, 
PIXMAN_b8g8r8, #if 0 /* These are going to use floating point in the near future */ PIXMAN_x2r10g10b10, PIXMAN_a2r10g10b10, PIXMAN_x2b10g10r10, PIXMAN_a2b10g10r10, #endif PIXMAN_a1r5g5b5, PIXMAN_x1r5g5b5, PIXMAN_a1b5g5r5, PIXMAN_x1b5g5r5, PIXMAN_a4r4g4b4, PIXMAN_x4r4g4b4, PIXMAN_a4b4g4r4, PIXMAN_x4b4g4r4, PIXMAN_r3g3b2, PIXMAN_b2g3r3, PIXMAN_a2r2g2b2, PIXMAN_a2b2g2r2, PIXMAN_c8, PIXMAN_g8, PIXMAN_x4c4, PIXMAN_x4g4, PIXMAN_c4, PIXMAN_g4, PIXMAN_g1, PIXMAN_x4a4, PIXMAN_a4, PIXMAN_r1g2b1, PIXMAN_b1g2r1, PIXMAN_a1r1g1b1, PIXMAN_a1b1g1r1, PIXMAN_null }; static pixman_format_code_t mask_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a4, PIXMAN_a1, PIXMAN_null }; /* * Composite operation with pseudorandom images */ uint32_t test_composite (int testnum, int verbose) { pixman_image_t *src_img = NULL; pixman_image_t *dst_img = NULL; pixman_image_t *mask_img = NULL; int src_width, src_height; int dst_width, dst_height; int src_stride, dst_stride; int src_x, src_y; int dst_x, dst_y; int mask_x, mask_y; int w, h; pixman_op_t op; pixman_format_code_t src_fmt, dst_fmt, mask_fmt; uint32_t *srcbuf, *maskbuf; uint32_t crc32; int max_width, max_height, max_extra_stride; FLOAT_REGS_CORRUPTION_DETECTOR_START (); max_width = max_height = 24 + testnum / 10000; max_extra_stride = 4 + testnum / 1000000; if (max_width > 256) max_width = 256; if (max_height > 16) max_height = 16; if (max_extra_stride > 8) max_extra_stride = 8; prng_srand (testnum); op = op_list[prng_rand_n (ARRAY_LENGTH (op_list))]; if (prng_rand_n (8)) { /* normal image */ src_img = create_random_image (img_fmt_list, max_width, max_height, max_extra_stride, &src_fmt); } else { /* solid case */ src_img = create_random_image (img_fmt_list, 1, 1, max_extra_stride, &src_fmt); pixman_image_set_repeat (src_img, PIXMAN_REPEAT_NORMAL); } dst_img = create_random_image (img_fmt_list, max_width, max_height, max_extra_stride, &dst_fmt); src_width = pixman_image_get_width (src_img); src_height = pixman_image_get_height (src_img); 
src_stride = pixman_image_get_stride (src_img); dst_width = pixman_image_get_width (dst_img); dst_height = pixman_image_get_height (dst_img); dst_stride = pixman_image_get_stride (dst_img); srcbuf = pixman_image_get_data (src_img); src_x = prng_rand_n (src_width); src_y = prng_rand_n (src_height); dst_x = prng_rand_n (dst_width); dst_y = prng_rand_n (dst_height); mask_img = NULL; mask_fmt = PIXMAN_null; mask_x = 0; mask_y = 0; maskbuf = NULL; if ((src_fmt == PIXMAN_x8r8g8b8 || src_fmt == PIXMAN_x8b8g8r8) && (prng_rand_n (4) == 0)) { /* PIXBUF */ mask_fmt = prng_rand_n (2) ? PIXMAN_a8r8g8b8 : PIXMAN_a8b8g8r8; mask_img = pixman_image_create_bits (mask_fmt, src_width, src_height, srcbuf, src_stride); mask_x = src_x; mask_y = src_y; maskbuf = srcbuf; } else if (prng_rand_n (2)) { if (prng_rand_n (2)) { mask_img = create_random_image (mask_fmt_list, max_width, max_height, max_extra_stride, &mask_fmt); } else { /* solid case */ mask_img = create_random_image (mask_fmt_list, 1, 1, max_extra_stride, &mask_fmt); pixman_image_set_repeat (mask_img, PIXMAN_REPEAT_NORMAL); } if (prng_rand_n (2)) pixman_image_set_component_alpha (mask_img, 1); mask_x = prng_rand_n (pixman_image_get_width (mask_img)); mask_y = prng_rand_n (pixman_image_get_height (mask_img)); } w = prng_rand_n (dst_width - dst_x + 1); h = prng_rand_n (dst_height - dst_y + 1); if (verbose) { printf ("op=%s\n", operator_name (op)); printf ("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n", format_name (src_fmt), format_name (dst_fmt), format_name (mask_fmt)); printf ("src_width=%d, src_height=%d, dst_width=%d, dst_height=%d\n", src_width, src_height, dst_width, dst_height); printf ("src_x=%d, src_y=%d, dst_x=%d, dst_y=%d\n", src_x, src_y, dst_x, dst_y); printf ("src_stride=%d, dst_stride=%d\n", src_stride, dst_stride); printf ("w=%d, h=%d\n", w, h); } pixman_image_composite (op, src_img, mask_img, dst_img, src_x, src_y, mask_x, mask_y, dst_x, dst_y, w, h); if (verbose) print_image (dst_img); free_random_image (0, src_img, 
PIXMAN_null); crc32 = free_random_image (0, dst_img, dst_fmt); if (mask_img) { if (srcbuf == maskbuf) pixman_image_unref(mask_img); else free_random_image (0, mask_img, PIXMAN_null); } FLOAT_REGS_CORRUPTION_DETECTOR_FINISH (); return crc32; } int main (int argc, const char *argv[]) { int i; prng_srand (0); for (i = 1; i <= 8; i++) { initialize_palette (&(rgb_palette[i]), i, TRUE); initialize_palette (&(y_palette[i]), i, FALSE); } return fuzzer_test_main("blitters", 2000000, 0xCC21DDF0, test_composite, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/check-formats.c0000664000175000017500000001135014712446423016767 0ustar00mattst88mattst88#include #include "utils.h" static int check_op (pixman_op_t op, pixman_format_code_t src_format, pixman_format_code_t dest_format) { uint32_t src_alpha_mask, src_green_mask; uint32_t dest_alpha_mask, dest_green_mask; pixel_checker_t src_checker, dest_checker; pixman_image_t *si, *di; uint32_t sa, sg, da, dg; uint32_t s, d; int retval = 0; pixel_checker_init (&src_checker, src_format); pixel_checker_init (&dest_checker, dest_format); pixel_checker_get_masks ( &src_checker, &src_alpha_mask, NULL, &src_green_mask, NULL); pixel_checker_get_masks ( &dest_checker, &dest_alpha_mask, NULL, &dest_green_mask, NULL); /* printf ("masks: %x %x %x %x\n", */ /* src_alpha_mask, src_green_mask, */ /* dest_alpha_mask, dest_green_mask); */ si = pixman_image_create_bits (src_format, 1, 1, &s, 4); di = pixman_image_create_bits (dest_format, 1, 1, &d, 4); sa = 0; do { sg = 0; do { da = 0; do { dg = 0; do { color_t src_color, dest_color, result_color; uint32_t orig_d; s = sa | sg; d = da | dg; orig_d = d; pixel_checker_convert_pixel_to_color (&src_checker, s, &src_color); pixel_checker_convert_pixel_to_color (&dest_checker, d, &dest_color); do_composite (op, &src_color, NULL, &dest_color, &result_color, FALSE); if (!is_little_endian()) { s <<= 32 - PIXMAN_FORMAT_BPP 
(src_format); d <<= 32 - PIXMAN_FORMAT_BPP (dest_format); } pixman_image_composite32 (op, si, NULL, di, 0, 0, 0, 0, 0, 0, 1, 1); if (!is_little_endian()) d >>= (32 - PIXMAN_FORMAT_BPP (dest_format)); if (!pixel_checker_check (&dest_checker, d, &result_color)) { printf ("---- test failed ----\n"); printf ("operator: %-32s\n", operator_name (op)); printf ("source: %-12s pixel: %08x\n", format_name (src_format), s); printf ("dest: %-12s pixel: %08x\n", format_name (dest_format), orig_d); printf ("got: %-12s pixel: %08x\n", format_name (dest_format), d); retval = 1; } dg -= dest_green_mask; dg &= dest_green_mask; } while (dg != 0); da -= dest_alpha_mask; da &= dest_alpha_mask; } while (da != 0); sg -= src_green_mask; sg &= src_green_mask; } while (sg != 0); sa -= src_alpha_mask; sa &= src_alpha_mask; } while (sa != 0); pixman_image_unref (si); pixman_image_unref (di); return retval; } int main (int argc, char **argv) { enum { OPTION_OP, OPTION_SRC, OPTION_DEST, LAST_OPTION } option; pixman_format_code_t src_fmt, dest_fmt; pixman_op_t op; op = PIXMAN_OP_NONE; src_fmt = PIXMAN_null; dest_fmt = PIXMAN_null; argc--; argv++; for (option = OPTION_OP; option < LAST_OPTION; ++option) { char *arg = NULL; if (argc) { argc--; arg = *argv++; } switch (option) { case OPTION_OP: if (!arg) printf (" - missing operator\n"); else if ((op = operator_from_string (arg)) == PIXMAN_OP_NONE) printf (" - unknown operator %s\n", arg); break; case OPTION_SRC: if (!arg) printf (" - missing source format\n"); else if ((src_fmt = format_from_string (arg)) == PIXMAN_null) printf (" - unknown source format %s\n", arg); break; case OPTION_DEST: if (!arg) printf (" - missing destination format\n"); else if ((dest_fmt = format_from_string (arg)) == PIXMAN_null) printf (" - unknown destination format %s\n", arg); break; default: assert (0); break; } } while (argc--) { op = PIXMAN_OP_NONE; printf (" - unexpected argument: %s\n", *argv++); } if (op == PIXMAN_OP_NONE || src_fmt == PIXMAN_null || dest_fmt 
== PIXMAN_null) { printf ("\nUsage:\n check-formats \n\n"); list_operators(); list_formats(); return -1; } return check_op (op, src_fmt, dest_fmt); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/combiner-test.c0000664000175000017500000000624314712446423017021 0ustar00mattst88mattst88#include #include #include "utils.h" #include #include "pixman-private.h" static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_SATURATE, PIXMAN_OP_DISJOINT_CLEAR, PIXMAN_OP_DISJOINT_SRC, PIXMAN_OP_DISJOINT_DST, PIXMAN_OP_DISJOINT_OVER, PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_OP_DISJOINT_IN, PIXMAN_OP_DISJOINT_IN_REVERSE, PIXMAN_OP_DISJOINT_OUT, PIXMAN_OP_DISJOINT_OUT_REVERSE, PIXMAN_OP_DISJOINT_ATOP, PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_OP_DISJOINT_XOR, PIXMAN_OP_CONJOINT_CLEAR, PIXMAN_OP_CONJOINT_SRC, PIXMAN_OP_CONJOINT_DST, PIXMAN_OP_CONJOINT_OVER, PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_OP_CONJOINT_IN, PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_OP_CONJOINT_OUT, PIXMAN_OP_CONJOINT_OUT_REVERSE, PIXMAN_OP_CONJOINT_ATOP, PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_OP_CONJOINT_XOR, PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_COLOR_DODGE, PIXMAN_OP_COLOR_BURN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, PIXMAN_OP_SOFT_LIGHT, PIXMAN_OP_HSL_HUE, PIXMAN_OP_HSL_SATURATION, PIXMAN_OP_HSL_COLOR, PIXMAN_OP_HSL_LUMINOSITY, }; static float rand_float (void) { uint32_t u = prng_rand(); return *(float *)&u; } static void random_floats (argb_t *argb, int width) { int i; for (i = 0; i < width; ++i) { argb_t *p = argb + i; p->a = rand_float(); p->r = rand_float(); p->g = rand_float(); p->b = 
rand_float(); } } #define WIDTH 512 static pixman_combine_float_func_t lookup_combiner (pixman_implementation_t *imp, pixman_op_t op, pixman_bool_t component_alpha) { pixman_combine_float_func_t f; do { if (component_alpha) f = imp->combine_float_ca[op]; else f = imp->combine_float[op]; imp = imp->fallback; } while (!f); return f; } int main () { pixman_implementation_t *impl; argb_t *src_bytes = malloc (WIDTH * sizeof (argb_t)); argb_t *mask_bytes = malloc (WIDTH * sizeof (argb_t)); argb_t *dest_bytes = malloc (WIDTH * sizeof (argb_t)); int i; enable_divbyzero_exceptions(); impl = _pixman_internal_only_get_implementation(); prng_srand (0); for (i = 0; i < ARRAY_LENGTH (op_list); ++i) { pixman_op_t op = op_list[i]; pixman_combine_float_func_t combiner; int ca; for (ca = 0; ca < 2; ++ca) { combiner = lookup_combiner (impl, op, ca); random_floats (src_bytes, WIDTH); random_floats (mask_bytes, WIDTH); random_floats (dest_bytes, WIDTH); combiner (impl, op, (float *)dest_bytes, (float *)mask_bytes, (float *)src_bytes, WIDTH); } } free (src_bytes); free (mask_bytes); free (dest_bytes); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/composite-traps-test.c0000664000175000017500000001501614712446423020352 0ustar00mattst88mattst88/* Based loosely on scaling-test */ #include #include #include "utils.h" #define MAX_SRC_WIDTH 48 #define MAX_SRC_HEIGHT 48 #define MAX_DST_WIDTH 48 #define MAX_DST_HEIGHT 48 #define MAX_STRIDE 4 static pixman_format_code_t formats[] = { PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_r5g6b5, PIXMAN_a1, PIXMAN_a4 }; static pixman_format_code_t mask_formats[] = { PIXMAN_a1, PIXMAN_a4, PIXMAN_a8, }; static pixman_op_t operators[] = { PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_SRC, PIXMAN_OP_IN }; #define RANDOM_ELT(array) \ ((array)[prng_rand_n(ARRAY_LENGTH((array)))]) static void destroy_bits (pixman_image_t *image, void *data) { fence_free (data); } static pixman_fixed_t random_fixed 
(int n) { return prng_rand_n (n << 16); } /* * Composite operation with pseudorandom images */ uint32_t test_composite (int testnum, int verbose) { int i; pixman_image_t * src_img; pixman_image_t * dst_img; pixman_region16_t clip; int dst_width, dst_height; int dst_stride; int dst_x, dst_y; int dst_bpp; pixman_op_t op; uint32_t * dst_bits; uint32_t crc32; pixman_format_code_t mask_format, dst_format; pixman_trapezoid_t *traps; int src_x, src_y; int n_traps; static pixman_color_t colors[] = { { 0xffff, 0xffff, 0xffff, 0xffff }, { 0x0000, 0x0000, 0x0000, 0x0000 }, { 0xabcd, 0xabcd, 0x0000, 0xabcd }, { 0x0000, 0x0000, 0x0000, 0xffff }, { 0x0101, 0x0101, 0x0101, 0x0101 }, { 0x7777, 0x6666, 0x5555, 0x9999 }, }; FLOAT_REGS_CORRUPTION_DETECTOR_START (); prng_srand (testnum); op = RANDOM_ELT (operators); mask_format = RANDOM_ELT (mask_formats); /* Create source image */ if (prng_rand_n (4) == 0) { src_img = pixman_image_create_solid_fill ( &(colors[prng_rand_n (ARRAY_LENGTH (colors))])); src_x = 10; src_y = 234; } else { pixman_format_code_t src_format = RANDOM_ELT(formats); int src_bpp = (PIXMAN_FORMAT_BPP (src_format) + 7) / 8; int src_width = prng_rand_n (MAX_SRC_WIDTH) + 1; int src_height = prng_rand_n (MAX_SRC_HEIGHT) + 1; int src_stride = src_width * src_bpp + prng_rand_n (MAX_STRIDE) * src_bpp; uint32_t *bits, *orig; src_x = -(src_width / 4) + prng_rand_n (src_width * 3 / 2); src_y = -(src_height / 4) + prng_rand_n (src_height * 3 / 2); src_stride = (src_stride + 3) & ~3; orig = bits = (uint32_t *)make_random_bytes (src_stride * src_height); if (prng_rand_n (2) == 0) { bits += (src_stride / 4) * (src_height - 1); src_stride = - src_stride; } src_img = pixman_image_create_bits ( src_format, src_width, src_height, bits, src_stride); pixman_image_set_destroy_function (src_img, destroy_bits, orig); if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (src_width); clip_boxes[i].y1 
= prng_rand_n (src_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (src_width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (src_height - clip_boxes[i].y1); if (verbose) { printf ("source clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (src_img, &clip); pixman_image_set_source_clipping (src_img, 1); pixman_region_fini (&clip); } image_endian_swap (src_img); } /* Create destination image */ { dst_format = RANDOM_ELT(formats); dst_bpp = (PIXMAN_FORMAT_BPP (dst_format) + 7) / 8; dst_width = prng_rand_n (MAX_DST_WIDTH) + 1; dst_height = prng_rand_n (MAX_DST_HEIGHT) + 1; dst_stride = dst_width * dst_bpp + prng_rand_n (MAX_STRIDE) * dst_bpp; dst_stride = (dst_stride + 3) & ~3; dst_bits = (uint32_t *)make_random_bytes (dst_stride * dst_height); if (prng_rand_n (2) == 0) { dst_bits += (dst_stride / 4) * (dst_height - 1); dst_stride = - dst_stride; } dst_x = -(dst_width / 4) + prng_rand_n (dst_width * 3 / 2); dst_y = -(dst_height / 4) + prng_rand_n (dst_height * 3 / 2); dst_img = pixman_image_create_bits ( dst_format, dst_width, dst_height, dst_bits, dst_stride); image_endian_swap (dst_img); } /* Create traps */ { int i; n_traps = prng_rand_n (25); traps = fence_malloc (n_traps * sizeof (pixman_trapezoid_t)); for (i = 0; i < n_traps; ++i) { pixman_trapezoid_t *t = &(traps[i]); t->top = random_fixed (MAX_DST_HEIGHT) - MAX_DST_HEIGHT / 2; t->bottom = t->top + random_fixed (MAX_DST_HEIGHT); t->left.p1.x = random_fixed (MAX_DST_WIDTH) - MAX_DST_WIDTH / 2; t->left.p1.y = t->top - random_fixed (50); t->left.p2.x = random_fixed (MAX_DST_WIDTH) - MAX_DST_WIDTH / 2; t->left.p2.y = t->bottom + random_fixed (50); t->right.p1.x = t->left.p1.x + random_fixed (MAX_DST_WIDTH); t->right.p1.y = t->top - random_fixed (50); t->right.p2.x = t->left.p2.x + random_fixed (MAX_DST_WIDTH); t->right.p2.y = t->bottom - random_fixed 
(50); } } if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (dst_width); clip_boxes[i].y1 = prng_rand_n (dst_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (dst_width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (dst_height - clip_boxes[i].y1); if (verbose) { printf ("destination clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (dst_img, &clip); pixman_region_fini (&clip); } pixman_composite_trapezoids (op, src_img, dst_img, mask_format, src_x, src_y, dst_x, dst_y, n_traps, traps); crc32 = compute_crc32_for_image (0, dst_img); if (verbose) print_image (dst_img); if (dst_stride < 0) dst_bits += (dst_stride / 4) * (dst_height - 1); fence_free (dst_bits); pixman_image_unref (src_img); pixman_image_unref (dst_img); fence_free (traps); FLOAT_REGS_CORRUPTION_DETECTOR_FINISH (); return crc32; } int main (int argc, const char *argv[]) { return fuzzer_test_main("composite traps", 40000, 0xAF41D210, test_composite, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/composite.c0000664000175000017500000002737614712446423016262 0ustar00mattst88mattst88/* * Copyright Âİ 2005 Eric Anholt * Copyright Âİ 2009 Chris Wilson * Copyright Âİ 2010 Soeren Sandmann * Copyright Âİ 2010 Red Hat, Inc. 
* * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of Eric Anholt not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. Eric Anholt makes no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * ERIC ANHOLT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL ERIC ANHOLT BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/ #include #include /* abort() */ #include #include #include "utils.h" typedef struct image_t image_t; static const color_t colors[] = { { 1.0, 1.0, 1.0, 1.0 }, { 1.0, 1.0, 1.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0 }, { 0.0, 0.0, 0.0, 0.0 }, { 1.0, 0.0, 0.0, 1.0 }, { 0.0, 1.0, 0.0, 1.0 }, { 0.0, 0.0, 1.0, 1.0 }, { 0.5, 0.0, 0.0, 0.5 }, }; static uint16_t _color_double_to_short (double d) { uint32_t i; i = (uint32_t) (d * 65536); i -= (i >> 16); return i; } static void compute_pixman_color (const color_t *color, pixman_color_t *out) { out->red = _color_double_to_short (color->r); out->green = _color_double_to_short (color->g); out->blue = _color_double_to_short (color->b); out->alpha = _color_double_to_short (color->a); } #define REPEAT 0x01000000 #define FLAGS 0xff000000 static const int sizes[] = { 0, 1, 1 | REPEAT, 10 }; static const pixman_format_code_t formats[] = { /* 32 bpp formats */ PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, PIXMAN_b8g8r8a8, PIXMAN_b8g8r8x8, PIXMAN_r8g8b8a8, PIXMAN_r8g8b8x8, PIXMAN_x2r10g10b10, PIXMAN_x2b10g10r10, PIXMAN_a2r10g10b10, PIXMAN_a2b10g10r10, /* sRGB formats */ PIXMAN_a8r8g8b8_sRGB, PIXMAN_r8g8b8_sRGB, /* 24 bpp formats */ PIXMAN_r8g8b8, PIXMAN_b8g8r8, PIXMAN_r5g6b5, PIXMAN_b5g6r5, /* 16 bpp formats */ PIXMAN_x1r5g5b5, PIXMAN_x1b5g5r5, PIXMAN_a1r5g5b5, PIXMAN_a1b5g5r5, PIXMAN_a4b4g4r4, PIXMAN_x4b4g4r4, PIXMAN_a4r4g4b4, PIXMAN_x4r4g4b4, /* 8 bpp formats */ PIXMAN_a8, PIXMAN_r3g3b2, PIXMAN_b2g3r3, PIXMAN_a2r2g2b2, PIXMAN_a2b2g2r2, PIXMAN_x4a4, /* 4 bpp formats */ PIXMAN_a4, PIXMAN_r1g2b1, PIXMAN_b1g2r1, PIXMAN_a1r1g1b1, PIXMAN_a1b1g1r1, /* 1 bpp formats */ PIXMAN_a1, }; struct image_t { pixman_image_t *image; pixman_format_code_t format; const color_t *color; pixman_repeat_t repeat; int size; }; static const pixman_op_t operators[] = { PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, 
PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_SATURATE, PIXMAN_OP_DISJOINT_CLEAR, PIXMAN_OP_DISJOINT_SRC, PIXMAN_OP_DISJOINT_DST, PIXMAN_OP_DISJOINT_OVER, PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_OP_DISJOINT_IN, PIXMAN_OP_DISJOINT_IN_REVERSE, PIXMAN_OP_DISJOINT_OUT, PIXMAN_OP_DISJOINT_OUT_REVERSE, PIXMAN_OP_DISJOINT_ATOP, PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_OP_DISJOINT_XOR, PIXMAN_OP_CONJOINT_CLEAR, PIXMAN_OP_CONJOINT_SRC, PIXMAN_OP_CONJOINT_DST, PIXMAN_OP_CONJOINT_OVER, PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_OP_CONJOINT_IN, PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_OP_CONJOINT_OUT, PIXMAN_OP_CONJOINT_OUT_REVERSE, PIXMAN_OP_CONJOINT_ATOP, PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_OP_CONJOINT_XOR, }; static uint32_t get_value (pixman_image_t *image) { uint32_t value = *(uint32_t *)pixman_image_get_data (image); #ifdef WORDS_BIGENDIAN { pixman_format_code_t format = pixman_image_get_format (image); value >>= 8 * sizeof(value) - PIXMAN_FORMAT_BPP (format); } #endif return value; } static char * describe_image (image_t *info, char *buf) { if (info->size) { sprintf (buf, "%s, %dx%d%s", format_name (info->format), info->size, info->size, info->repeat ? 
" R" :""); } else { sprintf (buf, "solid"); } return buf; } static char * describe_color (const color_t *color, char *buf) { sprintf (buf, "%.3f %.3f %.3f %.3f", color->r, color->g, color->b, color->a); return buf; } static pixman_bool_t composite_test (image_t *dst, pixman_op_t op, image_t *src, image_t *mask, pixman_bool_t component_alpha, int testno) { color_t expected, tdst, tsrc, tmsk; pixel_checker_t checker; if (mask) { pixman_image_set_component_alpha (mask->image, component_alpha); pixman_image_composite (op, src->image, mask->image, dst->image, 0, 0, 0, 0, 0, 0, dst->size, dst->size); } else { pixman_image_composite (op, src->image, NULL, dst->image, 0, 0, 0, 0, 0, 0, dst->size, dst->size); } tdst = *dst->color; tsrc = *src->color; if (mask) { tmsk = *mask->color; } /* It turns out that by construction all source, mask etc. colors are * linear because they are made from fills, and fills are always in linear * color space. However, if they have been converted to bitmaps, we need * to simulate the sRGB approximation to pass the test cases. 
*/ if (src->size) { if (PIXMAN_FORMAT_TYPE (src->format) == PIXMAN_TYPE_ARGB_SRGB) { tsrc.r = convert_linear_to_srgb (tsrc.r); tsrc.g = convert_linear_to_srgb (tsrc.g); tsrc.b = convert_linear_to_srgb (tsrc.b); round_color (src->format, &tsrc); tsrc.r = convert_srgb_to_linear (tsrc.r); tsrc.g = convert_srgb_to_linear (tsrc.g); tsrc.b = convert_srgb_to_linear (tsrc.b); } else { round_color (src->format, &tsrc); } } if (mask && mask->size) { if (PIXMAN_FORMAT_TYPE (mask->format) == PIXMAN_TYPE_ARGB_SRGB) { tmsk.r = convert_linear_to_srgb (tmsk.r); tmsk.g = convert_linear_to_srgb (tmsk.g); tmsk.b = convert_linear_to_srgb (tmsk.b); round_color (mask->format, &tmsk); tmsk.r = convert_srgb_to_linear (tmsk.r); tmsk.g = convert_srgb_to_linear (tmsk.g); tmsk.b = convert_srgb_to_linear (tmsk.b); } else { round_color (mask->format, &tmsk); } } if (PIXMAN_FORMAT_TYPE (dst->format) == PIXMAN_TYPE_ARGB_SRGB) { tdst.r = convert_linear_to_srgb (tdst.r); tdst.g = convert_linear_to_srgb (tdst.g); tdst.b = convert_linear_to_srgb (tdst.b); round_color (dst->format, &tdst); tdst.r = convert_srgb_to_linear (tdst.r); tdst.g = convert_srgb_to_linear (tdst.g); tdst.b = convert_srgb_to_linear (tdst.b); } else { round_color (dst->format, &tdst); } do_composite (op, &tsrc, mask? &tmsk : NULL, &tdst, &expected, component_alpha); pixel_checker_init (&checker, dst->format); if (!pixel_checker_check (&checker, get_value (dst->image), &expected)) { char buf[40], buf2[40]; int a, r, g, b; uint32_t pixel; printf ("---- Test %d failed ----\n", testno); printf ("Operator: %s %s\n", operator_name (op), component_alpha ? 
"CA" : ""); printf ("Source: %s\n", describe_image (src, buf)); if (mask != NULL) printf ("Mask: %s\n", describe_image (mask, buf)); printf ("Destination: %s\n\n", describe_image (dst, buf)); printf (" R G B A Rounded\n"); printf ("Source color: %s %s\n", describe_color (src->color, buf), describe_color (&tsrc, buf2)); if (mask) { printf ("Mask color: %s %s\n", describe_color (mask->color, buf), describe_color (&tmsk, buf2)); } printf ("Dest. color: %s %s\n", describe_color (dst->color, buf), describe_color (&tdst, buf2)); pixel = get_value (dst->image); printf ("Expected: %s\n", describe_color (&expected, buf)); pixel_checker_split_pixel (&checker, pixel, &a, &r, &g, &b); printf ("Got: %5d %5d %5d %5d [pixel: 0x%08x]\n", r, g, b, a, pixel); pixel_checker_get_min (&checker, &expected, &a, &r, &g, &b); printf ("Min accepted: %5d %5d %5d %5d\n", r, g, b, a); pixel_checker_get_max (&checker, &expected, &a, &r, &g, &b); printf ("Max accepted: %5d %5d %5d %5d\n", r, g, b, a); return FALSE; } return TRUE; } static void image_init (image_t *info, int color, int format, int size) { pixman_color_t fill; info->color = &colors[color]; compute_pixman_color (info->color, &fill); info->format = formats[format]; info->size = sizes[size] & ~FLAGS; info->repeat = PIXMAN_REPEAT_NONE; if (info->size) { pixman_image_t *solid; info->image = pixman_image_create_bits (info->format, info->size, info->size, NULL, 0); solid = pixman_image_create_solid_fill (&fill); pixman_image_composite32 (PIXMAN_OP_SRC, solid, NULL, info->image, 0, 0, 0, 0, 0, 0, info->size, info->size); pixman_image_unref (solid); if (sizes[size] & REPEAT) { pixman_image_set_repeat (info->image, PIXMAN_REPEAT_NORMAL); info->repeat = PIXMAN_REPEAT_NORMAL; } } else { info->image = pixman_image_create_solid_fill (&fill); } } static void image_fini (image_t *info) { pixman_image_unref (info->image); } static int random_size (void) { return prng_rand_n (ARRAY_LENGTH (sizes)); } static int random_color (void) { return 
prng_rand_n (ARRAY_LENGTH (colors)); } static int random_format (void) { return prng_rand_n (ARRAY_LENGTH (formats)); } static pixman_bool_t run_test (uint32_t seed) { image_t src, mask, dst; pixman_op_t op; int ca; int ok; prng_srand (seed); image_init (&dst, random_color(), random_format(), 1); image_init (&src, random_color(), random_format(), random_size()); image_init (&mask, random_color(), random_format(), random_size()); op = operators [prng_rand_n (ARRAY_LENGTH (operators))]; ca = prng_rand_n (3); switch (ca) { case 0: ok = composite_test (&dst, op, &src, NULL, FALSE, seed); break; case 1: ok = composite_test (&dst, op, &src, &mask, FALSE, seed); break; case 2: ok = composite_test (&dst, op, &src, &mask, mask.size? TRUE : FALSE, seed); break; default: ok = FALSE; break; } image_fini (&src); image_fini (&mask); image_fini (&dst); return ok; } int main (int argc, char **argv) { #define N_TESTS (8 * 1024 * 1024) int result = 0; uint32_t seed; int32_t i; if (argc > 1) { char *end; i = strtol (argv[1], &end, 0); if (end != argv[1]) { if (!run_test (i)) return 1; else return 0; } else { printf ("Usage:\n\n %s \n\n", argv[0]); return -1; } } if (getenv ("PIXMAN_RANDOMIZE_TESTS")) seed = get_random_seed(); else seed = 1; #ifdef USE_OPENMP # pragma omp parallel for default(none) shared(result, argv, seed) #endif for (i = 0; i <= N_TESTS; ++i) { if (!result && !run_test (i + seed)) { printf ("Test 0x%08X failed.\n", seed + i); result = seed + i; } } return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/cover-test.c0000664000175000017500000003627414712446423016350 0ustar00mattst88mattst88/* * Copyright Âİ 2015 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission 
notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Ben Avison (bavison@riscosopen.org) * */ /* * This test aims to verify both numerical correctness and the honouring of * array bounds for scaled plots (both nearest-neighbour and bilinear) at or * close to the boundary conditions for applicability of "cover" type fast paths * and iter fetch routines. * * It has a secondary purpose: by setting the env var EXACT (to any value) it * will only test plots that are exactly on the boundary condition. This makes * it possible to ensure that "cover" routines are being used to the maximum, * although this requires the use of a debugger or code instrumentation to * verify. */ #include "utils.h" #include #include /* Approximate limits for random scale factor generation - these ensure we can * get at least 8x reduction and 8x enlargement. */ #define LOG2_MAX_FACTOR (3) /* 1/sqrt(2) (or sqrt(0.5), or 2^-0.5) as a 0.32 fixed-point number */ #define INV_SQRT_2_0POINT32_FIXED (0xB504F334u) /* The largest increment that can be generated by random_scale_factor(). * This occurs when the "mantissa" part is 0xFFFFFFFF and the "exponent" * part is -LOG2_MAX_FACTOR. 
*/ #define MAX_INC ((pixman_fixed_t) \ (INV_SQRT_2_0POINT32_FIXED >> (31 - 16 - LOG2_MAX_FACTOR))) /* Minimum source width (in pixels) based on a typical page size of 4K and * maximum colour depth of 32bpp. */ #define MIN_SRC_WIDTH (4096 / 4) /* Derive the destination width so that at max increment we fit within source */ #define DST_WIDTH (MIN_SRC_WIDTH * pixman_fixed_1 / MAX_INC) /* Calculate heights the other way round. * No limits due to page alignment here. */ #define DST_HEIGHT 3 #define SRC_HEIGHT ((DST_HEIGHT * MAX_INC + pixman_fixed_1 - 1) / pixman_fixed_1) /* At the time of writing, all the scaled fast paths use SRC, OVER or ADD * Porter-Duff operators. XOR is included in the list to ensure good * representation of iter scanline fetch routines. */ static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_XOR, }; /* At the time of writing, all the scaled fast paths use a8r8g8b8, x8r8g8b8 * or r5g6b5, or red-blue swapped versions of the same. When a mask channel is * used, it is always a8 (and so implicitly not component alpha). a1r5g5b5 is * included because it is the only other format to feature in any iters. */ static const pixman_format_code_t img_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, PIXMAN_r5g6b5, PIXMAN_a1r5g5b5 }; /* This is a flag reflecting the environment variable EXACT. It can be used * to ensure that source coordinates corresponding exactly to the "cover" limits * are used, rather than any "near misses". This can, for example, be used in * conjunction with a debugger to ensure that only COVER fast paths are used. */ static int exact; static pixman_image_t * create_src_image (pixman_format_code_t fmt) { pixman_image_t *tmp_img, *img; /* We need the left-most and right-most MIN_SRC_WIDTH pixels to have * predictable values, even though fence_image_create_bits() may allocate * an image somewhat larger than that, by an amount that varies depending * upon the page size on the current platform. 
The solution is to create a * temporary non-fenced image that is exactly MIN_SRC_WIDTH wide and blit it * into the fenced image. */ tmp_img = pixman_image_create_bits (fmt, MIN_SRC_WIDTH, SRC_HEIGHT, NULL, 0); if (tmp_img == NULL) return NULL; img = fence_image_create_bits (fmt, MIN_SRC_WIDTH, SRC_HEIGHT, TRUE); if (img == NULL) { pixman_image_unref (tmp_img); return NULL; } prng_randmemset (tmp_img->bits.bits, tmp_img->bits.rowstride * SRC_HEIGHT * sizeof (uint32_t), 0); image_endian_swap (tmp_img); pixman_image_composite (PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, 0, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_composite (PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, img->bits.width - MIN_SRC_WIDTH, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_unref (tmp_img); return img; } static pixman_fixed_t random_scale_factor(void) { /* Get a random number with top bit set. */ uint32_t f = prng_rand () | 0x80000000u; /* In log(2) space, this is still approximately evenly spread between 31 * and 32. Divide by sqrt(2) to centre the distribution on 2^31. */ f = ((uint64_t) f * INV_SQRT_2_0POINT32_FIXED) >> 32; /* Now shift right (ie divide by an integer power of 2) to spread the * distribution between centres at 2^(16 +/- LOG2_MAX_FACTOR). */ f >>= 31 - 16 + prng_rand_n (2 * LOG2_MAX_FACTOR + 1) - LOG2_MAX_FACTOR; return f; } static pixman_fixed_t calc_translate (int dst_size, int src_size, pixman_fixed_t scale, pixman_bool_t low_align, pixman_bool_t bilinear) { pixman_fixed_t ref_src, ref_dst, scaled_dst; if (low_align) { ref_src = bilinear ? pixman_fixed_1 / 2 : pixman_fixed_e; ref_dst = pixman_fixed_1 / 2; } else { ref_src = pixman_int_to_fixed (src_size) - bilinear * pixman_fixed_1 / 2; ref_dst = pixman_int_to_fixed (dst_size) - pixman_fixed_1 / 2; } scaled_dst = ((uint64_t) ref_dst * scale + pixman_fixed_1 / 2) / pixman_fixed_1; /* We need the translation to be set such that when ref_dst is fed through * the transformation matrix, we get ref_src as the result. 
*/ return ref_src - scaled_dst; } static pixman_fixed_t random_offset (void) { pixman_fixed_t offset = 0; /* Ensure we test the exact case quite a lot */ if (prng_rand_n (2)) return offset; /* What happens when we are close to the edge of the first * interpolation step? */ if (prng_rand_n (2)) offset += (pixman_fixed_1 >> BILINEAR_INTERPOLATION_BITS) - 16; /* Try fine-grained variations */ offset += prng_rand_n (32); /* Test in both directions */ if (prng_rand_n (2)) offset = -offset; return offset; } static void check_transform (pixman_image_t *dst_img, pixman_image_t *src_img, pixman_transform_t *transform, pixman_bool_t bilinear) { pixman_vector_t v1, v2; v1.vector[0] = pixman_fixed_1 / 2; v1.vector[1] = pixman_fixed_1 / 2; v1.vector[2] = pixman_fixed_1; assert (pixman_transform_point (transform, &v1)); v2.vector[0] = pixman_int_to_fixed (dst_img->bits.width) - pixman_fixed_1 / 2; v2.vector[1] = pixman_int_to_fixed (dst_img->bits.height) - pixman_fixed_1 / 2; v2.vector[2] = pixman_fixed_1; assert (pixman_transform_point (transform, &v2)); if (bilinear) { assert (v1.vector[0] >= pixman_fixed_1 / 2); assert (v1.vector[1] >= pixman_fixed_1 / 2); assert (v2.vector[0] <= pixman_int_to_fixed (src_img->bits.width) - pixman_fixed_1 / 2); assert (v2.vector[1] <= pixman_int_to_fixed (src_img->bits.height) - pixman_fixed_1 / 2); } else { assert (v1.vector[0] >= pixman_fixed_e); assert (v1.vector[1] >= pixman_fixed_e); assert (v2.vector[0] <= pixman_int_to_fixed (src_img->bits.width)); assert (v2.vector[1] <= pixman_int_to_fixed (src_img->bits.height)); } } static uint32_t test_cover (int testnum, int verbose) { pixman_fixed_t x_scale, y_scale; pixman_bool_t left_align, top_align; pixman_bool_t bilinear; pixman_filter_t filter; pixman_op_t op; size_t src_fmt_index; pixman_format_code_t src_fmt, dst_fmt, mask_fmt; pixman_image_t *src_img, *dst_img, *mask_img; pixman_transform_t src_transform, mask_transform; pixman_fixed_t fuzz[4]; uint32_t crc32; /* We allocate one fenced 
image for each pixel format up-front. This is to * avoid spending a lot of time on memory management rather than on testing * Pixman optimisations. We need one per thread because the transformation * matrices and filtering are properties of the source and mask images. */ static pixman_image_t *src_imgs[ARRAY_LENGTH (img_fmt_list)]; static pixman_image_t *mask_bits_img; static pixman_bool_t fence_images_created; #ifdef USE_OPENMP #pragma omp threadprivate (src_imgs) #pragma omp threadprivate (mask_bits_img) #pragma omp threadprivate (fence_images_created) #endif if (!fence_images_created) { int i; prng_srand (0); for (i = 0; i < ARRAY_LENGTH (img_fmt_list); i++) src_imgs[i] = create_src_image (img_fmt_list[i]); mask_bits_img = create_src_image (PIXMAN_a8); fence_images_created = TRUE; } prng_srand (testnum); x_scale = random_scale_factor (); y_scale = random_scale_factor (); left_align = prng_rand_n (2); top_align = prng_rand_n (2); bilinear = prng_rand_n (2); filter = bilinear ? PIXMAN_FILTER_BILINEAR : PIXMAN_FILTER_NEAREST; op = op_list[prng_rand_n (ARRAY_LENGTH (op_list))]; dst_fmt = img_fmt_list[prng_rand_n (ARRAY_LENGTH (img_fmt_list))]; dst_img = pixman_image_create_bits (dst_fmt, DST_WIDTH, DST_HEIGHT, NULL, 0); prng_randmemset (dst_img->bits.bits, dst_img->bits.rowstride * DST_HEIGHT * sizeof (uint32_t), 0); image_endian_swap (dst_img); src_fmt_index = prng_rand_n (ARRAY_LENGTH (img_fmt_list)); src_fmt = img_fmt_list[src_fmt_index]; src_img = src_imgs[src_fmt_index]; pixman_image_set_filter (src_img, filter, NULL, 0); pixman_transform_init_scale (&src_transform, x_scale, y_scale); src_transform.matrix[0][2] = calc_translate (dst_img->bits.width, src_img->bits.width, x_scale, left_align, bilinear); src_transform.matrix[1][2] = calc_translate (dst_img->bits.height, src_img->bits.height, y_scale, top_align, bilinear); if (prng_rand_n (2)) { /* No mask */ mask_fmt = PIXMAN_null; mask_img = NULL; } else if (prng_rand_n (2)) { /* a8 bitmap mask */ mask_fmt = 
PIXMAN_a8; mask_img = mask_bits_img; pixman_image_set_filter (mask_img, filter, NULL, 0); pixman_transform_init_scale (&mask_transform, x_scale, y_scale); mask_transform.matrix[0][2] = calc_translate (dst_img->bits.width, mask_img->bits.width, x_scale, left_align, bilinear); mask_transform.matrix[1][2] = calc_translate (dst_img->bits.height, mask_img->bits.height, y_scale, top_align, bilinear); } else { /* Solid mask */ pixman_color_t color; memset (&color, 0xAA, sizeof color); mask_fmt = PIXMAN_solid; mask_img = pixman_image_create_solid_fill (&color); } if (!exact) { int i = 0; while (i < 4) fuzz[i++] = random_offset (); src_transform.matrix[0][2] += fuzz[0]; src_transform.matrix[1][2] += fuzz[1]; mask_transform.matrix[0][2] += fuzz[2]; mask_transform.matrix[1][2] += fuzz[3]; } pixman_image_set_transform (src_img, &src_transform); if (mask_fmt == PIXMAN_a8) pixman_image_set_transform (mask_img, &mask_transform); if (verbose) { printf ("op=%s\n", operator_name (op)); printf ("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n", format_name (src_fmt), format_name (dst_fmt), format_name (mask_fmt)); printf ("x_scale=0x%08X, y_scale=0x%08X, align %s/%s, %s\n", x_scale, y_scale, left_align ? "left" : "right", top_align ? "top" : "bottom", bilinear ? 
"bilinear" : "nearest"); if (!exact) { int i = 0; printf ("fuzz factors"); while (i < 4) printf (" %d", fuzz[i++]); printf ("\n"); } } if (exact) { check_transform (dst_img, src_img, &src_transform, bilinear); if (mask_fmt == PIXMAN_a8) check_transform (dst_img, mask_img, &mask_transform, bilinear); } pixman_image_composite (op, src_img, mask_img, dst_img, 0, 0, 0, 0, 0, 0, dst_img->bits.width, dst_img->bits.height); if (verbose) print_image (dst_img); crc32 = compute_crc32_for_image (0, dst_img); pixman_image_unref (dst_img); if (mask_fmt == PIXMAN_solid) pixman_image_unref (mask_img); return crc32; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM_FUZZ 0x6B56F607 #define CHECKSUM_EXACT 0xA669F4A3 #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM_FUZZ 0x83119ED0 #define CHECKSUM_EXACT 0x0D3382CD #else #define CHECKSUM_FUZZ 0x00000000 #define CHECKSUM_EXACT 0x00000000 #endif int main (int argc, const char *argv[]) { unsigned long page_size; page_size = fence_get_page_size (); if (page_size == 0 || page_size > 16 * 1024) return 77; /* automake SKIP */ exact = getenv ("EXACT") != NULL; if (exact) printf ("Doing plots that are exactly aligned to boundaries\n"); return fuzzer_test_main ("cover", 2000000, exact ? 
CHECKSUM_EXACT : CHECKSUM_FUZZ, test_cover, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/fence-image-self-test.c0000664000175000017500000001344114712446423020310 0ustar00mattst88mattst88/* * Copyright Âİ 2015 Raspberry Pi Foundation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * */ #ifdef HAVE_CONFIG_H #include #endif #include "utils.h" #if FENCE_MALLOC_ACTIVE && defined (HAVE_SIGACTION) #include #include #include #include #include #include #include #include pixman_bool_t verbose; static void segv_handler (int sig, siginfo_t *si, void *unused) { _exit (EXIT_SUCCESS); } static void die (const char *msg, int err) { if (err) perror (msg); else fprintf (stderr, "%s\n", msg); abort (); } static void prinfo (const char *fmt, ...) 
{ va_list ap; if (!verbose) return; va_start (ap, fmt); vfprintf (stderr, fmt, ap); va_end (ap); } static void do_expect_signal (void (*fn)(void *), void *data) { struct sigaction sa; sa.sa_flags = SA_SIGINFO; sigemptyset (&sa.sa_mask); sa.sa_sigaction = segv_handler; if (sigaction (SIGSEGV, &sa, NULL) == -1) die ("sigaction failed", errno); if (sigaction (SIGBUS, &sa, NULL) == -1) die ("sigaction failed", errno); (*fn)(data); _exit (EXIT_FAILURE); } /* Check that calling fn(data) causes a segmentation fault. * * You cannot portably return from a SIGSEGV handler in any way, * so we fork, and do the test in the child process. Child's * exit status will reflect the result. Its SEGV handler causes it * to exit with success, and return failure otherwise. */ static pixman_bool_t expect_signal (void (*fn)(void *), void *data) { pid_t pid, wp; int status; pid = fork (); if (pid == -1) die ("fork failed", errno); if (pid == 0) do_expect_signal (fn, data); /* never returns */ wp = waitpid (pid, &status, 0); if (wp != pid) die ("waitpid did not work", wp == -1 ? errno : 0); if (WIFEXITED (status) && WEXITSTATUS (status) == EXIT_SUCCESS) return TRUE; return FALSE; } static void read_u8 (void *data) { volatile uint8_t *p = data; *p; } static pixman_bool_t test_read_fault (uint8_t *p, int offset) { prinfo ("*(uint8_t *)(%p + %d)", p, offset); if (expect_signal (read_u8, p + offset)) { prinfo ("\tsignal OK\n"); return TRUE; } prinfo ("\tFAILED\n"); return FALSE; } static void test_read_ok (uint8_t *p, int offset) { prinfo ("*(uint8_t *)(%p + %d)", p, offset); /* If fails, SEGV. 
*/ read_u8 (p + offset); prinfo ("\tOK\n"); } static pixman_bool_t test_read_faults (pixman_image_t *image) { pixman_bool_t ok = TRUE; pixman_format_code_t format = pixman_image_get_format (image); int width = pixman_image_get_width (image); int height = pixman_image_get_height (image); int stride = pixman_image_get_stride (image); uint8_t *p = (void *)pixman_image_get_data (image); int row_bytes = width * PIXMAN_FORMAT_BPP (format) / 8; prinfo ("%s %dx%d, row %d B, stride %d B:\n", format_name (format), width, height, row_bytes, stride); assert (height > 3); test_read_ok (p, 0); test_read_ok (p, row_bytes - 1); test_read_ok (p, stride); test_read_ok (p, stride + row_bytes - 1); test_read_ok (p, 2 * stride); test_read_ok (p, 2 * stride + row_bytes - 1); test_read_ok (p, 3 * stride); test_read_ok (p, (height - 1) * stride + row_bytes - 1); ok &= test_read_fault (p, -1); ok &= test_read_fault (p, row_bytes); ok &= test_read_fault (p, stride - 1); ok &= test_read_fault (p, stride + row_bytes); ok &= test_read_fault (p, 2 * stride - 1); ok &= test_read_fault (p, 2 * stride + row_bytes); ok &= test_read_fault (p, 3 * stride - 1); ok &= test_read_fault (p, height * stride); return ok; } static pixman_bool_t test_image_faults (pixman_format_code_t format, int min_width, int height) { pixman_bool_t ok; pixman_image_t *image; image = fence_image_create_bits (format, min_width, height, TRUE); ok = test_read_faults (image); pixman_image_unref (image); return ok; } int main (int argc, char **argv) { pixman_bool_t ok = TRUE; if (getenv ("VERBOSE") != NULL) verbose = TRUE; ok &= test_image_faults (PIXMAN_a8r8g8b8, 7, 5); ok &= test_image_faults (PIXMAN_r8g8b8, 7, 5); ok &= test_image_faults (PIXMAN_r5g6b5, 7, 5); ok &= test_image_faults (PIXMAN_a8, 7, 5); ok &= test_image_faults (PIXMAN_a4, 7, 5); ok &= test_image_faults (PIXMAN_a1, 7, 5); if (ok) return EXIT_SUCCESS; return EXIT_FAILURE; } #else /* FENCE_MALLOC_ACTIVE */ int main (int argc, char **argv) { /* Automake return 
code for test SKIP. */ return 77; } #endif /* FENCE_MALLOC_ACTIVE */ ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/fetch-test.c0000664000175000017500000000731114712446423016311 0ustar00mattst88mattst88#include #include #include #include "utils.h" #define SIZE 1024 static pixman_indexed_t mono_palette = { 0, { 0x00000000, 0x00ffffff }, }; typedef struct { pixman_format_code_t format; int width, height; int stride; uint32_t src[SIZE]; uint32_t dst[SIZE]; pixman_indexed_t *indexed; } testcase_t; static testcase_t testcases[] = { { PIXMAN_a8r8g8b8, 2, 2, 8, { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff }, { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff }, NULL, }, { PIXMAN_r8g8b8a8, 2, 2, 8, { 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc }, { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff }, NULL, }, { PIXMAN_g1, 8, 2, 4, #ifdef WORDS_BIGENDIAN { 0xaa000000, 0x55000000 }, #else { 0x00000055, 0x000000aa }, #endif { 0x00ffffff, 0x00000000, 0x00ffffff, 0x00000000, 0x00ffffff, 0x00000000, 0x00ffffff, 0x00000000, 0x00000000, 0x00ffffff, 0x00000000, 0x00ffffff, 0x00000000, 0x00ffffff, 0x00000000, 0x00ffffff }, &mono_palette, }, #if 0 { PIXMAN_g8, 4, 2, 4, { 0x01234567, 0x89abcdef }, { 0x00010101, 0x00232323, 0x00454545, 0x00676767, 0x00898989, 0x00ababab, 0x00cdcdcd, 0x00efefef, }, }, #endif /* FIXME: make this work on big endian */ { PIXMAN_yv12, 8, 2, 8, #ifdef WORDS_BIGENDIAN { 0x00ff00ff, 0x00ff00ff, 0xff00ff00, 0xff00ff00, 0x80ff8000, 0x800080ff }, #else { 0xff00ff00, 0xff00ff00, 0x00ff00ff, 0x00ff00ff, 0x0080ff80, 0xff800080 }, #endif { 0xff000000, 0xffffffff, 0xffb80000, 0xffffe113, 0xff000000, 0xffffffff, 0xff0023ee, 0xff4affff, 0xffffffff, 0xff000000, 0xffffe113, 0xffb80000, 0xffffffff, 0xff000000, 0xff4affff, 0xff0023ee, }, }, }; int n_test_cases = ARRAY_LENGTH (testcases); static uint32_t reader (const void *src, int size) { switch (size) { case 1: return *(uint8_t *)src; case 2: return 
*(uint16_t *)src; case 4: return *(uint32_t *)src; default: assert(0); return 0; /* silence MSVC */ } } static void writer (void *src, uint32_t value, int size) { switch (size) { case 1: *(uint8_t *)src = value; break; case 2: *(uint16_t *)src = value; break; case 4: *(uint32_t *)src = value; break; default: assert(0); } } int main (int argc, char **argv) { uint32_t dst[SIZE]; pixman_image_t *src_img; pixman_image_t *dst_img; int i, j, x, y; int ret = 0; for (i = 0; i < n_test_cases; ++i) { for (j = 0; j < 2; ++j) { src_img = pixman_image_create_bits (testcases[i].format, testcases[i].width, testcases[i].height, testcases[i].src, testcases[i].stride); pixman_image_set_indexed(src_img, testcases[i].indexed); dst_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, testcases[i].width, testcases[i].height, dst, testcases[i].width*4); if (j) { pixman_image_set_accessors (src_img, reader, writer); pixman_image_set_accessors (dst_img, reader, writer); } pixman_image_composite (PIXMAN_OP_SRC, src_img, NULL, dst_img, 0, 0, 0, 0, 0, 0, testcases[i].width, testcases[i].height); pixman_image_unref (src_img); pixman_image_unref (dst_img); for (y = 0; y < testcases[i].height; ++y) { for (x = 0; x < testcases[i].width; ++x) { int offset = y * testcases[i].width + x; if (dst[offset] != testcases[i].dst[offset]) { printf ("test %i%c: pixel mismatch at (x=%d,y=%d): %08x expected, %08x obtained\n", i + 1, 'a' + j, x, y, testcases[i].dst[offset], dst[offset]); ret = 1; } } } } } return ret; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/filter-reduction-test.c0000664000175000017500000000475414712446423020507 0ustar00mattst88mattst88#include #include #include "utils.h" static const pixman_fixed_t entries[] = { pixman_double_to_fixed (-1.0), pixman_double_to_fixed (-0.5), pixman_double_to_fixed (-1/3.0), pixman_double_to_fixed (0.0), pixman_double_to_fixed (0.5), pixman_double_to_fixed (1.0), pixman_double_to_fixed (1.5), 
pixman_double_to_fixed (2.0), pixman_double_to_fixed (3.0), }; #define SIZE 12 static uint32_t test_scale (const pixman_transform_t *xform, uint32_t crc) { uint32_t *srcbuf, *dstbuf; pixman_image_t *src, *dest; srcbuf = malloc (SIZE * SIZE * 4); prng_randmemset (srcbuf, SIZE * SIZE * 4, 0); src = pixman_image_create_bits ( PIXMAN_a8r8g8b8, SIZE, SIZE, srcbuf, SIZE * 4); dstbuf = malloc (SIZE * SIZE * 4); prng_randmemset (dstbuf, SIZE * SIZE * 4, 0); dest = pixman_image_create_bits ( PIXMAN_a8r8g8b8, SIZE, SIZE, dstbuf, SIZE * 4); pixman_image_set_transform (src, xform); pixman_image_set_repeat (src, PIXMAN_REPEAT_NORMAL); pixman_image_set_filter (src, PIXMAN_FILTER_BILINEAR, NULL, 0); image_endian_swap (src); image_endian_swap (dest); pixman_image_composite (PIXMAN_OP_SRC, src, NULL, dest, 0, 0, 0, 0, 0, 0, SIZE, SIZE); crc = compute_crc32_for_image (crc, dest); pixman_image_unref (src); pixman_image_unref (dest); free (srcbuf); free (dstbuf); return crc; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM 0x02169677 #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM 0xE44B29AC #else #define CHECKSUM 0x00000000 #endif int main (int argc, const char *argv[]) { const pixman_fixed_t *end = entries + ARRAY_LENGTH (entries); const pixman_fixed_t *t0, *t1, *t2, *t3, *t4, *t5; uint32_t crc = 0; prng_srand (0x56EA1DBD); for (t0 = entries; t0 < end; ++t0) { for (t1 = entries; t1 < end; ++t1) { for (t2 = entries; t2 < end; ++t2) { for (t3 = entries; t3 < end; ++t3) { for (t4 = entries; t4 < end; ++t4) { for (t5 = entries; t5 < end; ++t5) { pixman_transform_t xform = { { { *t0, *t1, *t2 }, { *t3, *t4, *t5 }, { 0, 0, pixman_fixed_1 } } }; crc = test_scale (&xform, crc); } } } } } } if (crc != CHECKSUM) { printf ("filter-reduction-test failed! 
(checksum=0x%08X, expected 0x%08X)\n", crc, CHECKSUM); return 1; } else { printf ("filter-reduction-test passed (checksum=0x%08X)\n", crc); return 0; } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/fuzzer-find-diff.pl0000775000175000017500000000416514712446423017614 0ustar00mattst88mattst88#!/usr/bin/env perl $usage = "Usage: fuzzer-find-diff.pl reference_binary new_binary [number_of_tests_to_run] The first two input arguments are the commands to run the test programs based on fuzzer_test_main() function from 'util.c' (preferably they should be statically compiled, this can be achieved via '--disable-shared' pixman configure option). The third optional argument is the number of test rounds to run (if not specified, then testing runs infinitely or until some problem is detected). Usage examples: fuzzer-find-diff.pl ./blitters-test-with-sse-disabled ./blitters-test 9000000 fuzzer-find-diff.pl ./blitters-test \"ssh ppc64_host /path/to/blitters-test\" "; $#ARGV >= 1 or die $usage; $batch_size = 10000; if ($#ARGV >= 2) { $number_of_tests = int($ARGV[2]); } else { $number_of_tests = -1 } sub test_range { my $min = shift; my $max = shift; # check that [$min, $max] range is "bad", otherwise return if (`$ARGV[0] $min $max 2>/dev/null` eq `$ARGV[1] $min $max 2>/dev/null`) { return; } # check that $min itself is "good", otherwise return if (`$ARGV[0] $min 2>/dev/null` ne `$ARGV[1] $min 2>/dev/null`) { return $min; } # start bisecting while ($max != $min + 1) { my $avg = int(($min + $max) / 2); my $res1 = `$ARGV[0] $min $avg 2>/dev/null`; my $res2 = `$ARGV[1] $min $avg 2>/dev/null`; if ($res1 ne $res2) { $max = $avg; } else { $min = $avg; } } return $max; } $base = 1; while ($number_of_tests <= 0 || $base <= $number_of_tests) { printf("testing %-12d\r", $base + $batch_size - 1); my $res = test_range($base, $base + $batch_size - 1); if ($res) { printf("Failure: results are different for test %d:\n", $res); 
printf("\n-- ref --\n"); print `$ARGV[0] $res`; printf("-- new --\n"); print `$ARGV[1] $res`; printf("The problematic conditions can be reproduced by running:\n"); printf("$ARGV[1] %d\n", $res); exit(1); } $base += $batch_size; } printf("Success: %d tests finished\n", $base - 1); ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/glyph-test.c0000664000175000017500000001664014712446423016350 0ustar00mattst88mattst88#include #include "utils.h" static const pixman_format_code_t glyph_formats[] = { PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a4, PIXMAN_a1, PIXMAN_x8r8g8b8, PIXMAN_r3g3b2, PIXMAN_null, }; static const pixman_format_code_t formats[] = { PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, PIXMAN_x8b8g8r8, PIXMAN_r5g6b5, PIXMAN_b5g6r5, PIXMAN_a8, PIXMAN_a1, PIXMAN_r3g3b2, PIXMAN_b8g8r8a8, PIXMAN_b8g8r8x8, PIXMAN_r8g8b8a8, PIXMAN_r8g8b8x8, PIXMAN_x14r6g6b6, PIXMAN_r8g8b8, PIXMAN_b8g8r8, #if 0 /* These use floating point */ PIXMAN_x2r10g10b10, PIXMAN_a2r10g10b10, PIXMAN_x2b10g10r10, PIXMAN_a2b10g10r10, #endif PIXMAN_a1r5g5b5, PIXMAN_x1r5g5b5, PIXMAN_a1b5g5r5, PIXMAN_x1b5g5r5, PIXMAN_a4r4g4b4, PIXMAN_x4r4g4b4, PIXMAN_a4b4g4r4, PIXMAN_x4b4g4r4, PIXMAN_r3g3b2, PIXMAN_b2g3r3, PIXMAN_a2r2g2b2, PIXMAN_a2b2g2r2, PIXMAN_x4a4, PIXMAN_a4, PIXMAN_r1g2b1, PIXMAN_b1g2r1, PIXMAN_a1r1g1b1, PIXMAN_a1b1g1r1, PIXMAN_null, }; static const pixman_op_t operators[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD }; enum { ALLOW_CLIPPED = (1 << 0), ALLOW_ALPHA_MAP = (1 << 1), ALLOW_SOURCE_CLIPPING = (1 << 2), ALLOW_REPEAT = (1 << 3), ALLOW_SOLID = (1 << 4), ALLOW_FENCED_MEMORY = (1 << 5), }; static void destroy_fenced (pixman_image_t *image, void *data) { fence_free (data); } static void 
destroy_malloced (pixman_image_t *image, void *data) { free (data); } static pixman_format_code_t random_format (const pixman_format_code_t *formats) { int i; i = 0; while (formats[i] != PIXMAN_null) ++i; return formats[prng_rand_n (i)]; } static pixman_image_t * create_image (int max_size, const pixman_format_code_t *formats, uint32_t flags) { int width, height; pixman_image_t *image; pixman_format_code_t format; uint32_t *data; int bpp; int stride; int i; pixman_image_destroy_func_t destroy; if ((flags & ALLOW_SOLID) && prng_rand_n (4) == 0) { pixman_color_t color; color.alpha = prng_rand(); color.red = prng_rand(); color.green = prng_rand(); color.blue = prng_rand(); return pixman_image_create_solid_fill (&color); } width = prng_rand_n (max_size) + 1; height = prng_rand_n (max_size) + 1; format = random_format (formats); bpp = PIXMAN_FORMAT_BPP (format); stride = (width * bpp + 7) / 8 + prng_rand_n (17); stride = (stride + 3) & ~3; if (prng_rand_n (64) == 0) { if (!(data = (uint32_t *)make_random_bytes (stride * height))) { fprintf (stderr, "Out of memory\n"); abort (); } destroy = destroy_fenced; } else { data = malloc (stride * height); prng_randmemset (data, height * stride, 0); destroy = destroy_malloced; } image = pixman_image_create_bits (format, width, height, data, stride); pixman_image_set_destroy_function (image, destroy, data); if ((flags & ALLOW_CLIPPED) && prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[8]; pixman_region16_t clip; int n = prng_rand_n (8) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (width); clip_boxes[i].y1 = prng_rand_n (height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (height - clip_boxes[i].y1); } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (image, &clip); pixman_region_fini (&clip); } if ((flags & ALLOW_SOURCE_CLIPPING) && prng_rand_n (4) == 0) { pixman_image_set_source_clipping (image, 
TRUE); pixman_image_set_has_client_clip (image, TRUE); } if ((flags & ALLOW_ALPHA_MAP) && prng_rand_n (16) == 0) { pixman_image_t *alpha_map; int alpha_x, alpha_y; alpha_x = prng_rand_n (width); alpha_y = prng_rand_n (height); alpha_map = create_image (max_size, formats, (flags & ~(ALLOW_ALPHA_MAP | ALLOW_SOLID))); pixman_image_set_alpha_map (image, alpha_map, alpha_x, alpha_y); pixman_image_unref (alpha_map); } if ((flags & ALLOW_REPEAT) && prng_rand_n (2) == 0) pixman_image_set_repeat (image, prng_rand_n (4)); image_endian_swap (image); return image; } #define KEY1(p) ((void *)(((uintptr_t)p) ^ (0xa7e23dfaUL))) #define KEY2(p) ((void *)(((uintptr_t)p) ^ (0xabcd9876UL))) #define MAX_GLYPHS 32 uint32_t test_glyphs (int testnum, int verbose) { pixman_image_t *glyph_images[MAX_GLYPHS]; pixman_glyph_t glyphs[4 * MAX_GLYPHS]; uint32_t crc32 = 0; pixman_image_t *source, *dest; int n_glyphs, i; pixman_glyph_cache_t *cache; prng_srand (testnum); cache = pixman_glyph_cache_create (); source = create_image (300, formats, ALLOW_CLIPPED | ALLOW_ALPHA_MAP | ALLOW_SOURCE_CLIPPING | ALLOW_REPEAT | ALLOW_SOLID); dest = create_image (128, formats, ALLOW_CLIPPED | ALLOW_ALPHA_MAP | ALLOW_SOURCE_CLIPPING); pixman_glyph_cache_freeze (cache); n_glyphs = prng_rand_n (MAX_GLYPHS); for (i = 0; i < n_glyphs; ++i) glyph_images[i] = create_image (32, glyph_formats, 0); for (i = 0; i < 4 * n_glyphs; ++i) { int g = prng_rand_n (n_glyphs); pixman_image_t *glyph_img = glyph_images[g]; void *key1 = KEY1 (glyph_img); void *key2 = KEY2 (glyph_img); const void *glyph; if (!(glyph = pixman_glyph_cache_lookup (cache, key1, key2))) { glyph = pixman_glyph_cache_insert (cache, key1, key2, 5, 8, glyph_img); } glyphs[i].glyph = glyph; glyphs[i].x = prng_rand_n (128); glyphs[i].y = prng_rand_n (128); } if (prng_rand_n (2) == 0) { int src_x = prng_rand_n (300) - 150; int src_y = prng_rand_n (300) - 150; int mask_x = prng_rand_n (64) - 32; int mask_y = prng_rand_n (64) - 32; int dest_x = prng_rand_n (64) - 
32; int dest_y = prng_rand_n (64) - 32; int width = prng_rand_n (64); int height = prng_rand_n (64); pixman_op_t op = operators[prng_rand_n (ARRAY_LENGTH (operators))]; pixman_format_code_t format = random_format (glyph_formats); pixman_composite_glyphs ( op, source, dest, format, src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height, cache, 4 * n_glyphs, glyphs); } else { pixman_op_t op = operators[prng_rand_n (ARRAY_LENGTH (operators))]; int src_x = prng_rand_n (300) - 150; int src_y = prng_rand_n (300) - 150; int dest_x = prng_rand_n (64) - 32; int dest_y = prng_rand_n (64) - 32; pixman_composite_glyphs_no_mask ( op, source, dest, src_x, src_y, dest_x, dest_y, cache, 4 * n_glyphs, glyphs); } pixman_glyph_cache_thaw (cache); for (i = 0; i < n_glyphs; ++i) { pixman_image_t *img = glyph_images[i]; void *key1, *key2; key1 = KEY1 (img); key2 = KEY2 (img); pixman_glyph_cache_remove (cache, key1, key2); pixman_image_unref (glyph_images[i]); } crc32 = compute_crc32_for_image (0, dest); pixman_image_unref (source); pixman_image_unref (dest); pixman_glyph_cache_destroy (cache); return crc32; } int main (int argc, const char *argv[]) { return fuzzer_test_main ("glyph", 30000, 0xFA478A79, test_glyphs, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/gradient-crash-test.c0000664000175000017500000001122714712446423020114 0ustar00mattst88mattst88#include #include #include "utils.h" int main (int argc, char **argv) { #define WIDTH 400 #define HEIGHT 200 uint32_t *dest = malloc (WIDTH * HEIGHT * 4); pixman_image_t *src_img; pixman_image_t *dest_img; int i, j, k, p; typedef struct { pixman_point_fixed_t p0; pixman_point_fixed_t p1; } point_pair_t; pixman_gradient_stop_t onestop[1] = { { pixman_int_to_fixed (1), { 0xffff, 0xeeee, 0xeeee, 0xeeee } }, }; pixman_gradient_stop_t subsetstops[2] = { { pixman_int_to_fixed (1), { 0xffff, 0xeeee, 0xeeee, 0xeeee } }, { pixman_int_to_fixed (1), { 0xffff, 
0xeeee, 0xeeee, 0xeeee } }, }; pixman_gradient_stop_t stops01[2] = { { pixman_int_to_fixed (0), { 0xffff, 0xeeee, 0xeeee, 0xeeee } }, { pixman_int_to_fixed (1), { 0xffff, 0x1111, 0x1111, 0x1111 } } }; point_pair_t point_pairs [] = { { { pixman_double_to_fixed (0), 0 }, { pixman_double_to_fixed (WIDTH / 8.), pixman_int_to_fixed (0) } }, { { pixman_double_to_fixed (WIDTH / 2.0), pixman_double_to_fixed (HEIGHT / 2.0) }, { pixman_double_to_fixed (WIDTH / 2.0), pixman_double_to_fixed (HEIGHT / 2.0) } } }; pixman_transform_t transformations[] = { { { { pixman_double_to_fixed (2), pixman_double_to_fixed (0.5), pixman_double_to_fixed (-100), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (3), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (0.000), pixman_double_to_fixed (1.0) } } }, { { { pixman_double_to_fixed (1), pixman_double_to_fixed (0), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (0.000), pixman_double_to_fixed (1.0) } } }, { { { pixman_double_to_fixed (2), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (1), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (2), pixman_double_to_fixed (1.000), pixman_double_to_fixed (1.0) } } }, { { { pixman_double_to_fixed (2), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (1), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (0), pixman_double_to_fixed (0), pixman_double_to_fixed (0) } } }, { { { pixman_double_to_fixed (2), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (1), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (2), pixman_double_to_fixed (-1), pixman_double_to_fixed (0) } } }, { { { pixman_double_to_fixed (2), pixman_double_to_fixed 
(1), pixman_double_to_fixed (3), }, { pixman_double_to_fixed (1), pixman_double_to_fixed (1), pixman_double_to_fixed (0), }, { pixman_double_to_fixed (2), pixman_double_to_fixed (-1), pixman_double_to_fixed (0) } } }, }; pixman_fixed_t r_inner; pixman_fixed_t r_outer; enable_divbyzero_exceptions(); for (i = 0; i < WIDTH * HEIGHT; ++i) dest[i] = 0x4f00004f; /* pale blue */ dest_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, WIDTH, HEIGHT, dest, WIDTH * 4); r_inner = 0; r_outer = pixman_double_to_fixed (50.0); for (i = 0; i < 3; ++i) { pixman_gradient_stop_t *stops; int num_stops; if (i == 0) { stops = onestop; num_stops = ARRAY_LENGTH (onestop); } else if (i == 1) { stops = subsetstops; num_stops = ARRAY_LENGTH (subsetstops); } else { stops = stops01; num_stops = ARRAY_LENGTH (stops01); } for (j = 0; j < 3; ++j) { for (p = 0; p < ARRAY_LENGTH (point_pairs); ++p) { point_pair_t *pair = &(point_pairs[p]); if (j == 0) src_img = pixman_image_create_conical_gradient (&(pair->p0), r_inner, stops, num_stops); else if (j == 1) src_img = pixman_image_create_radial_gradient (&(pair->p0), &(pair->p1), r_inner, r_outer, stops, num_stops); else src_img = pixman_image_create_linear_gradient (&(pair->p0), &(pair->p1), stops, num_stops); for (k = 0; k < ARRAY_LENGTH (transformations); ++k) { pixman_image_set_transform (src_img, &transformations[k]); pixman_image_set_repeat (src_img, PIXMAN_REPEAT_NONE); pixman_image_composite (PIXMAN_OP_OVER, src_img, NULL, dest_img, 0, 0, 0, 0, 0, 0, 10 * WIDTH, HEIGHT); } pixman_image_unref (src_img); } } } pixman_image_unref (dest_img); free (dest); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/infinite-loop.c0000664000175000017500000000204514712446423017016 0ustar00mattst88mattst88#include #include #include #include #include "utils.h" int main (int argc, char **argv) { #define SRC_WIDTH 16 #define SRC_HEIGHT 12 #define DST_WIDTH 7 #define DST_HEIGHT 2 static const 
pixman_transform_t transform = { { { 0x200017bd, 0x00000000, 0x000e6465 }, { 0x00000000, 0x000a42fd, 0x000e6465 }, { 0x00000000, 0x00000000, 0x00010000 }, } }; pixman_image_t *src, *dest; src = pixman_image_create_bits ( PIXMAN_a8r8g8b8, SRC_WIDTH, SRC_HEIGHT, NULL, -1); dest = pixman_image_create_bits ( PIXMAN_a8r8g8b8, DST_WIDTH, DST_HEIGHT, NULL, -1); pixman_image_set_transform (src, &transform); pixman_image_set_repeat (src, PIXMAN_REPEAT_NORMAL); pixman_image_set_filter (src, PIXMAN_FILTER_BILINEAR, NULL, 0); if (argc == 1 || strcmp (argv[1], "-nf") != 0) fail_after (1, "infinite loop detected"); pixman_image_composite ( PIXMAN_OP_OVER, src, NULL, dest, -3, -3, 0, 0, 0, 0, 6, 2); pixman_image_unref (src); pixman_image_unref (dest); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/lowlevel-blt-bench.c0000664000175000017500000012514614712446423017737 0ustar00mattst88mattst88/* * Copyright Âİ 2009 Nokia Corporation * Copyright Âİ 2010 Movial Creative Technologies Oy * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #include #include #include "utils.h" #define SOLID_FLAG 1 #define CA_FLAG 2 #define L1CACHE_SIZE (8 * 1024) #define L2CACHE_SIZE (128 * 1024) /* This is applied to both L1 and L2 tests - alternatively, you could * parameterise bench_L or split it into two functions. It could be * read at runtime on some architectures, but it only really matters * that it's a number that's an integer divisor of both cacheline * lengths, and further, it only really matters for caches that don't * do allocate0on-write. */ #define CACHELINE_LENGTH (32) /* bytes */ #define WIDTH 1920 #define HEIGHT 1080 #define BUFSIZE (WIDTH * HEIGHT * 4) #define XWIDTH 256 #define XHEIGHT 256 #define TILEWIDTH 32 #define TINYWIDTH 8 #define EXCLUDE_OVERHEAD 1 uint32_t *dst; uint32_t *src; uint32_t *mask; double bandwidth = 0.0; double bench_memcpy () { int64_t n = 0, total; double t1, t2; int x = 0; t1 = gettime (); while (1) { memcpy (dst, src, BUFSIZE - 64); memcpy (src, dst, BUFSIZE - 64); n += 4 * (BUFSIZE - 64); t2 = gettime (); if (t2 - t1 > 0.5) break; } n = total = n * 5; t1 = gettime (); while (n > 0) { if (++x >= 64) x = 0; memcpy ((char *)dst + 1, (char *)src + x, BUFSIZE - 64); memcpy ((char *)src + 1, (char *)dst + x, BUFSIZE - 64); n -= 4 * (BUFSIZE - 64); } t2 = gettime (); return (double)total / (t2 - t1); } static pixman_bool_t use_scaling = FALSE; static pixman_filter_t filter = PIXMAN_FILTER_NEAREST; static pixman_bool_t use_csv_output = FALSE; /* nearly 1x scale factor */ static pixman_transform_t m = { { { pixman_fixed_1 + 1, 0, 0 }, { 0, pixman_fixed_1, 0 }, { 0, 0, pixman_fixed_1 } } }; static void pixman_image_composite_wrapper (pixman_implementation_t *impl, pixman_composite_info_t *info) { if 
(use_scaling) { pixman_image_set_filter (info->src_image, filter, NULL, 0); pixman_image_set_transform(info->src_image, &m); } pixman_image_composite (info->op, info->src_image, info->mask_image, info->dest_image, info->src_x, info->src_y, info->mask_x, info->mask_y, info->dest_x, info->dest_y, info->width, info->height); } static void pixman_image_composite_empty (pixman_implementation_t *impl, pixman_composite_info_t *info) { if (use_scaling) { pixman_image_set_filter (info->src_image, filter, NULL, 0); pixman_image_set_transform(info->src_image, &m); } pixman_image_composite (info->op, info->src_image, info->mask_image, info->dest_image, 0, 0, 0, 0, 0, 0, 1, 1); } static inline void call_func (pixman_composite_func_t func, pixman_op_t op, pixman_image_t * src_image, pixman_image_t * mask_image, pixman_image_t * dest_image, int32_t src_x, int32_t src_y, int32_t mask_x, int32_t mask_y, int32_t dest_x, int32_t dest_y, int32_t width, int32_t height) { pixman_composite_info_t info; info.op = op; info.src_image = src_image; info.mask_image = mask_image; info.dest_image = dest_image; info.src_x = src_x; info.src_y = src_y; info.mask_x = mask_x; info.mask_y = mask_y; info.dest_x = dest_x; info.dest_y = dest_y; info.width = width; info.height = height; func (0, &info); } double noinline bench_L (pixman_op_t op, pixman_image_t * src_img, pixman_image_t * mask_img, pixman_image_t * dst_img, int64_t n, pixman_composite_func_t func, int width, int lines_count) { int64_t i, j, k; int x = 0; int q = 0; for (i = 0; i < n; i++) { /* For caches without allocate-on-write, we need to force the * destination buffer back into the cache on each iteration, * otherwise if they are evicted during the test, they remain * uncached. This doesn't matter for tests which read the * destination buffer, or for caches that do allocate-on-write, * but in those cases this loop just adds constant time, which * should be successfully cancelled out. 
*/ for (j = 0; j < lines_count; j++) { for (k = 0; k < width + 62; k += CACHELINE_LENGTH / sizeof *dst) { q += dst[j * WIDTH + k]; } q += dst[j * WIDTH + width + 62]; } if (++x >= 64) x = 0; call_func (func, op, src_img, mask_img, dst_img, x, 0, x, 0, 63 - x, 0, width, lines_count); } return (double)n * lines_count * width; } double noinline bench_M (pixman_op_t op, pixman_image_t * src_img, pixman_image_t * mask_img, pixman_image_t * dst_img, int64_t n, pixman_composite_func_t func) { int64_t i; int x = 0; for (i = 0; i < n; i++) { if (++x >= 64) x = 0; call_func (func, op, src_img, mask_img, dst_img, x, 0, x, 0, 1, 0, WIDTH - 64, HEIGHT); } return (double)n * (WIDTH - 64) * HEIGHT; } double noinline bench_HT (pixman_op_t op, pixman_image_t * src_img, pixman_image_t * mask_img, pixman_image_t * dst_img, int64_t n, pixman_composite_func_t func) { double pix_cnt = 0; int x = 0; int y = 0; int64_t i; srand (0); for (i = 0; i < n; i++) { int w = (rand () % (TILEWIDTH * 2)) + 1; int h = (rand () % (TILEWIDTH * 2)) + 1; if (x + w > WIDTH) { x = 0; y += TILEWIDTH * 2; } if (y + h > HEIGHT) { y = 0; } call_func (func, op, src_img, mask_img, dst_img, x, y, x, y, x, y, w, h); x += w; pix_cnt += w * h; } return pix_cnt; } double noinline bench_VT (pixman_op_t op, pixman_image_t * src_img, pixman_image_t * mask_img, pixman_image_t * dst_img, int64_t n, pixman_composite_func_t func) { double pix_cnt = 0; int x = 0; int y = 0; int64_t i; srand (0); for (i = 0; i < n; i++) { int w = (rand () % (TILEWIDTH * 2)) + 1; int h = (rand () % (TILEWIDTH * 2)) + 1; if (y + h > HEIGHT) { y = 0; x += TILEWIDTH * 2; } if (x + w > WIDTH) { x = 0; } call_func (func, op, src_img, mask_img, dst_img, x, y, x, y, x, y, w, h); y += h; pix_cnt += w * h; } return pix_cnt; } double noinline bench_R (pixman_op_t op, pixman_image_t * src_img, pixman_image_t * mask_img, pixman_image_t * dst_img, int64_t n, pixman_composite_func_t func, int maxw, int maxh) { double pix_cnt = 0; int64_t i; if (maxw <= 
TILEWIDTH * 2 || maxh <= TILEWIDTH * 2) { printf("error: maxw <= TILEWIDTH * 2 || maxh <= TILEWIDTH * 2\n"); return 0; } srand (0); for (i = 0; i < n; i++) { int w = (rand () % (TILEWIDTH * 2)) + 1; int h = (rand () % (TILEWIDTH * 2)) + 1; int sx = rand () % (maxw - TILEWIDTH * 2); int sy = rand () % (maxh - TILEWIDTH * 2); int dx = rand () % (maxw - TILEWIDTH * 2); int dy = rand () % (maxh - TILEWIDTH * 2); call_func (func, op, src_img, mask_img, dst_img, sx, sy, sx, sy, dx, dy, w, h); pix_cnt += w * h; } return pix_cnt; } double noinline bench_RT (pixman_op_t op, pixman_image_t * src_img, pixman_image_t * mask_img, pixman_image_t * dst_img, int64_t n, pixman_composite_func_t func, int maxw, int maxh) { double pix_cnt = 0; int64_t i; if (maxw <= TINYWIDTH * 2 || maxh <= TINYWIDTH * 2) { printf("error: maxw <= TINYWIDTH * 2 || maxh <= TINYWIDTH * 2\n"); return 0; } srand (0); for (i = 0; i < n; i++) { int w = (rand () % (TINYWIDTH * 2)) + 1; int h = (rand () % (TINYWIDTH * 2)) + 1; int sx = rand () % (maxw - TINYWIDTH * 2); int sy = rand () % (maxh - TINYWIDTH * 2); int dx = rand () % (maxw - TINYWIDTH * 2); int dy = rand () % (maxh - TINYWIDTH * 2); call_func (func, op, src_img, mask_img, dst_img, sx, sy, sx, sy, dx, dy, w, h); pix_cnt += w * h; } return pix_cnt; } static double Mpx_per_sec (double pix_cnt, double t1, double t2, double t3) { double overhead = t2 - t1; double testtime = t3 - t2; return pix_cnt / (testtime - overhead) / 1e6; } void bench_composite (const char *testname, int src_fmt, int src_flags, int op, int mask_fmt, int mask_flags, int dst_fmt, double npix) { pixman_image_t * src_img; pixman_image_t * dst_img; pixman_image_t * mask_img; pixman_image_t * xsrc_img; pixman_image_t * xdst_img; pixman_image_t * xmask_img; double t1, t2, t3, pix_cnt; int64_t n, l1test_width, nlines; double bytes_per_pix = 0; pixman_bool_t bench_pixbuf = FALSE; pixman_composite_func_t func = pixman_image_composite_wrapper; if (!(src_flags & SOLID_FLAG)) { bytes_per_pix 
+= (src_fmt >> 24) / 8.0; src_img = pixman_image_create_bits (src_fmt, WIDTH, HEIGHT, src, WIDTH * 4); xsrc_img = pixman_image_create_bits (src_fmt, XWIDTH, XHEIGHT, src, XWIDTH * 4); } else { src_img = pixman_image_create_bits (src_fmt, 1, 1, src, 4); xsrc_img = pixman_image_create_bits (src_fmt, 1, 1, src, 4); pixman_image_set_repeat (src_img, PIXMAN_REPEAT_NORMAL); pixman_image_set_repeat (xsrc_img, PIXMAN_REPEAT_NORMAL); } bytes_per_pix += (dst_fmt >> 24) / 8.0; dst_img = pixman_image_create_bits (dst_fmt, WIDTH, HEIGHT, dst, WIDTH * 4); mask_img = NULL; xmask_img = NULL; if (strcmp (testname, "pixbuf") == 0 || strcmp (testname, "rpixbuf") == 0) { bench_pixbuf = TRUE; } if (!(mask_flags & SOLID_FLAG) && mask_fmt != PIXMAN_null) { bytes_per_pix += (mask_fmt >> 24) / ((op == PIXMAN_OP_SRC) ? 8.0 : 4.0); mask_img = pixman_image_create_bits (mask_fmt, WIDTH, HEIGHT, bench_pixbuf ? src : mask, WIDTH * 4); xmask_img = pixman_image_create_bits (mask_fmt, XWIDTH, XHEIGHT, bench_pixbuf ? src : mask, XWIDTH * 4); } else if (mask_fmt != PIXMAN_null) { mask_img = pixman_image_create_bits (mask_fmt, 1, 1, mask, 4); xmask_img = pixman_image_create_bits (mask_fmt, 1, 1, mask, 4 * 4); pixman_image_set_repeat (mask_img, PIXMAN_REPEAT_NORMAL); pixman_image_set_repeat (xmask_img, PIXMAN_REPEAT_NORMAL); } if ((mask_flags & CA_FLAG) && mask_fmt != PIXMAN_null) { pixman_image_set_component_alpha (mask_img, 1); } xdst_img = pixman_image_create_bits (dst_fmt, XWIDTH, XHEIGHT, dst, XWIDTH * 4); if (!use_csv_output) printf ("%24s %c", testname, func != pixman_image_composite_wrapper ? 
'-' : '='); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); l1test_width = L1CACHE_SIZE / 8 - 64; if (l1test_width < 1) l1test_width = 1; if (l1test_width > WIDTH - 64) l1test_width = WIDTH - 64; n = 1 + npix / (l1test_width * 8); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = bench_L (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty, l1test_width, 1); #endif t2 = gettime (); pix_cnt = bench_L (op, src_img, mask_img, dst_img, n, func, l1test_width, 1); t3 = gettime (); if (use_csv_output) printf ("%g,", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" L1:%7.2f", Mpx_per_sec (pix_cnt, t1, t2, t3)); fflush (stdout); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); nlines = (L2CACHE_SIZE / l1test_width) / ((PIXMAN_FORMAT_BPP(src_fmt) + PIXMAN_FORMAT_BPP(dst_fmt)) / 8); if (nlines < 1) nlines = 1; n = 1 + npix / (l1test_width * nlines); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = bench_L (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty, l1test_width, nlines); #endif t2 = gettime (); pix_cnt = bench_L (op, src_img, mask_img, dst_img, n, func, l1test_width, nlines); t3 = gettime (); if (use_csv_output) printf ("%g,", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" L2:%7.2f", Mpx_per_sec (pix_cnt, t1, t2, t3)); fflush (stdout); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); n = 1 + npix / (WIDTH * HEIGHT); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = bench_M (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty); #endif t2 = gettime (); pix_cnt = bench_M (op, src_img, mask_img, dst_img, n, func); t3 = gettime (); if (use_csv_output) printf ("%g,", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" M:%6.2f (%6.2f%%)", Mpx_per_sec (pix_cnt, t1, t2, t3), (pix_cnt / ((t3 - t2) - (t2 - t1)) * bytes_per_pix) * (100.0 / bandwidth) ); fflush (stdout); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); n = 1 + npix / (8 * TILEWIDTH * TILEWIDTH); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = 
bench_HT (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty); #endif t2 = gettime (); pix_cnt = bench_HT (op, src_img, mask_img, dst_img, n, func); t3 = gettime (); if (use_csv_output) printf ("%g,", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" HT:%6.2f", Mpx_per_sec (pix_cnt, t1, t2, t3)); fflush (stdout); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); n = 1 + npix / (8 * TILEWIDTH * TILEWIDTH); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = bench_VT (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty); #endif t2 = gettime (); pix_cnt = bench_VT (op, src_img, mask_img, dst_img, n, func); t3 = gettime (); if (use_csv_output) printf ("%g,", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" VT:%6.2f", Mpx_per_sec (pix_cnt, t1, t2, t3)); fflush (stdout); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); n = 1 + npix / (8 * TILEWIDTH * TILEWIDTH); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = bench_R (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty, WIDTH, HEIGHT); #endif t2 = gettime (); pix_cnt = bench_R (op, src_img, mask_img, dst_img, n, func, WIDTH, HEIGHT); t3 = gettime (); if (use_csv_output) printf ("%g,", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" R:%6.2f", Mpx_per_sec (pix_cnt, t1, t2, t3)); fflush (stdout); memcpy (dst, src, BUFSIZE); memcpy (src, dst, BUFSIZE); n = 1 + npix / (16 * TINYWIDTH * TINYWIDTH); t1 = gettime (); #if EXCLUDE_OVERHEAD pix_cnt = bench_RT (op, src_img, mask_img, dst_img, n, pixman_image_composite_empty, WIDTH, HEIGHT); #endif t2 = gettime (); pix_cnt = bench_RT (op, src_img, mask_img, dst_img, n, func, WIDTH, HEIGHT); t3 = gettime (); if (use_csv_output) printf ("%g\n", Mpx_per_sec (pix_cnt, t1, t2, t3)); else printf (" RT:%6.2f (%4.0fKops/s)\n", Mpx_per_sec (pix_cnt, t1, t2, t3), (double) n / ((t3 - t2) * 1000)); if (mask_img) { pixman_image_unref (mask_img); pixman_image_unref (xmask_img); } pixman_image_unref (src_img); pixman_image_unref (dst_img); 
pixman_image_unref (xsrc_img); pixman_image_unref (xdst_img); } #define PIXMAN_OP_OUT_REV (PIXMAN_OP_OUT_REVERSE) struct test_entry { const char *testname; int src_fmt; int src_flags; int op; int mask_fmt; int mask_flags; int dst_fmt; }; typedef struct test_entry test_entry_t; static const test_entry_t tests_tbl[] = { { "add_8_8_8", PIXMAN_a8, 0, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a8 }, { "add_n_8_8", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a8 }, { "add_n_8_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a8r8g8b8 }, { "add_n_8_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_x8r8g8b8 }, { "add_n_8_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "add_n_8_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a1r5g5b5 }, { "add_n_8_4444", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a4r4g4b4 }, { "add_n_8_2222", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a2r2g2b2 }, { "add_n_8_2x10", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_x2r10g10b10 }, { "add_n_8_2a10", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_a8, 0, PIXMAN_a2r10g10b10 }, { "add_n_8", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a8 }, { "add_n_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "add_n_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "add_n_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "add_n_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a1r5g5b5 }, { "add_n_4444", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a4r4g4b4 }, { "add_n_2222", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a2r2g2b2 }, { "add_n_2x10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_x2r10g10b10 }, { "add_n_2a10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a2r10g10b10 }, { "add_8_8", PIXMAN_a8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, 
PIXMAN_a8 }, { "add_x888_x888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "add_8888_8888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "add_8888_0565", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "add_8888_1555", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a1r5g5b5 }, { "add_8888_4444", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a4r4g4b4 }, { "add_8888_2222", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a2r2g2b2 }, { "add_0565_0565", PIXMAN_r5g6b5, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "add_1555_1555", PIXMAN_a1r5g5b5, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a1r5g5b5 }, { "add_0565_2x10", PIXMAN_r5g6b5, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_x2r10g10b10 }, { "add_2a10_2a10", PIXMAN_a2r10g10b10, 0, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a2r10g10b10 }, { "in_n_8_8", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_IN, PIXMAN_a8, 0, PIXMAN_a8 }, { "in_8_8", PIXMAN_a8, 0, PIXMAN_OP_IN, PIXMAN_null, 0, PIXMAN_a8 }, { "src_n_2222", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a2r2g2b2 }, { "src_n_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_n_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a1r5g5b5 }, { "src_n_4444", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a4r4g4b4 }, { "src_n_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "src_n_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "src_n_2x10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x2r10g10b10 }, { "src_n_2a10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a2r10g10b10 }, { "src_8888_0565", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_0565_8888", PIXMAN_r5g6b5, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "src_8888_4444", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, 
PIXMAN_a4r4g4b4 }, { "src_8888_2222", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a2r2g2b2 }, { "src_8888_2x10", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x2r10g10b10 }, { "src_8888_2a10", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a2r10g10b10 }, { "src_0888_0565", PIXMAN_r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_0888_8888", PIXMAN_r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "src_0888_x888", PIXMAN_r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "src_0888_8888_rev", PIXMAN_b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "src_0888_0565_rev", PIXMAN_b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_x888_x888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "src_x888_8888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "src_8888_8888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "src_0565_0565", PIXMAN_r5g6b5, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_1555_0565", PIXMAN_a1r5g5b5, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_0565_1555", PIXMAN_r5g6b5, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a1r5g5b5 }, { "src_8_8", PIXMAN_a8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8 }, { "src_n_8", PIXMAN_a8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8 }, { "src_n_8_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "src_n_8_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a1r5g5b5 }, { "src_n_8_4444", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a4r4g4b4 }, { "src_n_8_2222", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a2r2g2b2 }, { "src_n_8_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_x8r8g8b8 }, { "src_n_8_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a8r8g8b8 }, { "src_n_8_2x10", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_x2r10g10b10 
}, { "src_n_8_2a10", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a2r10g10b10 }, { "src_8888_8_0565", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "src_0888_8_0565", PIXMAN_r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "src_0888_8_8888", PIXMAN_r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a8r8g8b8 }, { "src_0888_8_x888", PIXMAN_r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_x8r8g8b8 }, { "src_x888_8_x888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_x8r8g8b8 }, { "src_x888_8_8888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a8r8g8b8 }, { "src_0565_8_0565", PIXMAN_r5g6b5, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "src_1555_8_0565", PIXMAN_a1r5g5b5, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "src_0565_8_1555", PIXMAN_r5g6b5, 0, PIXMAN_OP_SRC, PIXMAN_a8, 0, PIXMAN_a1r5g5b5 }, { "over_n_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "over_n_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "over_n_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "over_n_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_a1r5g5b5 }, { "over_8888_0565", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "over_8888_8888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "over_8888_x888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "over_x888_8_0565", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "over_x888_8_8888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_a8r8g8b8 }, { "over_n_8_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "over_n_8_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_a1r5g5b5 }, { "over_n_8_4444", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_a4r4g4b4 }, { "over_n_8_2222", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, 
PIXMAN_a8, 0, PIXMAN_a2r2g2b2 }, { "over_n_8_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_x8r8g8b8 }, { "over_n_8_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_a8r8g8b8 }, { "over_n_8_2x10", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_x2r10g10b10 }, { "over_n_8_2a10", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8, 0, PIXMAN_a2r10g10b10 }, { "over_n_8888_8888_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_a8r8g8b8 }, { "over_n_8888_x888_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_x8r8g8b8 }, { "over_n_8888_0565_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_r5g6b5 }, { "over_n_8888_1555_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_a1r5g5b5 }, { "over_n_8888_4444_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_a4r4g4b4 }, { "over_n_8888_2222_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_a2r2g2b2 }, { "over_n_8888_2x10_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_x2r10g10b10 }, { "over_n_8888_2a10_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 2, PIXMAN_a2r10g10b10 }, { "over_8888_n_8888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 1, PIXMAN_a8r8g8b8 }, { "over_8888_n_x888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 1, PIXMAN_x8r8g8b8 }, { "over_8888_n_0565", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 1, PIXMAN_r5g6b5 }, { "over_8888_n_1555", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 1, PIXMAN_a1r5g5b5 }, { "over_x888_n_8888", PIXMAN_x8r8g8b8, 0, PIXMAN_OP_OVER, PIXMAN_a8, 1, PIXMAN_a8r8g8b8 }, { "outrev_n_8_0565", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8, 0, PIXMAN_r5g6b5 }, { "outrev_n_8_1555", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8, 0, PIXMAN_a1r5g5b5 }, { "outrev_n_8_x888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8, 0, PIXMAN_x8r8g8b8 }, { "outrev_n_8_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8, 0, 
PIXMAN_a8r8g8b8 }, { "outrev_n_8888_0565_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_r5g6b5 }, { "outrev_n_8888_1555_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_a1r5g5b5 }, { "outrev_n_8888_x888_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_x8r8g8b8 }, { "outrev_n_8888_8888_ca", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OUT_REV, PIXMAN_a8r8g8b8, 2, PIXMAN_a8r8g8b8 }, { "over_reverse_n_8888", PIXMAN_a8r8g8b8, 1, PIXMAN_OP_OVER_REVERSE, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "in_reverse_8888_8888", PIXMAN_a8r8g8b8, 0, PIXMAN_OP_IN_REVERSE, PIXMAN_null, 0, PIXMAN_a8r8g8b8 }, { "pixbuf", PIXMAN_x8b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, 0, PIXMAN_a8r8g8b8 }, { "rpixbuf", PIXMAN_x8b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, 0, PIXMAN_a8b8g8r8 }, }; static const test_entry_t special_patterns[] = { { "add_n_2x10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_x2r10g10b10 }, { "add_n_2a10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_ADD, PIXMAN_null, 0, PIXMAN_a2r10g10b10 }, { "src_n_2x10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x2r10g10b10 }, { "src_n_2a10", PIXMAN_a2r10g10b10, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a2r10g10b10 }, { "src_0888_8888_rev", PIXMAN_b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_x8r8g8b8 }, { "src_0888_0565_rev", PIXMAN_b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_r5g6b5 }, { "src_n_8", PIXMAN_a8, 1, PIXMAN_OP_SRC, PIXMAN_null, 0, PIXMAN_a8 }, { "pixbuf", PIXMAN_x8b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, 0, PIXMAN_a8r8g8b8 }, { "rpixbuf", PIXMAN_x8b8g8r8, 0, PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, 0, PIXMAN_a8b8g8r8 }, }; /* Returns the sub-string's end pointer in string. 
*/ static const char * copy_sub_string (char *buf, const char *string, const char *scan_from, const char *end) { const char *delim; size_t n; delim = strchr (scan_from, '_'); if (!delim) delim = end; n = delim - string; strncpy(buf, string, n); buf[n] = '\0'; return delim; } static pixman_op_t parse_longest_operator (char *buf, const char **strp, const char *end) { const char *p = *strp; const char *sub_end; const char *best_end = p; pixman_op_t best_op = PIXMAN_OP_NONE; pixman_op_t op; while (p < end) { sub_end = copy_sub_string (buf, *strp, p, end); op = operator_from_string (buf); p = sub_end + 1; if (op != PIXMAN_OP_NONE) { best_end = p; best_op = op; } } *strp = best_end; return best_op; } static pixman_format_code_t parse_format (char *buf, const char **p, const char *end) { pixman_format_code_t format; const char *delim; if (*p >= end) return PIXMAN_null; delim = copy_sub_string (buf, *p, *p, end); format = format_from_string (buf); if (format != PIXMAN_null) *p = delim + 1; return format; } static int parse_test_pattern (test_entry_t *test, const char *pattern) { const char *p = pattern; const char *end = pattern + strlen (pattern); char buf[1024]; pixman_format_code_t format[3]; int i; if (strlen (pattern) > sizeof (buf) - 1) return -1; /* Special cases that the parser cannot produce. */ for (i = 0; i < ARRAY_LENGTH (special_patterns); i++) { if (strcmp (pattern, special_patterns[i].testname) == 0) { *test = special_patterns[i]; return 0; } } test->testname = pattern; /* Extract operator, may contain delimiters, * so take the longest string that matches. 
*/ test->op = parse_longest_operator (buf, &p, end); if (test->op == PIXMAN_OP_NONE) return -1; /* extract up to three pixel formats */ format[0] = parse_format (buf, &p, end); format[1] = parse_format (buf, &p, end); format[2] = parse_format (buf, &p, end); if (format[0] == PIXMAN_null || format[1] == PIXMAN_null) return -1; /* recognize CA flag */ test->mask_flags = 0; if (p < end) { if (strcmp (p, "ca") == 0) test->mask_flags |= CA_FLAG; else return -1; /* trailing garbage */ } test->src_fmt = format[0]; if (format[2] == PIXMAN_null) { test->mask_fmt = PIXMAN_null; test->dst_fmt = format[1]; } else { test->mask_fmt = format[1]; test->dst_fmt = format[2]; } test->src_flags = 0; if (test->src_fmt == PIXMAN_solid) { test->src_fmt = PIXMAN_a8r8g8b8; test->src_flags |= SOLID_FLAG; } if (test->mask_fmt == PIXMAN_solid) { if (test->mask_flags & CA_FLAG) test->mask_fmt = PIXMAN_a8r8g8b8; else test->mask_fmt = PIXMAN_a8; test->mask_flags |= SOLID_FLAG; } return 0; } static int check_int (int got, int expected, const char *name, const char *field) { if (got == expected) return 0; printf ("%s: %s failure: expected %d, got %d.\n", name, field, expected, got); return 1; } static int check_format (int got, int expected, const char *name, const char *field) { if (got == expected) return 0; printf ("%s: %s failure: expected %s (%#x), got %s (%#x).\n", name, field, format_name (expected), expected, format_name (got), got); return 1; } static void parser_self_test (void) { const test_entry_t *ent; test_entry_t test; int fails = 0; int i; for (i = 0; i < ARRAY_LENGTH (tests_tbl); i++) { ent = &tests_tbl[i]; if (parse_test_pattern (&test, ent->testname) < 0) { printf ("parsing failed for '%s'\n", ent->testname); fails++; continue; } fails += check_format (test.src_fmt, ent->src_fmt, ent->testname, "src_fmt"); fails += check_format (test.mask_fmt, ent->mask_fmt, ent->testname, "mask_fmt"); fails += check_format (test.dst_fmt, ent->dst_fmt, ent->testname, "dst_fmt"); fails += 
check_int (test.src_flags, ent->src_flags, ent->testname, "src_flags"); fails += check_int (test.mask_flags, ent->mask_flags, ent->testname, "mask_flags"); fails += check_int (test.op, ent->op, ent->testname, "op"); } if (fails) { printf ("Parser self-test failed.\n"); exit (EXIT_FAILURE); } if (!use_csv_output) printf ("Parser self-test complete.\n"); } static void print_test_details (const test_entry_t *test) { printf ("%s: %s, src %s%s, mask %s%s%s, dst %s\n", test->testname, operator_name (test->op), format_name (test->src_fmt), test->src_flags & SOLID_FLAG ? " solid" : "", format_name (test->mask_fmt), test->mask_flags & SOLID_FLAG ? " solid" : "", test->mask_flags & CA_FLAG ? " CA" : "", format_name (test->dst_fmt)); } static void run_one_test (const char *pattern, double bandwidth_, pixman_bool_t prdetails) { test_entry_t test; if (parse_test_pattern (&test, pattern) < 0) { printf ("Error: Could not parse the test pattern '%s'.\n", pattern); return; } if (prdetails) { print_test_details (&test); printf ("---\n"); } bench_composite (pattern, test.src_fmt, test.src_flags, test.op, test.mask_fmt, test.mask_flags, test.dst_fmt, bandwidth_ / 8); } static void run_default_tests (double bandwidth_) { int i; for (i = 0; i < ARRAY_LENGTH (tests_tbl); i++) run_one_test (tests_tbl[i].testname, bandwidth_, FALSE); } static void print_explanation (void) { printf ("Benchmark for a set of most commonly used functions\n"); printf ("---\n"); printf ("All results are presented in millions of pixels per second\n"); printf ("L1 - small Xx1 rectangle (fitting L1 cache), always blitted at the same\n"); printf (" memory location with small drift in horizontal direction\n"); printf ("L2 - small XxY rectangle (fitting L2 cache), always blitted at the same\n"); printf (" memory location with small drift in horizontal direction\n"); printf ("M - large %dx%d rectangle, always blitted at the same\n", WIDTH - 64, HEIGHT); printf (" memory location with small drift in horizontal 
direction\n"); printf ("HT - random rectangles with %dx%d average size are copied from\n", TILEWIDTH, TILEWIDTH); printf (" one %dx%d buffer to another, traversing from left to right\n", WIDTH, HEIGHT); printf (" and from top to bottom\n"); printf ("VT - random rectangles with %dx%d average size are copied from\n", TILEWIDTH, TILEWIDTH); printf (" one %dx%d buffer to another, traversing from top to bottom\n", WIDTH, HEIGHT); printf (" and from left to right\n"); printf ("R - random rectangles with %dx%d average size are copied from\n", TILEWIDTH, TILEWIDTH); printf (" random locations of one %dx%d buffer to another\n", WIDTH, HEIGHT); printf ("RT - as R, but %dx%d average sized rectangles are copied\n", TINYWIDTH, TINYWIDTH); printf ("---\n"); } static void print_speed_scaling (double bw) { printf ("reference memcpy speed = %.1fMB/s (%.1fMP/s for 32bpp fills)\n", bw / 1000000., bw / 4000000); if (use_scaling) { printf ("---\n"); if (filter == PIXMAN_FILTER_BILINEAR) printf ("BILINEAR scaling\n"); else if (filter == PIXMAN_FILTER_NEAREST) printf ("NEAREST scaling\n"); else printf ("UNKNOWN scaling\n"); } printf ("---\n"); } static void usage (const char *progname) { printf ("Usage: %s [-b] [-n] [-c] [-m M] pattern\n", progname); printf (" -n : benchmark nearest scaling\n"); printf (" -b : benchmark bilinear scaling\n"); printf (" -c : print output as CSV data\n"); printf (" -m M : set reference memcpy speed to M MB/s instead of measuring it\n"); } int main (int argc, char *argv[]) { int i; const char *pattern = NULL; for (i = 1; i < argc; i++) { if (argv[i][0] == '-') { if (strchr (argv[i] + 1, 'b')) { use_scaling = TRUE; filter = PIXMAN_FILTER_BILINEAR; } else if (strchr (argv[i] + 1, 'n')) { use_scaling = TRUE; filter = PIXMAN_FILTER_NEAREST; } if (strchr (argv[i] + 1, 'c')) use_csv_output = TRUE; if (strcmp (argv[i], "-m") == 0 && i + 1 < argc) bandwidth = atof (argv[++i]) * 1e6; } else { if (pattern) { pattern = NULL; printf ("Error: extra arguments given.\n"); 
break; } pattern = argv[i]; } } if (!pattern) { usage (argv[0]); return 1; } parser_self_test (); src = aligned_malloc (4096, BUFSIZE * 3); memset (src, 0xCC, BUFSIZE * 3); dst = src + (BUFSIZE / 4); mask = dst + (BUFSIZE / 4); if (!use_csv_output) print_explanation (); if (bandwidth < 1.0) bandwidth = bench_memcpy (); if (!use_csv_output) print_speed_scaling (bandwidth); if (strcmp (pattern, "all") == 0) run_default_tests (bandwidth); else run_one_test (pattern, bandwidth, !use_csv_output); free (src); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/matrix-test.c0000664000175000017500000001551414712446423016530 0ustar00mattst88mattst88/* * Copyright Âİ 2012 Siarhei Siamashka * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "utils.h" #include #include #include #include #ifdef HAVE_FLOAT128 /* When 128-bit floats are available, build a high-precision reference implementation to check pixman's fixed-point transform code against. */ #define pixman_fixed_to_float128(x) (((__float128)(x)) / 65536.0Q) typedef struct { __float128 v[3]; } pixman_vector_f128_t; typedef struct { __float128 m[3][3]; } pixman_transform_f128_t; /* Reference transform: multiply the 3x3 matrix 't' with the homogeneous vector 'v' into 'result'. If the resulting w component is non-zero the vector is normalized (x /= w, y /= w, w = 1) and TRUE is returned; otherwise FALSE. */ pixman_bool_t pixman_transform_point_f128 (const pixman_transform_f128_t *t, const pixman_vector_f128_t *v, pixman_vector_f128_t *result) { int i; for (i = 0; i < 3; i++) { result->v[i] = t->m[i][0] * v->v[0] + t->m[i][1] * v->v[1] + t->m[i][2] * v->v[2]; } if (result->v[2] != 0) { result->v[0] /= result->v[2]; result->v[1] /= result->v[2]; result->v[2] = 1; return TRUE; } else { return FALSE; } } /* TRUE iff 'x' fits in pixman's 48.16 fixed-point range, i.e. |x| < 2^47 (= 65536 * 65536 * 32768). */ pixman_bool_t does_it_fit_fixed_48_16 (__float128 x) { if (x >= 65536.0Q * 65536.0Q * 32768.0Q) return FALSE; if (x <= -65536.0Q * 65536.0Q * 32768.0Q) return FALSE; return TRUE; } #endif /* Swap the byte order of a 32-bit value. */ static inline uint32_t byteswap32 (uint32_t x) { return ((x & ((uint32_t)0xFF << 24)) >> 24) | ((x & ((uint32_t)0xFF << 16)) >> 8) | ((x & ((uint32_t)0xFF << 8)) << 8) | ((x & ((uint32_t)0xFF << 0)) << 24); } /* Swap the byte order of a 64-bit value. */ static inline uint64_t byteswap64 (uint64_t x) { return ((x & ((uint64_t)0xFF << 56)) >> 56) | ((x & ((uint64_t)0xFF << 48)) >> 40) | ((x & ((uint64_t)0xFF << 40)) >> 24) | ((x & ((uint64_t)0xFF << 32)) >> 8) | ((x & ((uint64_t)0xFF << 24)) << 8) | ((x & ((uint64_t)0xFF << 16)) << 24) | ((x & ((uint64_t)0xFF << 8)) << 40) | ((x & ((uint64_t)0xFF << 0)) << 56); } /* Byte-swap every matrix entry; no-op on little-endian hosts. Applied after prng_randmemset() so the random bit patterns are interpreted identically regardless of host endianness. */ static void byteswap_transform (pixman_transform_t *t) { int i, j; if (is_little_endian ()) return; for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) t->matrix[i][j] = byteswap32 (t->matrix[i][j]); } /* Same as byteswap_transform() but for a 48.16 fixed-point vector (64-bit components); no-op on little-endian hosts. */ static void byteswap_vector_48_16 (pixman_vector_48_16_t *v) { int i; if (is_little_endian ()) return; for (i = 0; i < 3; i++) v->v[i] = byteswap64 (v->v[i]); } /* One fuzzer iteration: generate a pseudo-random transform and vector seeded by 'testnum', run the 31.16 fixed-point transform, compare against the __float128 reference where available, and fold the result into a CRC32 returned to the harness. */ uint32_t test_matrix (int testnum, int verbose) { uint32_t crc32 = 0; int i, j, k; pixman_bool_t is_affine; prng_srand (testnum); for (i = 0; i < 100; i++) { pixman_bool_t transform_ok; pixman_transform_t ti;
pixman_vector_48_16_t vi, result_i; #ifdef HAVE_FLOAT128 pixman_transform_f128_t tf; pixman_vector_f128_t vf, result_f; #endif prng_randmemset (&ti, sizeof(ti), 0); prng_randmemset (&vi, sizeof(vi), 0); byteswap_transform (&ti); byteswap_vector_48_16 (&vi); for (j = 0; j < 3; j++) { /* make sure that "vi" contains 31.16 fixed point data */ vi.v[j] >>= 17; /* and apply random shift */ if (prng_rand_n (3) == 0) vi.v[j] >>= prng_rand_n (46); } if (prng_rand_n (2)) { /* random shift for the matrix */ for (j = 0; j < 3; j++) for (k = 0; k < 3; k++) ti.matrix[j][k] >>= prng_rand_n (30); } if (prng_rand_n (2)) { /* affine matrix */ ti.matrix[2][0] = 0; ti.matrix[2][1] = 0; ti.matrix[2][2] = pixman_fixed_1; } if (prng_rand_n (2)) { /* cartesian coordinates */ vi.v[2] = pixman_fixed_1; } is_affine = (ti.matrix[2][0] == 0 && ti.matrix[2][1] == 0 && ti.matrix[2][2] == pixman_fixed_1 && vi.v[2] == pixman_fixed_1); transform_ok = TRUE; if (is_affine && prng_rand_n (2)) pixman_transform_point_31_16_affine (&ti, &vi, &result_i); else transform_ok = pixman_transform_point_31_16 (&ti, &vi, &result_i); #ifdef HAVE_FLOAT128 /* compare with a reference 128-bit floating point implementation */ for (j = 0; j < 3; j++) { vf.v[j] = pixman_fixed_to_float128 (vi.v[j]); for (k = 0; k < 3; k++) { tf.m[j][k] = pixman_fixed_to_float128 (ti.matrix[j][k]); } } if (pixman_transform_point_f128 (&tf, &vf, &result_f)) { if (transform_ok || (does_it_fit_fixed_48_16 (result_f.v[0]) && does_it_fit_fixed_48_16 (result_f.v[1]) && does_it_fit_fixed_48_16 (result_f.v[2]))) { for (j = 0; j < 3; j++) { double diff = fabsl (result_f.v[j] - pixman_fixed_to_float128 (result_i.v[j])); if (is_affine && diff > (0.51 / 65536.0)) { printf ("%d:%d: bad precision for affine (%.12f)\n", testnum, i, diff); abort (); } else if (diff > (0.71 / 65536.0)) { printf ("%d:%d: bad precision for projective (%.12f)\n", testnum, i, diff); abort (); } } } } #endif byteswap_vector_48_16 (&result_i); crc32 = compute_crc32 (crc32, 
&result_i, sizeof (result_i)); } return crc32; } /* Run 20000 iterations of test_matrix() through the shared fuzzer harness; 0xBEBF98C3 is the expected aggregate CRC32 over all iterations. */ int main (int argc, const char *argv[]) { return fuzzer_test_main ("matrix", 20000, 0xBEBF98C3, test_matrix, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/meson.build0000664000175000017500000000454614712446423016240 0ustar00mattst88mattst88# Copyright Âİ 2018 Intel Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.
tests = [ 'oob-test', 'infinite-loop', 'trap-crasher', 'fence-image-self-test', 'region-translate-test', 'fetch-test', 'a1-trap-test', 'prng-test', 'radial-invalid', 'pdf-op-test', 'region-test', 'combiner-test', 'scaling-crash-test', 'alpha-loop', 'scaling-helpers-test', 'rotate-test', 'alphamap', 'gradient-crash-test', 'pixel-test', 'matrix-test', 'filter-reduction-test', 'composite-traps-test', 'region-contains-test', 'glyph-test', 'solid-test', 'stress-test', 'cover-test', 'blitters-test', 'affine-test', 'scaling-test', 'composite', 'tolerance-test', ] # Remove/update this once thread-test.c supports threading methods # other than PThreads and Windows threads if pthreads_found or host_machine.system() == 'windows' tests += 'thread-test' endif progs = [ 'lowlevel-blt-bench', 'radial-perf-test', 'check-formats', 'scaling-bench', 'affine-bench', ] foreach t : tests test( t, executable( t, [t + '.c', config_h], dependencies : [idep_pixman, libtestutils_dep, dep_threads, dep_openmp, dep_png], ), timeout : 120, is_parallel : true, ) endforeach foreach p : progs executable( p, p + '.c', dependencies : [idep_pixman, libtestutils_dep, dep_openmp], ) endforeach ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/oob-test.c0000664000175000017500000000353414712446423016002 0ustar00mattst88mattst88#include #include #include "utils.h" typedef struct { int width; int height; int stride; pixman_format_code_t format; } image_info_t; typedef struct { pixman_op_t op; image_info_t src; image_info_t dest; int src_x; int src_y; int dest_x; int dest_y; int width; int height; } composite_info_t; const composite_info_t info[] = { { PIXMAN_OP_SRC, { 3, 6, 16, PIXMAN_a8r8g8b8 }, { 5, 7, 20, PIXMAN_x8r8g8b8 }, 1, 8, 1, -1, 1, 8 }, { PIXMAN_OP_SRC, { 7, 5, 36, PIXMAN_a8r8g8b8 }, { 6, 5, 28, PIXMAN_x8r8g8b8 }, 8, 5, 5, 3, 1, 2 }, { PIXMAN_OP_OVER, { 10, 10, 40, PIXMAN_a2b10g10r10 }, { 10, 10, 40, PIXMAN_a2b10g10r10 }, 0, 0, 0, 0, 
10, 10 }, { PIXMAN_OP_OVER, { 10, 10, 40, PIXMAN_x2b10g10r10 }, { 10, 10, 40, PIXMAN_x2b10g10r10 }, 0, 0, 0, 0, 10, 10 }, }; /* Fill a buffer with a deterministic pattern and wrap it in a pixman image of the requested format/size/stride. The caller owns both the image and its bits (freed separately in test_composite()). */ static pixman_image_t * make_image (const image_info_t *info) { char *data = malloc (info->stride * info->height); /* NOTE(review): malloc result is unchecked before the fill loop below — acceptable for a test program, but worth confirming. */ int i; for (i = 0; i < info->height * info->stride; ++i) data[i] = (i % 255) ^ (((i % 16) << 4) | (i & 0xf0)); return pixman_image_create_bits (info->format, info->width, info->height, (uint32_t *)data, info->stride); } /* Run one composite with the (deliberately out-of-bounds) geometry from 'info'; the point of the test is that pixman must clip and not read or write outside the buffers. */ static void test_composite (const composite_info_t *info) { pixman_image_t *src = make_image (&info->src); pixman_image_t *dest = make_image (&info->dest); pixman_image_composite (PIXMAN_OP_SRC, src, NULL, dest, info->src_x, info->src_y, 0, 0, info->dest_x, info->dest_y, info->width, info->height); free (src->bits.bits); free (dest->bits.bits); pixman_image_unref (src); pixman_image_unref (dest); } /* Run every entry of the info[] table; success is simply not crashing (run under a memory checker to catch out-of-bounds accesses). */ int main (int argc, char **argv) { int i; for (i = 0; i < ARRAY_LENGTH (info); ++i) test_composite (&info[i]); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/pdf-op-test.c0000664000175000017500000000302214712446423016400 0ustar00mattst88mattst88#include #include "utils.h" /* The PDF blend-mode operators this test exercises exhaustively. */ static const pixman_op_t pdf_ops[] = { PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_COLOR_DODGE, PIXMAN_OP_COLOR_BURN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_SOFT_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, PIXMAN_OP_HSL_HUE, PIXMAN_OP_HSL_SATURATION, PIXMAN_OP_HSL_COLOR, PIXMAN_OP_HSL_LUMINOSITY }; /* a8r8g8b8 pixel values covering zero, full and partial alpha. */ static const uint32_t pixels[] = { 0x00808080, 0x80123456, 0x00000000, 0xffffffff, 0x00ffffff, 0x80808080, 0x00123456, }; /* Composite every (op, src, mask, dest) combination of 1x1 images, with division-by-zero FP exceptions enabled so any divide-by-zero in the blend-mode math traps instead of passing silently. */ int main () { int o, s, m, d; enable_divbyzero_exceptions(); for (o = 0; o < ARRAY_LENGTH (pdf_ops); ++o) { pixman_op_t op = pdf_ops[o]; for (s = 0; s < ARRAY_LENGTH (pixels); ++s) { pixman_image_t *src; src = pixman_image_create_bits ( PIXMAN_a8r8g8b8, 1, 1, (uint32_t *)&(pixels[s]), 4); /* m == -1 means "no mask" (msk stays NULL below). */ for (m = -1; m
< ARRAY_LENGTH (pixels); ++m) { pixman_image_t *msk = NULL; if (m >= 0) { msk = pixman_image_create_bits ( PIXMAN_a8r8g8b8, 1, 1, (uint32_t *)&(pixels[m]), 4); } for (d = 0; d < ARRAY_LENGTH (pixels); ++d) { pixman_image_t *dst; uint32_t dp = pixels[d]; dst = pixman_image_create_bits ( PIXMAN_a8r8g8b8, 1, 1, &dp, 4); pixman_image_composite (op, src, msk, dst, 0, 0, 0, 0, 0, 0, 1, 1); pixman_image_unref (dst); } if (msk) pixman_image_unref (msk); } pixman_image_unref (src); } } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/pixel-test.c0000664000175000017500000023337714712446423016356 0ustar00mattst88mattst88/* * Copyright Âİ 2013 Soeren Sandmann * Copyright Âİ 2013 Red Hat, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include #include /* abort() */ #include #include #include "utils.h" typedef struct pixel_combination_t pixel_combination_t; struct pixel_combination_t { pixman_op_t op; pixman_format_code_t src_format; uint32_t src_pixel; pixman_format_code_t mask_format; uint32_t mask_pixel; pixman_format_code_t dest_format; uint32_t dest_pixel; }; static const pixel_combination_t regressions[] = { { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ffc3ff, PIXMAN_a8, 0x7b, PIXMAN_a8r8g8b8, 0xff00c300, }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xb5, PIXMAN_a4r4g4b4, 0xe3ff, PIXMAN_a2r2g2b2, 0x2e }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xa6, PIXMAN_a8r8g8b8, 0x2b00ff00, PIXMAN_a4r4g4b4, 0x7e }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0x27000013, PIXMAN_a2r2g2b2, 0x80, PIXMAN_a4r4g4b4, 0x9d }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a4r4g4b4, 0xe6f7, PIXMAN_a2r2g2b2, 0xad, PIXMAN_a4r4g4b4, 0x71 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0xff4f70ff, PIXMAN_r5g6b5, 0xb828, PIXMAN_a8r8g8b8, 0xcac400 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xa9, PIXMAN_a4r4g4b4, 0x41c2, PIXMAN_a8r8g8b8, 0xffff2b }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x89, PIXMAN_a8r8g8b8, 0x977cff61, PIXMAN_a4r4g4b4, 0x36 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x81, PIXMAN_r5g6b5, 0x6f9e, PIXMAN_a4r4g4b4, 0x1eb }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xb5, PIXMAN_a4r4g4b4, 0xe247, PIXMAN_a8r8g8b8, 0xffbaff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x97, PIXMAN_a2r2g2b2, 0x9d, PIXMAN_a2r2g2b2, 0x21 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xb4, PIXMAN_a2r2g2b2, 0x90, PIXMAN_a8r8g8b8, 0xc0fd5c }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0xdf00ff70, PIXMAN_a8r8g8b8, 0x2597ff27, PIXMAN_a4r4g4b4, 0xf3 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xb7, PIXMAN_r3g3b2, 0xb1, PIXMAN_a8r8g8b8, 0x9f4bcc }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a4r4g4b4, 0xf39e, PIXMAN_r5g6b5, 0x34, PIXMAN_a8r8g8b8, 0xf6ae00 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0x3aff1dff, PIXMAN_a2r2g2b2, 0x64, PIXMAN_a8r8g8b8, 0x94ffb4 }, { 
PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xa4, PIXMAN_a2r2g2b2, 0x8a, PIXMAN_a4r4g4b4, 0xff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xa5, PIXMAN_a4r4g4b4, 0x1a, PIXMAN_a4r4g4b4, 0xff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xb4, PIXMAN_a2r2g2b2, 0xca, PIXMAN_a4r4g4b4, 0x7b }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xbd, PIXMAN_a4r4g4b4, 0xff37, PIXMAN_a4r4g4b4, 0xff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x96, PIXMAN_a2r2g2b2, 0xbb, PIXMAN_a8r8g8b8, 0x96ffff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x89, PIXMAN_r3g3b2, 0x92, PIXMAN_a4r4g4b4, 0xa8c }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a4r4g4b4, 0xa95b, PIXMAN_a2r2g2b2, 0x68, PIXMAN_a8r8g8b8, 0x38ff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x90, PIXMAN_a8r8g8b8, 0x53bd00ef, PIXMAN_a8r8g8b8, 0xff0003 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1f5ffff, PIXMAN_r3g3b2, 0x22, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x10000b6, PIXMAN_a8r8g8b8, 0x9645, PIXMAN_r5g6b5, 0x6 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x172ff00, PIXMAN_a4r4g4b4, 0xff61, PIXMAN_r3g3b2, 0xc }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x281ffc8, PIXMAN_r5g6b5, 0x39b8, PIXMAN_r5g6b5, 0x13 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x100a2ff, PIXMAN_a4r4g4b4, 0x6500, PIXMAN_a2r2g2b2, 0x5 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffff51, PIXMAN_r5g6b5, 0x52ff, PIXMAN_a2r2g2b2, 0x14 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x150d500, PIXMAN_a8r8g8b8, 0x6200b7ff, PIXMAN_a8r8g8b8, 0x1f5200 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2a9a700, PIXMAN_a8r8g8b8, 0xf7003400, PIXMAN_a8r8g8b8, 0x2200 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x200ffff, PIXMAN_r5g6b5, 0x81ff, PIXMAN_r5g6b5, 0x1f }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2ff00ff, PIXMAN_r5g6b5, 0x3f00, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x3ff1aa4, PIXMAN_a4r4g4b4, 0x2200, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x280ff2c, PIXMAN_r3g3b2, 
0xc6, PIXMAN_a8r8g8b8, 0xfdfd44fe }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x13aff1d, PIXMAN_a2r2g2b2, 0x4b, PIXMAN_r5g6b5, 0x12a1 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x2ffff88, PIXMAN_a8r8g8b8, 0xff3a49, PIXMAN_r5g6b5, 0xf7df }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1009700, PIXMAN_a2r2g2b2, 0x56, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1aacbff, PIXMAN_a4r4g4b4, 0x84, PIXMAN_r3g3b2, 0x1 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x100b1ff, PIXMAN_a2r2g2b2, 0xf5, PIXMAN_a8r8g8b8, 0xfea89cff }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ff0000, PIXMAN_r5g6b5, 0x6800, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x10064ff, PIXMAN_r3g3b2, 0x61, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1bb00ff, PIXMAN_r5g6b5, 0x76b5, PIXMAN_a4r4g4b4, 0x500 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2ffff41, PIXMAN_r5g6b5, 0x7100, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ff1231, PIXMAN_a8r8g8b8, 0x381089, PIXMAN_r5g6b5, 0x38a5 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x16e5c49, PIXMAN_a8r8g8b8, 0x4dfa3694, PIXMAN_a8r8g8b8, 0x211c16 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x134ff62, PIXMAN_a2r2g2b2, 0x14, PIXMAN_r3g3b2, 0x8 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x300ffeb, PIXMAN_r3g3b2, 0xc7, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x3ff8bff, PIXMAN_r3g3b2, 0x3e, PIXMAN_a8r8g8b8, 0x3008baa }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff00ff, PIXMAN_a4r4g4b4, 0x3466, PIXMAN_a4r4g4b4, 0x406 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ddc027, PIXMAN_a4r4g4b4, 0x7d00, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x2ffff00, PIXMAN_a8r8g8b8, 0xc92cfb52, PIXMAN_a4r4g4b4, 0x200 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ff116a, PIXMAN_a4r4g4b4, 0x6000, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_r5g6b5, 
0x2f95, PIXMAN_r5g6b5, 0x795 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffff00, PIXMAN_a4r4g4b4, 0x354a, PIXMAN_r5g6b5, 0x3180 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1d7ff00, PIXMAN_a4r4g4b4, 0xd6ff, PIXMAN_a8r8g8b8, 0xffff0700 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1bc5db7, PIXMAN_r5g6b5, 0x944f, PIXMAN_a4r4g4b4, 0xff05 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x185ffd9, PIXMAN_a2r2g2b2, 0x9c, PIXMAN_r5g6b5, 0x3c07 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1fa7f61, PIXMAN_a8r8g8b8, 0xff31ff00, PIXMAN_r3g3b2, 0xd2 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1c4ff00, PIXMAN_r3g3b2, 0xb, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2ff00ff, PIXMAN_a8r8g8b8, 0x3f3caeda, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100ff00, PIXMAN_r5g6b5, 0xff, PIXMAN_r5g6b5, 0xe0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff68ff, PIXMAN_a4r4g4b4, 0x8046, PIXMAN_r5g6b5, 0xec }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x100ff28, PIXMAN_a8r8g8b8, 0x4c00, PIXMAN_r5g6b5, 0x260 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffff00, PIXMAN_a4r4g4b4, 0xd92a, PIXMAN_a8r8g8b8, 0x2200 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100289a, PIXMAN_a8r8g8b8, 0x74ffb8ff, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1baff00, PIXMAN_r5g6b5, 0x4e9d, PIXMAN_r5g6b5, 0x3000 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1fcffad, PIXMAN_r5g6b5, 0x42d7, PIXMAN_a8r8g8b8, 0x1c6ffe5 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x183ff00, PIXMAN_r3g3b2, 0x7e, PIXMAN_a4r4g4b4, 0xff }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x2ff0076, PIXMAN_a8r8g8b8, 0x2a0000, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x3d8bbff, PIXMAN_r5g6b5, 0x6900, PIXMAN_a8r8g8b8, 0x35b0000 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x14f00ff, PIXMAN_r5g6b5, 0xd48, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 
0x28c72df, PIXMAN_a8r8g8b8, 0xff5cff31, PIXMAN_a4r4g4b4, 0x2 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffffff, PIXMAN_a8r8g8b8, 0xffad8020, PIXMAN_r5g6b5, 0x4 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x100ff00, PIXMAN_a2r2g2b2, 0x76, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1005d00, PIXMAN_r5g6b5, 0x7b04, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x3cdfc3e, PIXMAN_a8r8g8b8, 0x69ec21d3, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x200ffff, PIXMAN_r5g6b5, 0x30ff, PIXMAN_r5g6b5, 0x60ff }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x532fff4, PIXMAN_r5g6b5, 0xcb, PIXMAN_r5g6b5, 0xd9a1 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_r3g3b2, 0x5f, PIXMAN_a2r2g2b2, 0x10 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_a8r8g8b8, 0xffd60052, PIXMAN_r3g3b2, 0x1 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ff6491, PIXMAN_a8r8g8b8, 0x1e53ff00, PIXMAN_r5g6b5, 0x1862 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffff00, PIXMAN_r3g3b2, 0xc7, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x29d0fff, PIXMAN_a4r4g4b4, 0x25ff, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x141760a, PIXMAN_a4r4g4b4, 0x7ec2, PIXMAN_a4r4g4b4, 0x130 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1abedff, PIXMAN_a8r8g8b8, 0x75520068, PIXMAN_r3g3b2, 0x87 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x10000ff, PIXMAN_a8r8g8b8, 0xff00e652, PIXMAN_r3g3b2, 0x1 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x16006075, PIXMAN_r5g6b5, 0xc00, PIXMAN_a8r8g8b8, 0x27f0900 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x200ff00, PIXMAN_a8r8g8b8, 0xd1b83f57, PIXMAN_a4r4g4b4, 0xff75 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x14000c4, PIXMAN_a4r4g4b4, 0x96, PIXMAN_a2r2g2b2, 0x1 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ff00d1, PIXMAN_r3g3b2, 0x79, PIXMAN_a2r2g2b2, 0x0 }, { PIXMAN_OP_CONJOINT_ATOP, 
PIXMAN_a8r8g8b8, 0x1ff00dc, PIXMAN_a4r4g4b4, 0xc5ff, PIXMAN_a2r2g2b2, 0x10 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffffb2, PIXMAN_a8r8g8b8, 0x4cff5700, PIXMAN_r3g3b2, 0x48 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1327482, PIXMAN_a8r8g8b8, 0x247ff, PIXMAN_a8r8g8b8, 0x82 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1d0ff00, PIXMAN_r3g3b2, 0xc9, PIXMAN_r5g6b5, 0x240 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x13d35ff, PIXMAN_a2r2g2b2, 0x6d, PIXMAN_r3g3b2, 0x1 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffc6b2, PIXMAN_a8r8g8b8, 0x5abe8e3c, PIXMAN_r5g6b5, 0x5a27 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x15700ff, PIXMAN_r3g3b2, 0xdd, PIXMAN_a8r8g8b8, 0x55 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff11ff, PIXMAN_r3g3b2, 0x30, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ff00ff, PIXMAN_a2r2g2b2, 0x6d, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1421d5f, PIXMAN_a4r4g4b4, 0xff85, PIXMAN_a8r8g8b8, 0x1420f00 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1d2ffff, PIXMAN_r5g6b5, 0xfc, PIXMAN_r5g6b5, 0x1c }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ffff42, PIXMAN_a4r4g4b4, 0x7100, PIXMAN_a4r4g4b4, 0x771 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x25ae3d4, PIXMAN_a8r8g8b8, 0x39ffc99a, PIXMAN_a8r8g8b8, 0x14332f }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ff0643, PIXMAN_a8r8g8b8, 0x4c000000, PIXMAN_r5g6b5, 0x4802 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1966a00, PIXMAN_r3g3b2, 0x46, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x387ff59, PIXMAN_r5g6b5, 0x512c, PIXMAN_r5g6b5, 0x120 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1f7ffb0, PIXMAN_r5g6b5, 0x63b8, PIXMAN_a8r8g8b8, 0x1000089 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x185841c, PIXMAN_a2r2g2b2, 0x5c, PIXMAN_a8r8g8b8, 0x8400 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ffc3ff, PIXMAN_a8r8g8b8, 0xff7b, PIXMAN_a8r8g8b8, 0xff00c300 }, { 
PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff7500, PIXMAN_a2r2g2b2, 0x47, PIXMAN_a4r4g4b4, 0xff }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1002361, PIXMAN_a2r2g2b2, 0x7e, PIXMAN_r5g6b5, 0x64 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x10000b6, PIXMAN_a8r8g8b8, 0x59004463, PIXMAN_a4r4g4b4, 0xffa7 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff5a49, PIXMAN_a8r8g8b8, 0xff3fff2b, PIXMAN_a8r8g8b8, 0x13f000c }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x3ffecfc, PIXMAN_r3g3b2, 0x3c, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1630044, PIXMAN_a2r2g2b2, 0x63, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x1d2ff58, PIXMAN_a8r8g8b8, 0x8f77ff, PIXMAN_a4r4g4b4, 0x705 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x14dffff, PIXMAN_a2r2g2b2, 0x9a, PIXMAN_a8r8g8b8, 0x1a0000 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100ff92, PIXMAN_a4r4g4b4, 0x540c, PIXMAN_r5g6b5, 0x2a6 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_a4r4g4b4, 0xddd5, PIXMAN_a4r4g4b4, 0xdd0 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_r5g6b5, 0xff8c, PIXMAN_a4r4g4b4, 0xff0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_r3g3b2, 0x66, PIXMAN_r5g6b5, 0x7d1f }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ffff00, PIXMAN_a4r4g4b4, 0xff5b, PIXMAN_a8r8g8b8, 0x5500 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x2ed2dff, PIXMAN_r5g6b5, 0x7ae7, PIXMAN_r3g3b2, 0xce }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1b13205, PIXMAN_a8r8g8b8, 0x35ffff00, PIXMAN_r5g6b5, 0x2040 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1e60dff, PIXMAN_a4r4g4b4, 0x760f, PIXMAN_a2r2g2b2, 0x11 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x10000ff, PIXMAN_a4r4g4b4, 0x3, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100ffff, PIXMAN_a8r8g8b8, 0x6600, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x30000fa, 
PIXMAN_a4r4g4b4, 0x23b7, PIXMAN_a8r8g8b8, 0x21 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_r3g3b2, 0x60, PIXMAN_r3g3b2, 0x60 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x3b31b30, PIXMAN_r3g3b2, 0x2e, PIXMAN_a8r8g8b8, 0x3000c20 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x160ffff, PIXMAN_a4r4g4b4, 0xff42, PIXMAN_r3g3b2, 0xed }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x172ffff, PIXMAN_a4r4g4b4, 0x5100, PIXMAN_r3g3b2, 0x29 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x16300ff, PIXMAN_a4r4g4b4, 0x5007, PIXMAN_a8r8g8b8, 0x77 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x2ffff3a, PIXMAN_a8r8g8b8, 0x26640083, PIXMAN_a4r4g4b4, 0x220 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x106ff60, PIXMAN_r5g6b5, 0xdce, PIXMAN_a8r8g8b8, 0x100ba00 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100e7ff, PIXMAN_r5g6b5, 0xa00, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x2b500f1, PIXMAN_a4r4g4b4, 0x7339, PIXMAN_a8r8g8b8, 0x1000091 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff00ff, PIXMAN_a4r4g4b4, 0xc863, PIXMAN_r5g6b5, 0x6 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1ffffca, PIXMAN_a8r8g8b8, 0x8b4cf000, PIXMAN_r3g3b2, 0xd2 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1fffe00, PIXMAN_r3g3b2, 0x88, PIXMAN_r3g3b2, 0x8 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x16f0000, PIXMAN_a2r2g2b2, 0x59, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x377ff43, PIXMAN_a4r4g4b4, 0x2a, PIXMAN_a8r8g8b8, 0x2d }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x11dffff, PIXMAN_r3g3b2, 0xcb, PIXMAN_r3g3b2, 0x8 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_r5g6b5, 0xbdab, PIXMAN_a4r4g4b4, 0xbb0 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ff3343, PIXMAN_a8r8g8b8, 0x7a00ffff, PIXMAN_a2r2g2b2, 0xd }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ebff4b, PIXMAN_r3g3b2, 0x26, PIXMAN_r3g3b2, 0x24 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 
0x2c1b3ff, PIXMAN_a8r8g8b8, 0x3000152a, PIXMAN_r3g3b2, 0x24 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1a7ffff, PIXMAN_r3g3b2, 0x9, PIXMAN_r5g6b5, 0x24a }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x4ff00ec, PIXMAN_a8r8g8b8, 0x1da4961e, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff25ff, PIXMAN_a8r8g8b8, 0x64b0ff00, PIXMAN_r5g6b5, 0x606c }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1fd62ff, PIXMAN_a4r4g4b4, 0x76b1, PIXMAN_r5g6b5, 0x716e }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x194ffde, PIXMAN_r5g6b5, 0x47ff, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x108ffff, PIXMAN_a8r8g8b8, 0xffffff66, PIXMAN_r5g6b5, 0xff0c }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x5ffffff, PIXMAN_r5g6b5, 0xdf, PIXMAN_r5g6b5, 0xc0 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x100ad31, PIXMAN_a2r2g2b2, 0xc5, PIXMAN_a4r4g4b4, 0x31 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffff34, PIXMAN_a8r8g8b8, 0x6a57c491, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1fffff1, PIXMAN_r3g3b2, 0xaf, PIXMAN_r5g6b5, 0xb01e }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff67ff, PIXMAN_a4r4g4b4, 0x50ff, PIXMAN_a8r8g8b8, 0x552255 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x11bffff, PIXMAN_r5g6b5, 0xef0c, PIXMAN_r5g6b5, 0xc }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x16cf37d, PIXMAN_a4r4g4b4, 0xc561, PIXMAN_r5g6b5, 0x2301 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x2ffff9c, PIXMAN_a4r4g4b4, 0x2700, PIXMAN_a8r8g8b8, 0xffff }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x200f322, PIXMAN_a8r8g8b8, 0xff3c7e, PIXMAN_r5g6b5, 0x2 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1f14a33, PIXMAN_a8r8g8b8, 0x26cff79, PIXMAN_r3g3b2, 0xf9 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x11d922c, PIXMAN_r3g3b2, 0xab, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x100ffff, PIXMAN_a2r2g2b2, 0xf5, PIXMAN_r3g3b2, 0x9 }, { 
PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x18697ff, PIXMAN_a4r4g4b4, 0x5700, PIXMAN_r5g6b5, 0xfa6d }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x12000fc, PIXMAN_a2r2g2b2, 0x41, PIXMAN_a8r8g8b8, 0xb0054 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x100ccff, PIXMAN_a4r4g4b4, 0x657e, PIXMAN_r5g6b5, 0x3b1 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffff1f, PIXMAN_a2r2g2b2, 0xa6, PIXMAN_r5g6b5, 0x2a0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x11fff82, PIXMAN_a4r4g4b4, 0xff94, PIXMAN_a8r8g8b8, 0x1010123 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x154bd19, PIXMAN_a4r4g4b4, 0xb600, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x10000ff, PIXMAN_r5g6b5, 0x8e, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x21aff00, PIXMAN_r5g6b5, 0x71ff, PIXMAN_r3g3b2, 0xf2 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2ad00a7, PIXMAN_a4r4g4b4, 0x23, PIXMAN_a8r8g8b8, 0x21 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x100ff00, PIXMAN_r5g6b5, 0xb343, PIXMAN_r3g3b2, 0xc }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x3ffa500, PIXMAN_a8r8g8b8, 0x1af5b4, PIXMAN_a8r8g8b8, 0xff1abc00 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x2ffff11, PIXMAN_a8r8g8b8, 0x9f334f, PIXMAN_a8r8g8b8, 0x9f0005 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x2c75971, PIXMAN_a4r4g4b4, 0x3900, PIXMAN_a4r4g4b4, 0x211 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100ff49, PIXMAN_a8r8g8b8, 0x813dc25e, PIXMAN_r5g6b5, 0x667d }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x10000ff, PIXMAN_a4r4g4b4, 0x4bff, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x20ebcff, PIXMAN_r5g6b5, 0xc9ff, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ffff00, PIXMAN_r5g6b5, 0x51ff, PIXMAN_r3g3b2, 0x44 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffd158, PIXMAN_a8r8g8b8, 0x7d88ffce, PIXMAN_r3g3b2, 0x6c }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1425e21, PIXMAN_a2r2g2b2, 0xa5, PIXMAN_r5g6b5, 0xe1 }, { 
PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x14b00ff, PIXMAN_a8r8g8b8, 0xbe95004b, PIXMAN_r5g6b5, 0x9 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x14fc0cd, PIXMAN_a8r8g8b8, 0x2d12b78b, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff8230, PIXMAN_a2r2g2b2, 0x4c, PIXMAN_r3g3b2, 0x44 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ff31ff, PIXMAN_a2r2g2b2, 0x14, PIXMAN_a8r8g8b8, 0x551000 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x17800ff, PIXMAN_a4r4g4b4, 0x22, PIXMAN_a8r8g8b8, 0x22 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x14500ff, PIXMAN_a4r4g4b4, 0x6400, PIXMAN_r5g6b5, 0xff78 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100ff9d, PIXMAN_r3g3b2, 0xcd, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x3ff00ff, PIXMAN_a4r4g4b4, 0xf269, PIXMAN_a4r4g4b4, 0x200 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ff28b8, PIXMAN_a4r4g4b4, 0x33ff, PIXMAN_r5g6b5, 0x3000 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1006278, PIXMAN_a8r8g8b8, 0x8a7f18, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffcb00, PIXMAN_a4r4g4b4, 0x7900, PIXMAN_a2r2g2b2, 0x14 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x115ff00, PIXMAN_a8r8g8b8, 0x508d, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x3ff30b5, PIXMAN_r5g6b5, 0x2e60, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x182fffb, PIXMAN_r3g3b2, 0x1, PIXMAN_a8r8g8b8, 0x1000054 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x16fff00, PIXMAN_r5g6b5, 0x7bc0, PIXMAN_a8r8g8b8, 0x367900 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1d95dd8, PIXMAN_a4r4g4b4, 0xfff5, PIXMAN_r5g6b5, 0xff09 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ff3cdc, PIXMAN_a8r8g8b8, 0x3bda45ff, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x13900f8, PIXMAN_a8r8g8b8, 0x7e00ffff, PIXMAN_a4r4g4b4, 0xff00 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x10ea9ff, PIXMAN_a8r8g8b8, 0xff34ff22, 
PIXMAN_r5g6b5, 0xff52 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2002e99, PIXMAN_a4r4g4b4, 0x3000, PIXMAN_r5g6b5, 0x43 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x100ffff, PIXMAN_r5g6b5, 0x19ff, PIXMAN_r3g3b2, 0x3 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffff00, PIXMAN_a8r8g8b8, 0xffff4251, PIXMAN_a2r2g2b2, 0x4 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x121c9ff, PIXMAN_a4r4g4b4, 0xd2, PIXMAN_a4r4g4b4, 0x2 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x100ff4d, PIXMAN_a2r2g2b2, 0x5e, PIXMAN_a2r2g2b2, 0x4 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x29ab4ff, PIXMAN_r3g3b2, 0x47, PIXMAN_a8r8g8b8, 0x1900 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffc1ac, PIXMAN_a8r8g8b8, 0xee4ed0ac, PIXMAN_a8r8g8b8, 0x1009d74 }, { PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_a8r8g8b8, 0x269dffdc, PIXMAN_a8r8g8b8, 0xff0b00e0, PIXMAN_a8r8g8b8, 0x2a200ff }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffffff, PIXMAN_a4r4g4b4, 0x3200, PIXMAN_r3g3b2, 0x24 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x100ffed, PIXMAN_a8r8g8b8, 0x67004eff, PIXMAN_a2r2g2b2, 0x5 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x2fffd6a, PIXMAN_a8r8g8b8, 0xc9003bff, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x253ff00, PIXMAN_r5g6b5, 0xff, PIXMAN_r5g6b5, 0xe0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x13600ad, PIXMAN_r5g6b5, 0x35ae, PIXMAN_r3g3b2, 0x1 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffa8ff, PIXMAN_a8r8g8b8, 0xff5f00, PIXMAN_r3g3b2, 0xe0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x10067ff, PIXMAN_a4r4g4b4, 0x450d, PIXMAN_a2r2g2b2, 0x1 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1ff01ff, PIXMAN_r3g3b2, 0x77, PIXMAN_r5g6b5, 0x6800 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x11da4ff, PIXMAN_r5g6b5, 0x83c9, PIXMAN_a4r4g4b4, 0x44 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffd4ff, PIXMAN_r3g3b2, 0xaa, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1ff0000, PIXMAN_a8r8g8b8, 
0x71002a, PIXMAN_a4r4g4b4, 0x700 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1d7ffff, PIXMAN_r5g6b5, 0x3696, PIXMAN_a4r4g4b4, 0x200 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffffc8, PIXMAN_r5g6b5, 0xe900, PIXMAN_a8r8g8b8, 0x2000 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff004a, PIXMAN_r3g3b2, 0x48, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x3ffe969, PIXMAN_r5g6b5, 0xff, PIXMAN_r5g6b5, 0xc0 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x300ff73, PIXMAN_r5g6b5, 0xff, PIXMAN_a8r8g8b8, 0x3000073 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ff93ff, PIXMAN_a8r8g8b8, 0x61fc7d2b, PIXMAN_a4r4g4b4, 0x2 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x11bffff, PIXMAN_a4r4g4b4, 0xffb4, PIXMAN_r5g6b5, 0x8 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x1e9e100, PIXMAN_a2r2g2b2, 0x56, PIXMAN_a2r2g2b2, 0x14 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x3ffb656, PIXMAN_r3g3b2, 0x4, PIXMAN_a4r4g4b4, 0xff99 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100ff00, PIXMAN_r3g3b2, 0x68, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1006dff, PIXMAN_a2r2g2b2, 0x5d, PIXMAN_a8r8g8b8, 0xff00ff55 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x11c00cb, PIXMAN_a2r2g2b2, 0x44, PIXMAN_a4r4g4b4, 0x4 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1d0ff86, PIXMAN_r3g3b2, 0x5c, PIXMAN_a8r8g8b8, 0x3c0000 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x2f25fff, PIXMAN_r3g3b2, 0x36, PIXMAN_a8r8g8b8, 0x2a444aa }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x134af85, PIXMAN_r3g3b2, 0x29, PIXMAN_r5g6b5, 0xf300 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x13398af, PIXMAN_r3g3b2, 0xa5, PIXMAN_a4r4g4b4, 0x13 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff57ff, PIXMAN_a4r4g4b4, 0x252c, PIXMAN_r3g3b2, 0x40 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x115ffff, PIXMAN_r5g6b5, 0xffe3, PIXMAN_r5g6b5, 0x3303 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffff00, PIXMAN_r5g6b5, 
0x6300, PIXMAN_r3g3b2, 0x6c }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x4ccff9c, PIXMAN_r5g6b5, 0xcc, PIXMAN_a8r8g8b8, 0x400003d }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ffc6dd, PIXMAN_r5g6b5, 0x9bff, PIXMAN_r5g6b5, 0x5bff }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x14fff95, PIXMAN_r3g3b2, 0x46, PIXMAN_a8r8g8b8, 0x1000063 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1e6b700, PIXMAN_r5g6b5, 0xc1ff, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffff54, PIXMAN_a8r8g8b8, 0x2e00ff, PIXMAN_r5g6b5, 0x2800 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x3ffffff, PIXMAN_r5g6b5, 0xff, PIXMAN_r5g6b5, 0xe0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1003550, PIXMAN_r5g6b5, 0xffcc, PIXMAN_r5g6b5, 0x1e0 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ffff74, PIXMAN_r3g3b2, 0x28, PIXMAN_a8r8g8b8, 0xfe2f49d7 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1e35100, PIXMAN_r3g3b2, 0x57, PIXMAN_r5g6b5, 0x4000 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x268ffa3, PIXMAN_a4r4g4b4, 0x30, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x35700f8, PIXMAN_r5g6b5, 0xa4, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x3ce1dff, PIXMAN_r5g6b5, 0x2a5e, PIXMAN_a8r8g8b8, 0x210000 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x494a7ff, PIXMAN_a8r8g8b8, 0x1bffe400, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x10026d9, PIXMAN_a8r8g8b8, 0xec00621f, PIXMAN_r5g6b5, 0x63 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x100ff99, PIXMAN_a8r8g8b8, 0xf334ff, PIXMAN_a4r4g4b4, 0x30 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffc200, PIXMAN_a8r8g8b8, 0x1e0000ff, PIXMAN_a8r8g8b8, 0x1e1700 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ff00ff, PIXMAN_r3g3b2, 0x4b, PIXMAN_r5g6b5, 0x4818 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x2e800ff, PIXMAN_a4r4g4b4, 0xd3, PIXMAN_a4r4g4b4, 0xec }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x19a001f, 
PIXMAN_r3g3b2, 0x76, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1cb00c3, PIXMAN_a4r4g4b4, 0x5cff, PIXMAN_r5g6b5, 0x4008 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ff0000, PIXMAN_r3g3b2, 0x2a, PIXMAN_r5g6b5, 0xc5fb }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_a8r8g8b8, 0xea005a88, PIXMAN_r3g3b2, 0xb3 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100ffea, PIXMAN_a4r4g4b4, 0x54eb, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x179ffff, PIXMAN_r3g3b2, 0xa4, PIXMAN_a8r8g8b8, 0x2400 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x17ad226, PIXMAN_r3g3b2, 0xa4, PIXMAN_r5g6b5, 0xe0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100ff01, PIXMAN_a2r2g2b2, 0x25, PIXMAN_a4r4g4b4, 0x50 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x20000ff, PIXMAN_a8r8g8b8, 0x2b00c127, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x200ff96, PIXMAN_a4r4g4b4, 0x2300, PIXMAN_r3g3b2, 0x6 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x200ffff, PIXMAN_r3g3b2, 0x87, PIXMAN_r5g6b5, 0x5bc8 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1fffff2, PIXMAN_r3g3b2, 0x7e, PIXMAN_a2r2g2b2, 0xe }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1ff8b00, PIXMAN_a4r4g4b4, 0xd500, PIXMAN_r3g3b2, 0x40 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ffffff, PIXMAN_a8r8g8b8, 0x1bff38, PIXMAN_a4r4g4b4, 0xf0 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x158ff39, PIXMAN_a4r4g4b4, 0x75dd, PIXMAN_a8r8g8b8, 0xdd31 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1009b70, PIXMAN_a4r4g4b4, 0xff40, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x12fb43f, PIXMAN_a4r4g4b4, 0x69ff, PIXMAN_a2r2g2b2, 0x4 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffff95, PIXMAN_a2r2g2b2, 0x84, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x200d188, PIXMAN_r5g6b5, 0xde6, PIXMAN_r5g6b5, 0x3 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2c70000, PIXMAN_r5g6b5, 0x24fa, 
PIXMAN_a8r8g8b8, 0x21a0000 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x100ff24, PIXMAN_a4r4g4b4, 0x835, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x10000cd, PIXMAN_a2r2g2b2, 0x7f, PIXMAN_a2r2g2b2, 0x1 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x379ffff, PIXMAN_a8r8g8b8, 0x23ffff00, PIXMAN_r5g6b5, 0x4eda }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x172e3ff, PIXMAN_r3g3b2, 0xa6, PIXMAN_r5g6b5, 0x100 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100f5ad, PIXMAN_a4r4g4b4, 0x7908, PIXMAN_a2r2g2b2, 0x0 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100fff9, PIXMAN_a2r2g2b2, 0xf1, PIXMAN_r3g3b2, 0x1 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x1abff00, PIXMAN_r5g6b5, 0x31ff, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x112ffd1, PIXMAN_r3g3b2, 0x9, PIXMAN_a2r2g2b2, 0xdd }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x100ffbf, PIXMAN_r3g3b2, 0x2c, PIXMAN_a4r4g4b4, 0x60 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffb7ff, PIXMAN_r3g3b2, 0x6b, PIXMAN_a4r4g4b4, 0x630 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x20005ff, PIXMAN_a4r4g4b4, 0x8462, PIXMAN_r5g6b5, 0xb1e8 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff5b00, PIXMAN_r5g6b5, 0x70ff, PIXMAN_r3g3b2, 0x60 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffffc3, PIXMAN_r3g3b2, 0x39, PIXMAN_a8r8g8b8, 0x200db41 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x306ffff, PIXMAN_a8r8g8b8, 0xdcffff1f, PIXMAN_a8r8g8b8, 0x306ff00 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x193daff, PIXMAN_a8r8g8b8, 0x69000000, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x2a200ff, PIXMAN_a8r8g8b8, 0x183aff00, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100f1a5, PIXMAN_a8r8g8b8, 0xb5fc21ff, PIXMAN_r5g6b5, 0xfe00 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1630019, PIXMAN_a8r8g8b8, 0x6affc400, PIXMAN_r5g6b5, 0x56ff }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff8bc2, PIXMAN_r3g3b2, 0xee, 
PIXMAN_r5g6b5, 0x1c0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x260ffff, PIXMAN_a4r4g4b4, 0x3f00, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x169ffed, PIXMAN_a8r8g8b8, 0xffffff3f, PIXMAN_a8r8g8b8, 0x169ff00 }, { PIXMAN_OP_CONJOINT_XOR, PIXMAN_a8r8g8b8, 0x154c181, PIXMAN_a4r4g4b4, 0x5100, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1e09c00, PIXMAN_r5g6b5, 0xca00, PIXMAN_a4r4g4b4, 0xb00 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ff8dff, PIXMAN_a8r8g8b8, 0x610038ff, PIXMAN_a8r8g8b8, 0x1001f02 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1e400ff, PIXMAN_a4r4g4b4, 0x66bd, PIXMAN_r3g3b2, 0x68 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x25362ff, PIXMAN_a4r4g4b4, 0x31ff, PIXMAN_a8r8g8b8, 0x111433 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x3ad0039, PIXMAN_r3g3b2, 0x26, PIXMAN_a8r8g8b8, 0x3000026 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2e442ef, PIXMAN_r3g3b2, 0x32, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1720000, PIXMAN_a8r8g8b8, 0x55fdea00, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x14bb0d7, PIXMAN_a8r8g8b8, 0x7fffff47, PIXMAN_a2r2g2b2, 0x0 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x13dffff, PIXMAN_a8r8g8b8, 0xa3860672, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x120495a, PIXMAN_a4r4g4b4, 0x407e, PIXMAN_a8r8g8b8, 0x54 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff8fff, PIXMAN_a2r2g2b2, 0x29, PIXMAN_r5g6b5, 0xa }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100a31a, PIXMAN_a4r4g4b4, 0xde4c, PIXMAN_a4r4g4b4, 0x1 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x1d4008c, PIXMAN_r3g3b2, 0x79, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ff0000, PIXMAN_a4r4g4b4, 0x7de4, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1b27e62, PIXMAN_a4r4g4b4, 0x7941, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x300ff00, PIXMAN_a8r8g8b8, 
0xfcff255e, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x2ff00b8, PIXMAN_a8r8g8b8, 0x19ff718d, PIXMAN_r5g6b5, 0x1802 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x235ff13, PIXMAN_a8r8g8b8, 0x34bcd9ff, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1006400, PIXMAN_a4r4g4b4, 0x7000, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff8bff, PIXMAN_a4r4g4b4, 0xfff4, PIXMAN_a4r4g4b4, 0xf80 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x24630ff, PIXMAN_a8r8g8b8, 0x1f00000b, PIXMAN_a8r8g8b8, 0x9061f }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff8a00, PIXMAN_a8r8g8b8, 0x79ffab00, PIXMAN_r5g6b5, 0x7a00 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x19807ff, PIXMAN_a4r4g4b4, 0x6794, PIXMAN_a8r8g8b8, 0xff002e00 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x10000da, PIXMAN_a4r4g4b4, 0xf864, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffffde, PIXMAN_a2r2g2b2, 0x94, PIXMAN_a8r8g8b8, 0x1000000 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x200c800, PIXMAN_r5g6b5, 0xe9d4, PIXMAN_a8r8g8b8, 0x2c00 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff00c9, PIXMAN_r3g3b2, 0x4c, PIXMAN_r5g6b5, 0x4800 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x122d5ff, PIXMAN_r5g6b5, 0x418b, PIXMAN_a4r4g4b4, 0x25 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ffff55, PIXMAN_a2r2g2b2, 0x1c, PIXMAN_a8r8g8b8, 0xff00 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x135ffff, PIXMAN_r5g6b5, 0x39c4, PIXMAN_r5g6b5, 0xb7 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x100d2c3, PIXMAN_r3g3b2, 0x2a, PIXMAN_a8r8g8b8, 0x3c00 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x17268ff, PIXMAN_a8r8g8b8, 0x7c00ffff, PIXMAN_r5g6b5, 0x318f }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ff00ff, PIXMAN_r3g3b2, 0x68, PIXMAN_r3g3b2, 0xb4 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x200ffff, PIXMAN_r5g6b5, 0xff86, PIXMAN_a8r8g8b8, 0x200f300 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 
0x18a23ff, PIXMAN_a2r2g2b2, 0x44, PIXMAN_a4r4g4b4, 0x205 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x16bff23, PIXMAN_a8r8g8b8, 0x31fd00ff, PIXMAN_r3g3b2, 0x7 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x137d1ff, PIXMAN_a4r4g4b4, 0x56c1, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff5bff, PIXMAN_a4r4g4b4, 0xfff4, PIXMAN_a4r4g4b4, 0xf50 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x15c6b00, PIXMAN_a8r8g8b8, 0x7d008a, PIXMAN_a4r4g4b4, 0x200 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x12091ff, PIXMAN_a8r8g8b8, 0xb74cff6b, PIXMAN_a2r2g2b2, 0x8 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ff5bff, PIXMAN_a8r8g8b8, 0xff6ddce8, PIXMAN_a2r2g2b2, 0x10 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100ffff, PIXMAN_a4r4g4b4, 0xffb7, PIXMAN_a4r4g4b4, 0xb0 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x173ffff, PIXMAN_r5g6b5, 0xff2c, PIXMAN_a4r4g4b4, 0x6 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x17102ff, PIXMAN_a8r8g8b8, 0x955bff66, PIXMAN_a8r8g8b8, 0x280066 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x3c7ff24, PIXMAN_r5g6b5, 0xc4, PIXMAN_r5g6b5, 0x163 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100c2a6, PIXMAN_r5g6b5, 0xa9b9, PIXMAN_a4r4g4b4, 0x8 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x26049ff, PIXMAN_a4r4g4b4, 0xb2, PIXMAN_r5g6b5, 0x8904 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2f100ff, PIXMAN_r3g3b2, 0x30, PIXMAN_a8r8g8b8, 0x2220100 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ffff88, PIXMAN_r3g3b2, 0x7e, PIXMAN_r3g3b2, 0x60 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x153ffab, PIXMAN_a8r8g8b8, 0xfd10725a, PIXMAN_r3g3b2, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff00d2, PIXMAN_r5g6b5, 0xff6b, PIXMAN_a8r8g8b8, 0x101014a }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x100d965, PIXMAN_a8r8g8b8, 0xff007b00, PIXMAN_r3g3b2, 0xc }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ec0000, PIXMAN_r5g6b5, 0x6fff, 
PIXMAN_r5g6b5, 0x6000 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x19d59a2, PIXMAN_a8r8g8b8, 0x4a00ff7a, PIXMAN_a8r8g8b8, 0x2e1a2f }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1eb0000, PIXMAN_a4r4g4b4, 0x72bc, PIXMAN_r5g6b5, 0x1800 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100ffff, PIXMAN_a4r4g4b4, 0xc034, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x195ff15, PIXMAN_a4r4g4b4, 0xb7b1, PIXMAN_r5g6b5, 0x4000 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ffdf94, PIXMAN_a4r4g4b4, 0x78, PIXMAN_r3g3b2, 0xc }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x26f00ff, PIXMAN_a4r4g4b4, 0xff93, PIXMAN_r5g6b5, 0x1dd2 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x2ff3fc5, PIXMAN_r3g3b2, 0x2f, PIXMAN_a8r8g8b8, 0x240000 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1ff696e, PIXMAN_a4r4g4b4, 0x22ff, PIXMAN_r5g6b5, 0x34d }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x10033d9, PIXMAN_a8r8g8b8, 0x38650000, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffff00, PIXMAN_a4r4g4b4, 0x2070, PIXMAN_r5g6b5, 0x2100 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1008746, PIXMAN_a8r8g8b8, 0xb56971, PIXMAN_r5g6b5, 0xc25c }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x144d200, PIXMAN_a4r4g4b4, 0xff42, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1ffffd0, PIXMAN_r5g6b5, 0x5b00, PIXMAN_r3g3b2, 0x4c }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x10000ff, PIXMAN_a8r8g8b8, 0xff006f, PIXMAN_r5g6b5, 0xd }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x10666ff, PIXMAN_a4r4g4b4, 0x39b2, PIXMAN_r5g6b5, 0xa6 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x11a007d, PIXMAN_r3g3b2, 0xf9, PIXMAN_a8r8g8b8, 0x11a0000 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1eb90ee, PIXMAN_r5g6b5, 0xd, PIXMAN_a2r2g2b2, 0x1 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ff42d5, PIXMAN_a4r4g4b4, 0x3400, PIXMAN_r3g3b2, 0x40 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 
0x1dfff00, PIXMAN_a8r8g8b8, 0x3ffff9d2, PIXMAN_r5g6b5, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff6500, PIXMAN_a2r2g2b2, 0x56, PIXMAN_r3g3b2, 0x44 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x119ffe6, PIXMAN_r3g3b2, 0x8d, PIXMAN_a4r4g4b4, 0xff00 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x100cd00, PIXMAN_r5g6b5, 0x33ff, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x569ffd7, PIXMAN_r5g6b5, 0x8cc, PIXMAN_r5g6b5, 0xc0 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100876a, PIXMAN_a8r8g8b8, 0x575447a5, PIXMAN_r5g6b5, 0x164 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x12d00ff, PIXMAN_a4r4g4b4, 0x3fff, PIXMAN_a4r4g4b4, 0x0 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x2ff953b, PIXMAN_a4r4g4b4, 0x2914, PIXMAN_r5g6b5, 0x20a1 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ffead4, PIXMAN_a8r8g8b8, 0xff00ea4e, PIXMAN_r3g3b2, 0x5a }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x1ff6400, PIXMAN_a2r2g2b2, 0x99, PIXMAN_r5g6b5, 0xa620 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x17b0084, PIXMAN_r3g3b2, 0xbd, PIXMAN_a4r4g4b4, 0x500 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x4f90bbb, PIXMAN_a8r8g8b8, 0xff00d21f, PIXMAN_a8r8g8b8, 0xfb00fc4a }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ffbb1d, PIXMAN_a8r8g8b8, 0x2dff79ff, PIXMAN_r5g6b5, 0x2c0 }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100ffff, PIXMAN_a2r2g2b2, 0x43, PIXMAN_a4r4g4b4, 0x6f }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1f000ff, PIXMAN_a4r4g4b4, 0xb393, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1c60020, PIXMAN_a8r8g8b8, 0x6bffffff, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x1727d00, PIXMAN_a2r2g2b2, 0x67, PIXMAN_a4r4g4b4, 0x400 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x14a5194, PIXMAN_a4r4g4b4, 0xd7ff, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x20003fa, PIXMAN_a4r4g4b4, 
0x24ff, PIXMAN_a8r8g8b8, 0xffff1550 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1a6ff83, PIXMAN_a4r4g4b4, 0xf400, PIXMAN_r5g6b5, 0x2800 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ffcf00, PIXMAN_r5g6b5, 0x71ff, PIXMAN_a4r4g4b4, 0x30 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x333ffff, PIXMAN_a4r4g4b4, 0x2c00, PIXMAN_r3g3b2, 0x4 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1c2ffe8, PIXMAN_r5g6b5, 0xc200, PIXMAN_a8r8g8b8, 0xfeca41ff }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a2r2g2b2, 0x47, PIXMAN_a8r8g8b8, 0x2ffff00, PIXMAN_a8r8g8b8, 0x3aa0102 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffeb00, PIXMAN_a4r4g4b4, 0xb493, PIXMAN_a4r4g4b4, 0x400 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2afffff, PIXMAN_r5g6b5, 0xcb, PIXMAN_r5g6b5, 0xc0 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x183ff00, PIXMAN_r3g3b2, 0x87, PIXMAN_r5g6b5, 0xae91 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x3ffff00, PIXMAN_a4r4g4b4, 0x2ba4, PIXMAN_r5g6b5, 0x2100 }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x215cbc2, PIXMAN_a4r4g4b4, 0xafd3, PIXMAN_a8r8g8b8, 0x115b000 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1853f65, PIXMAN_a8r8g8b8, 0xc68cdc41, PIXMAN_r5g6b5, 0x3 }, { PIXMAN_OP_CONJOINT_IN, PIXMAN_a8r8g8b8, 0x3ffff8f, PIXMAN_a4r4g4b4, 0x8824, PIXMAN_a4r4g4b4, 0x20 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x28e08e6, PIXMAN_a8r8g8b8, 0x2cffff31, PIXMAN_r5g6b5, 0x1805 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x1b500be, PIXMAN_r5g6b5, 0xd946, PIXMAN_r5g6b5, 0x9800 }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x133ffb3, PIXMAN_a2r2g2b2, 0x42, PIXMAN_a8r8g8b8, 0x11553c }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x21aff81, PIXMAN_r3g3b2, 0xc7, PIXMAN_r5g6b5, 0x120 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x12e004f, PIXMAN_a4r4g4b4, 0xf617, PIXMAN_a4r4g4b4, 0x102 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x164861f, PIXMAN_r3g3b2, 0x4e, PIXMAN_r5g6b5, 0x19c0 }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, 
PIXMAN_a8r8g8b8, 0x1ff0eff, PIXMAN_a8r8g8b8, 0xff5c00aa, PIXMAN_r5g6b5, 0x5800 }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x1e4c60f, PIXMAN_a8r8g8b8, 0x38ff0e0c, PIXMAN_a4r4g4b4, 0xff2a }, { PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x1ff0000, PIXMAN_a8r8g8b8, 0x9f3d6700, PIXMAN_r5g6b5, 0xf3ff }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x205ffd0, PIXMAN_a8r8g8b8, 0xffc22b3b, PIXMAN_a8r8g8b8, 0x2040000 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x1ff0059, PIXMAN_r5g6b5, 0x74ff, PIXMAN_a8r8g8b8, 0x1730101 }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x29affb8, PIXMAN_r5g6b5, 0xff, PIXMAN_a8r8g8b8, 0x2d25cff }, { PIXMAN_OP_DISJOINT_OUT, PIXMAN_a8r8g8b8, 0x1ffff8b, PIXMAN_a4r4g4b4, 0xff7b, PIXMAN_r5g6b5, 0x3a0 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x2a86ad7, PIXMAN_a4r4g4b4, 0xdc22, PIXMAN_a8r8g8b8, 0x2860000 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x3ff00ff, PIXMAN_r3g3b2, 0x33, PIXMAN_r5g6b5, 0x2000 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1e50063, PIXMAN_a8r8g8b8, 0x35ff95d7, PIXMAN_r3g3b2, 0x20 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x2ffe6ff, PIXMAN_a8r8g8b8, 0x153ef297, PIXMAN_r5g6b5, 0x6d2 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x34ffeff, PIXMAN_a4r4g4b4, 0x2e, PIXMAN_r5g6b5, 0x1d }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x2ffeada, PIXMAN_r5g6b5, 0xabc6, PIXMAN_a8r8g8b8, 0xfd15b256 }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a8r8g8b8, 0x100ff00, PIXMAN_a8r8g8b8, 0xcff3f32, PIXMAN_a8r8g8b8, 0x3f00 }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x1e1b0f1, PIXMAN_a8r8g8b8, 0xff63ff54, PIXMAN_r3g3b2, 0x5d }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0x2ffff23, PIXMAN_a8r8g8b8, 0x380094ff, PIXMAN_r5g6b5, 0x3a4b }, { PIXMAN_OP_CONJOINT_ATOP, PIXMAN_a4r4g4b4, 0x1000, PIXMAN_r5g6b5, 0xca, PIXMAN_a8r8g8b8, 0x3434500 }, { PIXMAN_OP_DISJOINT_IN, PIXMAN_a8r8g8b8, 0x195ffe5, PIXMAN_a4r4g4b4, 0x3a29, PIXMAN_a8r8g8b8, 0x0 }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a8r8g8b8, 0x139007a, PIXMAN_a4r4g4b4, 0x4979, 
PIXMAN_r5g6b5, 0x84 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xa9, PIXMAN_a4r4g4b4, 0xfa18, PIXMAN_a8r8g8b8, 0xabff67ff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x94, PIXMAN_a4r4g4b4, 0x5109, PIXMAN_a8r8g8b8, 0x3affffff }, { PIXMAN_OP_COLOR_BURN, PIXMAN_r5g6b5, 0xd038, PIXMAN_r5g6b5, 0xff00, PIXMAN_r5g6b5, 0xf9a5 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0x543128ff, PIXMAN_a8r8g8b8, 0x7029ff, PIXMAN_a8r8g8b8, 0x316b1d7 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_r5g6b5, 0x53ff, PIXMAN_r5g6b5, 0x72ff, PIXMAN_a8r8g8b8, 0xffffdeff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0x5b00002b, PIXMAN_a4r4g4b4, 0xc3, PIXMAN_a8r8g8b8, 0x23530be }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0xcefc0041, PIXMAN_a8r8g8b8, 0xf60d02, PIXMAN_a8r8g8b8, 0x1f2ffe5 }, { PIXMAN_OP_COLOR_DODGE, PIXMAN_r5g6b5, 0xffdb, PIXMAN_r5g6b5, 0xc700, PIXMAN_r5g6b5, 0x654 }, { PIXMAN_OP_COLOR_DODGE, PIXMAN_r5g6b5, 0xffc6, PIXMAN_r5g6b5, 0xff09, PIXMAN_r5g6b5, 0xfe58 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x95, PIXMAN_r5g6b5, 0x1b4a, PIXMAN_a8r8g8b8, 0xab234cff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x95, PIXMAN_a4r4g4b4, 0x5e99, PIXMAN_a8r8g8b8, 0x3b1c1cdd }, { PIXMAN_OP_COLOR_BURN, PIXMAN_r5g6b5, 0x22, PIXMAN_r5g6b5, 0xd00, PIXMAN_r5g6b5, 0xfbb1 }, { PIXMAN_OP_COLOR_DODGE, PIXMAN_r5g6b5, 0xffc8, PIXMAN_a8r8g8b8, 0xa1a3ffff, PIXMAN_r5g6b5, 0x44a }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0xffff7cff, PIXMAN_r5g6b5, 0x900, PIXMAN_a8r8g8b8, 0xffff94ec }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xa7, PIXMAN_r5g6b5, 0xff, PIXMAN_a8r8g8b8, 0xaa00cffe }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0x85, PIXMAN_r5g6b5, 0xffb3, PIXMAN_a8r8g8b8, 0xaaffff4a }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a8r8g8b8, 0x3500a118, PIXMAN_a4r4g4b4, 0x9942, PIXMAN_a8r8g8b8, 0x01ff405e }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xb5, PIXMAN_x4a4, 0xe, PIXMAN_a8r8g8b8, 0xffbaff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a4r4g4b4, 0xe872, PIXMAN_x2r10g10b10, 0xa648ff00, PIXMAN_a2r10g10b10, 0x14ff00e8, }, { PIXMAN_OP_SATURATE, 
PIXMAN_a8r8g8b8, 0x4d2db34, PIXMAN_a8, 0x19, PIXMAN_r5g6b5, 0x9700, }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x2ff0076, PIXMAN_a8r8g8b8, 0x2a0000, PIXMAN_r3g3b2, 0x0, }, { PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_a8r8g8b8, 0x14f00ff, PIXMAN_r5g6b5, 0xd48, PIXMAN_a4r4g4b4, 0x0, }, { PIXMAN_OP_CONJOINT_OUT, PIXMAN_a8r8g8b8, 0x3d8bbff, PIXMAN_r5g6b5, 0x6900, PIXMAN_a8r8g8b8, 0x0, }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x2ff00ff, PIXMAN_a4r4g4b4, 0x2300, PIXMAN_r3g3b2, 0x0, }, { PIXMAN_OP_SATURATE, PIXMAN_a8r8g8b8, 0x4d2db34, PIXMAN_a8r8g8b8, 0xff0019ff, PIXMAN_r5g6b5, 0x9700, }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0x100ac05, PIXMAN_r3g3b2, 0xef, PIXMAN_a2r2g2b2, 0xff, }, { PIXMAN_OP_EXCLUSION, PIXMAN_a2r2g2b2, 0xbf, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0x7e }, { PIXMAN_OP_DIFFERENCE, PIXMAN_r5g6b5, 0xffff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x33 }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_a8r8g8b8, 0x84c4ffd7, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xffddff }, { PIXMAN_OP_EXCLUSION, PIXMAN_a8r8g8b8, 0xff6e56, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x20ff1ade }, { PIXMAN_OP_OVERLAY, PIXMAN_a4r4g4b4, 0xfe0, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xbdff }, { PIXMAN_OP_SCREEN, PIXMAN_a8r8g8b8, 0x9671ff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x43 }, { PIXMAN_OP_EXCLUSION, PIXMAN_a2r2g2b2, 0xff, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x39ff }, { PIXMAN_OP_EXCLUSION, PIXMAN_r5g6b5, 0xffff, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x1968 }, { PIXMAN_OP_EXCLUSION, PIXMAN_a4r4g4b4, 0x4247, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xd8ffff }, { PIXMAN_OP_EXCLUSION, PIXMAN_r5g6b5, 0xff00, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x79 }, { PIXMAN_OP_DIFFERENCE, PIXMAN_r3g3b2, 0xe0, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x39 }, { PIXMAN_OP_EXCLUSION, PIXMAN_a8r8g8b8, 0xfff8, PIXMAN_null, 0x00, PIXMAN_r3g3b2, 0xff }, { PIXMAN_OP_COLOR_DODGE, PIXMAN_r5g6b5, 0x75fc, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0x11ff, }, { PIXMAN_OP_COLOR_BURN, PIXMAN_r3g3b2, 0x52, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0xc627 
}, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0x9f2b, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x4b00e7f5 }, { PIXMAN_OP_OVERLAY, PIXMAN_a8r8g8b8, 0x00dfff5c, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0x5e0f, }, { PIXMAN_OP_COLOR_BURN, PIXMAN_a8r8g8b8, 0xff00121b, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0x3776 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_r5g6b5, 0x03e0, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x01003c00, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 0x0f00c300, PIXMAN_null, 0x00, PIXMAN_x14r6g6b6, 0x003c0, }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a4r4g4b4, 0xd0c0, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x5300ea00, }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 0x20c6bf00, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0xb9ff }, { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, 0x204ac7ff, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0xc1ff }, { PIXMAN_OP_OVER_REVERSE, PIXMAN_r5g6b5, 0xffc3, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x102d00dd }, { PIXMAN_OP_OVER_REVERSE, PIXMAN_r5g6b5, 0x1f00, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x1bdf0c89 }, { PIXMAN_OP_OVER_REVERSE, PIXMAN_r5g6b5, 0xf9d2, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x1076bcf7 }, { PIXMAN_OP_OVER_REVERSE, PIXMAN_r5g6b5, 0x00c3, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x1bfe9ae5 }, { PIXMAN_OP_OVER_REVERSE, PIXMAN_r5g6b5, 0x09ff, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x0b00c16c }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a2r2g2b2, 0xbc, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x9efff1ff }, { PIXMAN_OP_DISJOINT_ATOP, PIXMAN_a4r4g4b4, 0xae5f, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xf215b675 }, { PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_a8r8g8b8, 0xce007980, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x80ffe4ad }, { PIXMAN_OP_DISJOINT_XOR, PIXMAN_a8r8g8b8, 0xb8b07bea, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x939c }, { PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_r5g6b5, 0x0063, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x10bb1ed7, }, { PIXMAN_OP_EXCLUSION, PIXMAN_a2r2g2b2, 0xbf, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0x7e }, { PIXMAN_OP_LIGHTEN, PIXMAN_a8r8g8b8, 0xffffff, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xff3fffff }, { 
PIXMAN_OP_SOFT_LIGHT, PIXMAN_r3g3b2, 0x38, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x5b }, { PIXMAN_OP_COLOR_DODGE, PIXMAN_a8r8g8b8, 0x2e9effff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x77 }, { PIXMAN_OP_DIFFERENCE, PIXMAN_r5g6b5, 0xffff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x33 }, { PIXMAN_OP_OVERLAY, PIXMAN_a8r8g8b8, 0xd0089ff, PIXMAN_null, 0x00, PIXMAN_r3g3b2, 0xb1 }, { PIXMAN_OP_OVERLAY, PIXMAN_r3g3b2, 0x8a, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xcd0004 }, { PIXMAN_OP_COLOR_BURN, PIXMAN_a8r8g8b8, 0xffff1e3a, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xcf00 }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_a8r8g8b8, 0x84c4ffd7, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xffddff }, { PIXMAN_OP_DIFFERENCE, PIXMAN_a4r4g4b4, 0xfd75, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x7f }, { PIXMAN_OP_LIGHTEN, PIXMAN_r3g3b2, 0xff, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x63ff }, { PIXMAN_OP_EXCLUSION, PIXMAN_a8r8g8b8, 0xff6e56, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x20ff1ade }, { PIXMAN_OP_OVERLAY, PIXMAN_a4r4g4b4, 0xfe0, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xbdff }, { PIXMAN_OP_OVERLAY, PIXMAN_r5g6b5, 0x9799, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x8d }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_a8r8g8b8, 0xe8ff1c33, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0x6200 }, { PIXMAN_OP_DIFFERENCE, PIXMAN_a8r8g8b8, 0x22ffffff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x63 }, { PIXMAN_OP_SCREEN, PIXMAN_a8r8g8b8, 0x9671ff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x43 }, { PIXMAN_OP_LIGHTEN, PIXMAN_a2r2g2b2, 0x83, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0xff }, { PIXMAN_OP_OVERLAY, PIXMAN_r3g3b2, 0x0, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x97 }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_r5g6b5, 0xb900, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x6800ff00 }, { PIXMAN_OP_OVERLAY, PIXMAN_a4r4g4b4, 0xff, PIXMAN_null, 0x00, PIXMAN_r3g3b2, 0x8e }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a4r4g4b4, 0xff00, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0xbc }, { PIXMAN_OP_DIFFERENCE, PIXMAN_r5g6b5, 0xfffe, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x90 }, { PIXMAN_OP_LIGHTEN, PIXMAN_r3g3b2, 0xff, PIXMAN_null, 
0x00, PIXMAN_a8r8g8b8, 0xc35f }, { PIXMAN_OP_EXCLUSION, PIXMAN_a2r2g2b2, 0xff, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x39ff }, { PIXMAN_OP_LIGHTEN, PIXMAN_a2r2g2b2, 0x1e, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xbaff }, { PIXMAN_OP_LIGHTEN, PIXMAN_a8r8g8b8, 0xb4ffff26, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0xff }, { PIXMAN_OP_COLOR_DODGE, PIXMAN_a4r4g4b4, 0xe3ff, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x878b }, { PIXMAN_OP_OVERLAY, PIXMAN_a8r8g8b8, 0xff700044, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x6 }, { PIXMAN_OP_DARKEN, PIXMAN_a2r2g2b2, 0xb6, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xcd00 }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_a2r2g2b2, 0xfe, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x12 }, { PIXMAN_OP_LIGHTEN, PIXMAN_a8r8g8b8, 0xb1ff006c, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xff7c }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r3g3b2, 0x4e, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x3c }, { PIXMAN_OP_EXCLUSION, PIXMAN_r5g6b5, 0xffff, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0x1968 }, { PIXMAN_OP_COLOR_BURN, PIXMAN_r3g3b2, 0xe7, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x8cced6ac }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a4r4g4b4, 0xa500, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x1bff009d }, { PIXMAN_OP_DIFFERENCE, PIXMAN_r5g6b5, 0x45ff, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x32 }, { PIXMAN_OP_OVERLAY, PIXMAN_a2r2g2b2, 0x18, PIXMAN_null, 0x00, PIXMAN_r5g6b5, 0xdc00 }, { PIXMAN_OP_EXCLUSION, PIXMAN_a4r4g4b4, 0x4247, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xd8ffff }, { PIXMAN_OP_EXCLUSION, PIXMAN_r5g6b5, 0xff00, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x79 }, { PIXMAN_OP_COLOR_BURN, PIXMAN_r3g3b2, 0xf, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x9fff00ff }, { PIXMAN_OP_EXCLUSION, PIXMAN_a2r2g2b2, 0x93, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xff }, { PIXMAN_OP_LIGHTEN, PIXMAN_a2r2g2b2, 0xa3, PIXMAN_null, 0x00, PIXMAN_r3g3b2, 0xca }, { PIXMAN_OP_DIFFERENCE, PIXMAN_r3g3b2, 0xe0, PIXMAN_null, 0x00, PIXMAN_a2r2g2b2, 0x39 }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r3g3b2, 0x16, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x98ffff }, { PIXMAN_OP_LIGHTEN, PIXMAN_r3g3b2, 
0x96, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0x225f6c }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_a4r4g4b4, 0x12c7, PIXMAN_null, 0x00, PIXMAN_a4r4g4b4, 0xb100 }, { PIXMAN_OP_LIGHTEN, PIXMAN_a8r8g8b8, 0xffda91, PIXMAN_null, 0x00, PIXMAN_r3g3b2, 0x6a }, { PIXMAN_OP_EXCLUSION, PIXMAN_a8r8g8b8, 0xfff8, PIXMAN_null, 0x00, PIXMAN_r3g3b2, 0xff }, { PIXMAN_OP_SOFT_LIGHT, PIXMAN_a2r2g2b2, 0xff, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xf0ff48ca }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xf1ff, PIXMAN_r5g6b5, 0x6eff, PIXMAN_a8r8g8b8, 0xffffff, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xf1ff, PIXMAN_a8, 0xdf, PIXMAN_a8r8g8b8, 0xffffff, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xf1ff, PIXMAN_null, 0x00, PIXMAN_a8r8g8b8, 0xffffff, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xb867, PIXMAN_a4r4g4b4, 0x82d9, PIXMAN_a8r8g8b8, 0xffc5, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xa9f5, PIXMAN_r5g6b5, 0xadff, PIXMAN_a8r8g8b8, 0xffff00, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0x4900, PIXMAN_r5g6b5, 0x865c, PIXMAN_a8r8g8b8, 0xebff, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xd9ff, PIXMAN_a8r8g8b8, 0xffffffff, PIXMAN_a8r8g8b8, 0x8ff0d, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0x41ff, PIXMAN_a4r4g4b4, 0xcff, PIXMAN_a8r8g8b8, 0xe1ff00, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0x91ff, PIXMAN_a2r2g2b2, 0xf3, PIXMAN_a8r8g8b8, 0xe4ffb4, }, { PIXMAN_OP_HARD_LIGHT, PIXMAN_r5g6b5, 0xb9ff, PIXMAN_a2r2g2b2, 0xff, PIXMAN_a8r8g8b8, 0xffff, }, { PIXMAN_OP_OVERLAY, PIXMAN_a8r8g8b8, 0x473affff, PIXMAN_r5g6b5, 0x2b00, PIXMAN_r5g6b5, 0x1ff, }, { PIXMAN_OP_OVERLAY, PIXMAN_a8r8g8b8, 0xe4ff, PIXMAN_r3g3b2, 0xff, PIXMAN_r5g6b5, 0x89ff, }, }; static void fill (pixman_image_t *image, uint32_t pixel) { uint8_t *data = (uint8_t *)pixman_image_get_data (image); int bytes_per_pixel = PIXMAN_FORMAT_BPP (pixman_image_get_format (image)) / 8; int n_bytes = pixman_image_get_stride (image) * pixman_image_get_height (image); int i; switch (bytes_per_pixel) { case 4: for (i = 0; i < n_bytes / 4; ++i) ((uint32_t *)data)[i] = pixel; 
break; case 2: pixel &= 0xffff; for (i = 0; i < n_bytes / 2; ++i) ((uint16_t *)data)[i] = pixel; break; case 1: pixel &= 0xff; for (i = 0; i < n_bytes; ++i) ((uint8_t *)data)[i] = pixel; break; default: assert (0); break; } } static uint32_t access (pixman_image_t *image, int x, int y) { int bytes_per_pixel; int stride; uint32_t result; uint8_t *location; if (x < 0 || x >= image->bits.width || y < 0 || y >= image->bits.height) return 0; bytes_per_pixel = PIXMAN_FORMAT_BPP (image->bits.format) / 8; stride = image->bits.rowstride * 4; location = (uint8_t *)image->bits.bits + y * stride + x * bytes_per_pixel; if (bytes_per_pixel == 4) result = *(uint32_t *)location; else if (bytes_per_pixel == 2) result = *(uint16_t *)location; else if (bytes_per_pixel == 1) result = *(uint8_t *)location; else assert (0); return result; } static pixman_bool_t verify (int test_no, const pixel_combination_t *combination, int size, pixman_bool_t component_alpha) { pixman_image_t *src, *mask, *dest; pixel_checker_t src_checker, mask_checker, dest_checker; color_t source_color, mask_color, dest_color, reference_color; pixman_bool_t have_mask = (combination->mask_format != PIXMAN_null); pixman_bool_t result = TRUE; int i, j; /* Compute reference color */ pixel_checker_init (&src_checker, combination->src_format); if (have_mask) pixel_checker_init (&mask_checker, combination->mask_format); pixel_checker_init (&dest_checker, combination->dest_format); pixel_checker_convert_pixel_to_color ( &src_checker, combination->src_pixel, &source_color); if (combination->mask_format != PIXMAN_null) { pixel_checker_convert_pixel_to_color ( &mask_checker, combination->mask_pixel, &mask_color); } pixel_checker_convert_pixel_to_color ( &dest_checker, combination->dest_pixel, &dest_color); do_composite (combination->op, &source_color, have_mask? 
&mask_color : NULL, &dest_color, &reference_color, component_alpha); src = pixman_image_create_bits ( combination->src_format, size, size, NULL, -1); if (have_mask) { mask = pixman_image_create_bits ( combination->mask_format, size, size, NULL, -1); pixman_image_set_component_alpha (mask, component_alpha); } dest = pixman_image_create_bits ( combination->dest_format, size, size, NULL, -1); fill (src, combination->src_pixel); if (have_mask) fill (mask, combination->mask_pixel); fill (dest, combination->dest_pixel); pixman_image_composite32 ( combination->op, src, have_mask ? mask : NULL, dest, 0, 0, 0, 0, 0, 0, size, size); for (j = 0; j < size; ++j) { for (i = 0; i < size; ++i) { uint32_t computed = access (dest, i, j); int32_t a, r, g, b; if (!pixel_checker_check (&dest_checker, computed, &reference_color)) { printf ("----------- Test %d failed ----------\n", test_no); printf (" operator: %s (%s)\n", operator_name (combination->op), have_mask? component_alpha ? "component alpha" : "unified alpha" : "no mask"); printf (" src format: %s\n", format_name (combination->src_format)); if (have_mask != PIXMAN_null) printf (" mask format: %s\n", format_name (combination->mask_format)); printf (" dest format: %s\n", format_name (combination->dest_format)); printf (" - source ARGB: %f %f %f %f (pixel: %8x)\n", source_color.a, source_color.r, source_color.g, source_color.b, combination->src_pixel); pixel_checker_split_pixel (&src_checker, combination->src_pixel, &a, &r, &g, &b); printf (" %8d %8d %8d %8d\n", a, r, g, b); if (have_mask) { printf (" - mask ARGB: %f %f %f %f (pixel: %8x)\n", mask_color.a, mask_color.r, mask_color.g, mask_color.b, combination->mask_pixel); pixel_checker_split_pixel (&mask_checker, combination->mask_pixel, &a, &r, &g, &b); printf (" %8d %8d %8d %8d\n", a, r, g, b); } printf (" - dest ARGB: %f %f %f %f (pixel: %8x)\n", dest_color.a, dest_color.r, dest_color.g, dest_color.b, combination->dest_pixel); pixel_checker_split_pixel (&dest_checker, 
combination->dest_pixel, &a, &r, &g, &b); printf (" %8d %8d %8d %8d\n", a, r, g, b); pixel_checker_split_pixel (&dest_checker, computed, &a, &r, &g, &b); printf (" - expected ARGB: %f %f %f %f\n", reference_color.a, reference_color.r, reference_color.g, reference_color.b); pixel_checker_get_min (&dest_checker, &reference_color, &a, &r, &g, &b); printf (" min acceptable: %8d %8d %8d %8d\n", a, r, g, b); pixel_checker_split_pixel (&dest_checker, computed, &a, &r, &g, &b); printf (" got: %8d %8d %8d %8d (pixel: %8x)\n", a, r, g, b, computed); pixel_checker_get_max (&dest_checker, &reference_color, &a, &r, &g, &b); printf (" max acceptable: %8d %8d %8d %8d\n", a, r, g, b); result = FALSE; goto done; } } } done: pixman_image_unref (src); pixman_image_unref (dest); if (have_mask) pixman_image_unref (mask); return result; } int main (int argc, char **argv) { int result = 0; int i, j; int lo, hi; if (argc > 1) { lo = atoi (argv[1]); hi = lo + 1; } else { lo = 0; hi = ARRAY_LENGTH (regressions); } for (i = lo; i < hi; ++i) { const pixel_combination_t *combination = &(regressions[i]); for (j = 1; j < 34; ++j) { int k, ca; ca = combination->mask_format == PIXMAN_null ? 
1 : 2; for (k = 0; k < ca; ++k) { if (!verify (i, combination, j, k)) { result = 1; goto next_regression; } } } next_regression: ; } return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/prng-test.c0000664000175000017500000001242214712446423016165 0ustar00mattst88mattst88/* * Copyright Âİ 2012 Siarhei Siamashka * * Based on the public domain implementation of small noncryptographic PRNG * authored by Bob Jenkins: http://burtleburtle.net/bob/rand/smallprng.html * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include #include #include "utils-prng.h" #include "utils.h" /* The original code from http://www.burtleburtle.net/bob/rand/smallprng.html */ typedef uint32_t u4; typedef struct ranctx { u4 a; u4 b; u4 c; u4 d; } ranctx; #define rot(x,k) (((x)<<(k))|((x)>>(32-(k)))) u4 ranval( ranctx *x ) { u4 e = x->a - rot(x->b, 27); x->a = x->b ^ rot(x->c, 17); x->b = x->c + x->d; x->c = x->d + e; x->d = e + x->a; return x->d; } void raninit( ranctx *x, u4 seed ) { u4 i; x->a = 0xf1ea5eed, x->b = x->c = x->d = seed; for (i=0; i<20; ++i) { (void)ranval(x); } } /*****************************************************************************/ #define BUFSIZE (8 * 1024 * 1024) #define N 50 void bench (void) { double t1, t2; int i; prng_t prng; uint8_t *buf = aligned_malloc (16, BUFSIZE + 1); prng_srand_r (&prng, 1234); t1 = gettime(); for (i = 0; i < N; i++) prng_randmemset_r (&prng, buf, BUFSIZE, 0); t2 = gettime(); printf ("aligned randmemset : %.2f MB/s\n", (double)BUFSIZE * N / 1000000. / (t2 - t1)); t1 = gettime(); for (i = 0; i < N; i++) prng_randmemset_r (&prng, buf + 1, BUFSIZE, 0); t2 = gettime(); printf ("unaligned randmemset : %.2f MB/s\n", (double)BUFSIZE * N / 1000000. / (t2 - t1)); t1 = gettime(); for (i = 0; i < N; i++) { prng_randmemset_r (&prng, buf, BUFSIZE, RANDMEMSET_MORE_00_AND_FF); } t2 = gettime (); printf ("aligned randmemset (more 00 and FF) : %.2f MB/s\n", (double)BUFSIZE * N / 1000000. / (t2 - t1)); t1 = gettime(); for (i = 0; i < N; i++) { prng_randmemset_r (&prng, buf + 1, BUFSIZE, RANDMEMSET_MORE_00_AND_FF); } t2 = gettime (); printf ("unaligned randmemset (more 00 and FF) : %.2f MB/s\n", (double)BUFSIZE * N / 1000000. 
/ (t2 - t1)); free (buf); } #define SMALLBUFSIZE 100 int main (int argc, char *argv[]) { const uint32_t ref_crc[RANDMEMSET_MORE_00_AND_FF + 1] = { 0xBA06763D, 0x103FC550, 0x8B59ABA5, 0xD82A0F39, 0xD2321099, 0xFD8C5420, 0xD3B7C42A, 0xFC098093, 0x85E01DE0, 0x6680F8F7, 0x4D32DD3C, 0xAE52382B, 0x149E6CB5, 0x8B336987, 0x15DCB2B3, 0x8A71B781 }; uint32_t crc1, crc2; uint32_t ref, seed, seed0, seed1, seed2, seed3; prng_rand_128_data_t buf; uint8_t *bytebuf = aligned_malloc(16, SMALLBUFSIZE + 1); ranctx x; prng_t prng; prng_randmemset_flags_t flags; if (argc > 1 && strcmp(argv[1], "-bench") == 0) { bench (); return 0; } /* basic test */ raninit (&x, 0); prng_srand_r (&prng, 0); assert (ranval (&x) == prng_rand_r (&prng)); /* test for simd code */ seed = 0; prng_srand_r (&prng, seed); seed0 = (seed = seed * 1103515245 + 12345); seed1 = (seed = seed * 1103515245 + 12345); seed2 = (seed = seed * 1103515245 + 12345); seed3 = (seed = seed * 1103515245 + 12345); prng_rand_128_r (&prng, &buf); raninit (&x, seed0); ref = ranval (&x); assert (ref == buf.w[0]); raninit (&x, seed1); ref = ranval (&x); assert (ref == buf.w[1]); raninit (&x, seed2); ref = ranval (&x); assert (ref == buf.w[2]); raninit (&x, seed3); ref = ranval (&x); assert (ref == buf.w[3]); /* test for randmemset */ for (flags = 0; flags <= RANDMEMSET_MORE_00_AND_FF; flags++) { prng_srand_r (&prng, 1234); prng_randmemset_r (&prng, bytebuf, 16, flags); prng_randmemset_r (&prng, bytebuf + 16, SMALLBUFSIZE - 17, flags); crc1 = compute_crc32 (0, bytebuf, SMALLBUFSIZE - 1); prng_srand_r (&prng, 1234); prng_randmemset_r (&prng, bytebuf + 1, SMALLBUFSIZE - 1, flags); crc2 = compute_crc32 (0, bytebuf + 1, SMALLBUFSIZE - 1); assert (ref_crc[flags] == crc1); assert (ref_crc[flags] == crc2); } free (bytebuf); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/radial-invalid.c0000664000175000017500000000241714712446423017125 0ustar00mattst88mattst88#include 
#include #include #include "utils.h" #define WIDTH 100 #define HEIGHT 100 int main () { pixman_image_t *radial; pixman_image_t *dest = pixman_image_create_bits ( PIXMAN_a8r8g8b8, WIDTH, HEIGHT, NULL, -1); static const pixman_transform_t xform = { { { 0x346f7, 0x0, 0x0 }, { 0x0, 0x346f7, 0x0 }, { 0x0, 0x0, 0x10000 } }, }; static const pixman_gradient_stop_t stops[] = { { 0xde61, { 0x4481, 0x96e8, 0x1e6a, 0x29e1 } }, { 0xfdd5, { 0xfa10, 0xcc26, 0xbc43, 0x1eb7 } }, { 0xfe1e, { 0xd257, 0x5bac, 0x6fc2, 0xa33b } }, }; static const pixman_point_fixed_t inner = { 0x320000, 0x320000 }; static const pixman_point_fixed_t outer = { 0x320000, 0x3cb074 }; enable_divbyzero_exceptions (); enable_invalid_exceptions (); radial = pixman_image_create_radial_gradient ( &inner, &outer, 0xab074, /* inner radius */ 0x0, /* outer radius */ stops, sizeof (stops) / sizeof (stops[0])); pixman_image_set_repeat (radial, PIXMAN_REPEAT_REFLECT); pixman_image_set_transform (radial, &xform); pixman_image_composite ( PIXMAN_OP_OVER, radial, NULL, dest, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); pixman_image_unref (radial); pixman_image_unref (dest); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/radial-perf-test.c0000664000175000017500000000311614712446423017405 0ustar00mattst88mattst88#include "utils.h" #include int main () { static const pixman_point_fixed_t inner = { 0x0000, 0x0000 }; static const pixman_point_fixed_t outer = { 0x0000, 0x0000 }; static const pixman_fixed_t r_inner = 0; static const pixman_fixed_t r_outer = 64 << 16; static const pixman_gradient_stop_t stops[] = { { 0x00000, { 0x6666, 0x6666, 0x6666, 0xffff } }, { 0x10000, { 0x0000, 0x0000, 0x0000, 0xffff } } }; static const pixman_transform_t transform = { { { 0x0, 0x26ee, 0x0}, { 0xffffeeef, 0x0, 0x0}, { 0x0, 0x0, 0x10000} } }; static const pixman_color_t z = { 0x0000, 0x0000, 0x0000, 0x0000 }; pixman_image_t *dest, *radial, *zero; int i; double before, after; 
dest = pixman_image_create_bits ( PIXMAN_x8r8g8b8, 640, 429, NULL, -1); zero = pixman_image_create_solid_fill (&z); radial = pixman_image_create_radial_gradient ( &inner, &outer, r_inner, r_outer, stops, ARRAY_LENGTH (stops)); pixman_image_set_transform (radial, &transform); pixman_image_set_repeat (radial, PIXMAN_REPEAT_PAD); #define N_COMPOSITE 500 before = gettime(); for (i = 0; i < N_COMPOSITE; ++i) { before -= gettime(); pixman_image_composite ( PIXMAN_OP_SRC, zero, NULL, dest, 0, 0, 0, 0, 0, 0, 640, 429); before += gettime(); pixman_image_composite32 ( PIXMAN_OP_OVER, radial, NULL, dest, - 150, -158, 0, 0, 0, 0, 640, 361); } after = gettime(); write_png (dest, "radial.png"); printf ("Average time to composite: %f\n", (after - before) / N_COMPOSITE); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/region-contains-test.c0000664000175000017500000000670514712446423020325 0ustar00mattst88mattst88#include #include #include "utils.h" static void make_random_region (pixman_region32_t *region) { int n_boxes; pixman_region32_init (region); n_boxes = prng_rand_n (64); while (n_boxes--) { int32_t x, y; uint32_t w, h; x = (int32_t)prng_rand() >> 2; y = (int32_t)prng_rand() >> 2; w = prng_rand() >> 2; h = prng_rand() >> 2; pixman_region32_union_rect (region, region, x, y, w, h); } } static void print_box (pixman_box32_t *box) { printf (" %d %d %d %d\n", box->x1, box->y1, box->x2, box->y2); } static int32_t random_coord (pixman_region32_t *region, pixman_bool_t x) { pixman_box32_t *b, *bb; int n_boxes; int begin, end; if (prng_rand_n (14)) { bb = pixman_region32_rectangles (region, &n_boxes); if (n_boxes == 0) goto use_extent; b = bb + prng_rand_n (n_boxes); } else { use_extent: b = pixman_region32_extents (region); n_boxes = 1; } if (x) { begin = b->x1; end = b->x2; } else { begin = b->y1; end = b->y2; } switch (prng_rand_n (5)) { case 0: return begin - prng_rand(); case 1: return end + prng_rand 
(); case 2: return end; case 3: return begin; default: return (end - begin) / 2 + begin; } return 0; } static uint32_t compute_crc32_u32 (uint32_t crc32, uint32_t v) { if (!is_little_endian()) { v = ((v & 0xff000000) >> 24) | ((v & 0x00ff0000) >> 8) | ((v & 0x0000ff00) << 8) | ((v & 0x000000ff) << 24); } return compute_crc32 (crc32, &v, sizeof (int32_t)); } static uint32_t crc32_box32 (uint32_t crc32, pixman_box32_t *box) { crc32 = compute_crc32_u32 (crc32, box->x1); crc32 = compute_crc32_u32 (crc32, box->y1); crc32 = compute_crc32_u32 (crc32, box->x2); crc32 = compute_crc32_u32 (crc32, box->y2); return crc32; } static uint32_t test_region_contains_rectangle (int i, int verbose) { pixman_box32_t box; pixman_box32_t rbox = { 0, 0, 0, 0 }; pixman_region32_t region; uint32_t r, r1, r2, r3, r4, crc32; prng_srand (i); make_random_region (®ion); box.x1 = random_coord (®ion, TRUE); box.x2 = box.x1 + prng_rand (); box.y1 = random_coord (®ion, FALSE); box.y2 = box.y1 + prng_rand (); if (verbose) { int n_rects; pixman_box32_t *boxes; boxes = pixman_region32_rectangles (®ion, &n_rects); printf ("region:\n"); while (n_rects--) print_box (boxes++); printf ("box:\n"); print_box (&box); } crc32 = 0; r1 = pixman_region32_contains_point (®ion, box.x1, box.y1, &rbox); crc32 = crc32_box32 (crc32, &rbox); r2 = pixman_region32_contains_point (®ion, box.x1, box.y2, &rbox); crc32 = crc32_box32 (crc32, &rbox); r3 = pixman_region32_contains_point (®ion, box.x2, box.y1, &rbox); crc32 = crc32_box32 (crc32, &rbox); r4 = pixman_region32_contains_point (®ion, box.x2, box.y2, &rbox); crc32 = crc32_box32 (crc32, &rbox); r = pixman_region32_contains_rectangle (®ion, &box); r = (i << 8) | (r << 4) | (r1 << 3) | (r2 << 2) | (r3 << 1) | (r4 << 0); crc32 = compute_crc32_u32 (crc32, r); if (verbose) printf ("results: %d %d %d %d %d\n", (r & 0xf0) >> 4, r1, r2, r3, r4); pixman_region32_fini (®ion); return crc32; } int main (int argc, const char *argv[]) { return fuzzer_test_main ("region_contains", 
1000000, 0x548E0F3F, test_region_contains_rectangle, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/region-test.c0000664000175000017500000000556014712446423016507 0ustar00mattst88mattst88#include #include #include #include "utils.h" int main () { pixman_region32_t r1; pixman_region32_t r2; pixman_region32_t r3; pixman_box32_t boxes[] = { { 10, 10, 20, 20 }, { 30, 30, 30, 40 }, { 50, 45, 60, 44 }, }; pixman_box32_t boxes2[] = { { 2, 6, 7, 6 }, { 4, 1, 6, 7 }, }; pixman_box32_t boxes3[] = { { 2, 6, 7, 6 }, { 4, 1, 6, 1 }, }; int i, j; pixman_box32_t *b; pixman_image_t *image, *fill; pixman_color_t white = { 0xffff, 0xffff, 0xffff, 0xffff }; prng_srand (0); /* This used to go into an infinite loop before pixman-region.c * was fixed to not use explict "short" variables */ pixman_region32_init_rect (&r1, 0, 0, 20, 64000); pixman_region32_init_rect (&r2, 0, 0, 20, 64000); pixman_region32_init_rect (&r3, 0, 0, 20, 64000); pixman_region32_subtract (&r1, &r2, &r3); /* This would produce a region containing an empty * rectangle in it. Such regions are considered malformed, * but using an empty rectangle for initialization should * work. */ pixman_region32_init_rects (&r1, boxes, 3); b = pixman_region32_rectangles (&r1, &i); assert (i == 1); while (i--) { assert (b[i].x1 < b[i].x2); assert (b[i].y1 < b[i].y2); } /* This would produce a rectangle containing the bounding box * of the two rectangles. The correct result is to eliminate * the broken rectangle. 
*/ pixman_region32_init_rects (&r1, boxes2, 2); b = pixman_region32_rectangles (&r1, &i); assert (i == 1); assert (b[0].x1 == 4); assert (b[0].y1 == 1); assert (b[0].x2 == 6); assert (b[0].y2 == 7); /* This should produce an empty region */ pixman_region32_init_rects (&r1, boxes3, 2); b = pixman_region32_rectangles (&r1, &i); assert (i == 0); fill = pixman_image_create_solid_fill (&white); for (i = 0; i < 100; i++) { int image_size = 128; pixman_region32_init (&r1); /* Add some random rectangles */ for (j = 0; j < 64; j++) pixman_region32_union_rect (&r1, &r1, prng_rand_n (image_size), prng_rand_n (image_size), prng_rand_n (25), prng_rand_n (25)); /* Clip to image size */ pixman_region32_init_rect (&r2, 0, 0, image_size, image_size); pixman_region32_intersect (&r1, &r1, &r2); pixman_region32_fini (&r2); /* render region to a1 mask */ image = pixman_image_create_bits (PIXMAN_a1, image_size, image_size, NULL, 0); pixman_image_set_clip_region32 (image, &r1); pixman_image_composite32 (PIXMAN_OP_SRC, fill, NULL, image, 0, 0, 0, 0, 0, 0, image_size, image_size); pixman_region32_init_from_image (&r2, image); pixman_image_unref (image); assert (pixman_region32_equal (&r1, &r2)); pixman_region32_fini (&r1); pixman_region32_fini (&r2); } pixman_image_unref (fill); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/region-translate-test.c0000664000175000017500000000134714712446423020501 0ustar00mattst88mattst88#include #include "utils.h" /* Pixman had a bug where 32bit regions where clipped to 16bit sizes when * pixman_region32_translate() was called. This test exercises that bug. 
*/ #define LARGE 32000 int main (int argc, char **argv) { pixman_box32_t rect = { -LARGE, -LARGE, LARGE, LARGE }; pixman_region32_t r1, r2; pixman_region32_init_rects (&r1, &rect, 1); pixman_region32_init_rect (&r2, rect.x1, rect.y1, rect.x2 - rect.x1, rect.y2 - rect.y1); assert (pixman_region32_equal (&r1, &r2)); pixman_region32_translate (&r1, -LARGE, LARGE); pixman_region32_translate (&r1, LARGE, -LARGE); assert (pixman_region32_equal (&r1, &r2)); pixman_region32_fini (&r1); pixman_region32_fini (&r2); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/rotate-test.c0000664000175000017500000000532714712446423016523 0ustar00mattst88mattst88#include #include "utils.h" #define WIDTH 32 #define HEIGHT 32 static const pixman_format_code_t formats[] = { PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, PIXMAN_x8b8g8r8, PIXMAN_r5g6b5, PIXMAN_b5g6r5, PIXMAN_a8, PIXMAN_a1, }; static const pixman_op_t ops[] = { PIXMAN_OP_OVER, PIXMAN_OP_SRC, PIXMAN_OP_ADD, }; #define TRANSFORM(v00, v01, v10, v11) \ { { { v00, v01, WIDTH * pixman_fixed_1 / 2 }, \ { v10, v11, HEIGHT * pixman_fixed_1 / 2 }, \ { 0, 0, pixman_fixed_1 } } } #define F1 pixman_fixed_1 static const pixman_transform_t transforms[] = { TRANSFORM (0, -1, 1, 0), /* wrong 90 degree rotation */ TRANSFORM (0, 1, -1, 0), /* wrong 270 degree rotation */ TRANSFORM (1, 0, 0, 1), /* wrong identity */ TRANSFORM (-1, 0, 0, -1), /* wrong 180 degree rotation */ TRANSFORM (0, -F1, F1, 0), /* correct 90 degree rotation */ TRANSFORM (0, F1, -F1, 0), /* correct 270 degree rotation */ TRANSFORM (F1, 0, 0, F1), /* correct identity */ TRANSFORM (-F1, 0, 0, -F1), /* correct 180 degree rotation */ }; #define RANDOM_FORMAT() \ (formats[prng_rand_n (ARRAY_LENGTH (formats))]) #define RANDOM_OP() \ (ops[prng_rand_n (ARRAY_LENGTH (ops))]) #define RANDOM_TRANSFORM() \ (&(transforms[prng_rand_n (ARRAY_LENGTH (transforms))])) static void on_destroy (pixman_image_t *image, 
void *data) { free (data); } static pixman_image_t * make_image (void) { pixman_format_code_t format = RANDOM_FORMAT(); uint32_t *bytes, *orig; pixman_image_t *image; int stride; orig = bytes = malloc (WIDTH * HEIGHT * 4); prng_randmemset (bytes, WIDTH * HEIGHT * 4, 0); stride = WIDTH * 4; if (prng_rand_n (2) == 0) { bytes += (stride / 4) * (HEIGHT - 1); stride = - stride; } image = pixman_image_create_bits ( format, WIDTH, HEIGHT, bytes, stride); pixman_image_set_transform (image, RANDOM_TRANSFORM()); pixman_image_set_destroy_function (image, on_destroy, orig); pixman_image_set_repeat (image, PIXMAN_REPEAT_NORMAL); image_endian_swap (image); return image; } static uint32_t test_transform (int testnum, int verbose) { pixman_image_t *src, *dest; uint32_t crc; prng_srand (testnum); src = make_image (); dest = make_image (); pixman_image_composite (RANDOM_OP(), src, NULL, dest, 0, 0, 0, 0, WIDTH / 2, HEIGHT / 2, WIDTH, HEIGHT); crc = compute_crc32_for_image (0, dest); pixman_image_unref (src); pixman_image_unref (dest); return crc; } int main (int argc, const char *argv[]) { return fuzzer_test_main ("rotate", 15000, 0x81E9EC2F, test_transform, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/scaling-bench.c0000664000175000017500000000370314712446423016741 0ustar00mattst88mattst88#include #include "utils.h" #define SOURCE_WIDTH 320 #define SOURCE_HEIGHT 240 #define TEST_REPEATS 3 static pixman_image_t * make_source (void) { size_t n_bytes = (SOURCE_WIDTH + 2) * (SOURCE_HEIGHT + 2) * 4; uint32_t *data = malloc (n_bytes); pixman_image_t *source; prng_randmemset (data, n_bytes, 0); source = pixman_image_create_bits ( PIXMAN_a8r8g8b8, SOURCE_WIDTH + 2, SOURCE_HEIGHT + 2, data, (SOURCE_WIDTH + 2) * 4); pixman_image_set_filter (source, PIXMAN_FILTER_BILINEAR, NULL, 0); return source; } int main () { double scale; pixman_image_t *src; prng_srand (23874); src = make_source (); printf ("# %-6s %-22s 
%-14s %-12s\n", "ratio", "resolutions", "time / ms", "time per pixel / ns"); for (scale = 0.1; scale < 10.005; scale += 0.01) { int i; int dest_width = SOURCE_WIDTH * scale + 0.5; int dest_height = SOURCE_HEIGHT * scale + 0.5; int dest_byte_stride = (dest_width * 4 + 15) & ~15; pixman_fixed_t s = (1 / scale) * 65536.0 + 0.5; pixman_transform_t transform; pixman_image_t *dest; double t1, t2, t = -1; uint32_t *dest_buf = aligned_malloc (16, dest_byte_stride * dest_height); memset (dest_buf, 0, dest_byte_stride * dest_height); pixman_transform_init_scale (&transform, s, s); pixman_image_set_transform (src, &transform); dest = pixman_image_create_bits ( PIXMAN_a8r8g8b8, dest_width, dest_height, dest_buf, dest_byte_stride); for (i = 0; i < TEST_REPEATS; i++) { t1 = gettime(); pixman_image_composite ( PIXMAN_OP_OVER, src, NULL, dest, scale, scale, 0, 0, 0, 0, dest_width, dest_height); t2 = gettime(); if (t < 0 || t2 - t1 < t) t = t2 - t1; } printf ("%6.2f : %4dx%-4d => %4dx%-4d : %12.4f : %12.4f\n", scale, SOURCE_WIDTH, SOURCE_HEIGHT, dest_width, dest_height, t * 1000, (t / (dest_width * dest_height)) * 1000000000); pixman_image_unref (dest); free (dest_buf); } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/scaling-crash-test.c0000664000175000017500000001263314712446423017741 0ustar00mattst88mattst88#include #include #include #include #include "utils.h" /* * We have a source image filled with solid color, set NORMAL or PAD repeat, * and some transform which results in nearest neighbour scaling. * * The expected result is either that the destination image filled with this solid * color or, if the transformation is such that we can't composite anything at * all, that nothing has changed in the destination. * * The surrounding memory of the source image is a different solid color so that * we are sure to get failures if we access it. 
*/ static int run_test (int32_t dst_width, int32_t dst_height, int32_t src_width, int32_t src_height, int32_t src_x, int32_t src_y, int32_t scale_x, int32_t scale_y, pixman_filter_t filter, pixman_repeat_t repeat) { pixman_image_t * src_img; pixman_image_t * dst_img; pixman_transform_t transform; uint32_t * srcbuf; uint32_t * dstbuf; pixman_color_t color_cc = { 0xcccc, 0xcccc, 0xcccc, 0xcccc }; pixman_image_t * solid; int result; int i; static const pixman_fixed_t kernel[] = { #define D(f) (pixman_double_to_fixed (f) + 0x0001) pixman_int_to_fixed (5), pixman_int_to_fixed (5), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0), D(1/25.0) }; result = 0; srcbuf = (uint32_t *)malloc ((src_width + 10) * (src_height + 10) * 4); dstbuf = (uint32_t *)malloc (dst_width * dst_height * 4); memset (srcbuf, 0x88, src_width * src_height * 4); memset (dstbuf, 0x33, dst_width * dst_height * 4); src_img = pixman_image_create_bits ( PIXMAN_a8r8g8b8, src_width, src_height, srcbuf + (src_width + 10) * 5 + 5, (src_width + 10) * 4); solid = pixman_image_create_solid_fill (&color_cc); pixman_image_composite32 (PIXMAN_OP_SRC, solid, NULL, src_img, 0, 0, 0, 0, 0, 0, src_width, src_height); pixman_image_unref (solid); dst_img = pixman_image_create_bits ( PIXMAN_a8r8g8b8, dst_width, dst_height, dstbuf, dst_width * 4); pixman_transform_init_scale (&transform, scale_x, scale_y); pixman_image_set_transform (src_img, &transform); pixman_image_set_repeat (src_img, repeat); if (filter == PIXMAN_FILTER_CONVOLUTION) pixman_image_set_filter (src_img, filter, kernel, 27); else pixman_image_set_filter (src_img, filter, NULL, 0); pixman_image_composite (PIXMAN_OP_SRC, src_img, NULL, dst_img, src_x, src_y, 0, 0, 0, 0, dst_width, dst_height); pixman_image_unref (src_img); pixman_image_unref 
(dst_img); for (i = 0; i < dst_width * dst_height; i++) { if (dstbuf[i] != 0xCCCCCCCC && dstbuf[i] != 0x33333333) { result = 1; break; } } free (srcbuf); free (dstbuf); return result; } typedef struct filter_info_t filter_info_t; struct filter_info_t { pixman_filter_t value; char name[28]; }; static const filter_info_t filters[] = { { PIXMAN_FILTER_NEAREST, "NEAREST" }, { PIXMAN_FILTER_BILINEAR, "BILINEAR" }, { PIXMAN_FILTER_CONVOLUTION, "CONVOLUTION" }, }; typedef struct repeat_info_t repeat_info_t; struct repeat_info_t { pixman_repeat_t value; char name[28]; }; static const repeat_info_t repeats[] = { { PIXMAN_REPEAT_PAD, "PAD" }, { PIXMAN_REPEAT_REFLECT, "REFLECT" }, { PIXMAN_REPEAT_NORMAL, "NORMAL" } }; static int do_test (int32_t dst_size, int32_t src_size, int32_t src_offs, int32_t scale_factor) { int i, j; for (i = 0; i < ARRAY_LENGTH (filters); ++i) { for (j = 0; j < ARRAY_LENGTH (repeats); ++j) { /* horizontal test */ if (run_test (dst_size, 1, src_size, 1, src_offs, 0, scale_factor, 65536, filters[i].value, repeats[j].value) != 0) { printf ("Vertical test failed with %s filter and repeat mode %s\n", filters[i].name, repeats[j].name); return 1; } /* vertical test */ if (run_test (1, dst_size, 1, src_size, 0, src_offs, 65536, scale_factor, filters[i].value, repeats[j].value) != 0) { printf ("Vertical test failed with %s filter and repeat mode %s\n", filters[i].name, repeats[j].name); return 1; } } } return 0; } int main (int argc, char *argv[]) { int i; pixman_disable_out_of_bounds_workaround (); /* can potentially crash */ assert (do_test ( 48000, 32767, 1, 65536 * 128) == 0); /* can potentially get into a deadloop */ assert (do_test ( 16384, 65536, 32, 32768) == 0); /* can potentially access memory outside source image buffer */ assert (do_test ( 10, 10, 0, 1) == 0); assert (do_test ( 10, 10, 0, 0) == 0); for (i = 0; i < 100; ++i) { pixman_fixed_t one_seventh = (((pixman_fixed_48_16_t)pixman_fixed_1) << 16) / (7 << 16); assert (do_test ( 1, 7, 3, 
one_seventh + i - 50) == 0); } for (i = 0; i < 100; ++i) { pixman_fixed_t scale = (((pixman_fixed_48_16_t)pixman_fixed_1) << 16) / (32767 << 16); assert (do_test ( 1, 32767, 16383, scale + i - 50) == 0); } /* can potentially provide invalid results (out of range matrix stuff) */ assert (do_test ( 48000, 32767, 16384, 65536 * 128) == 0); return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/scaling-helpers-test.c0000664000175000017500000000400114712446423020271 0ustar00mattst88mattst88#include #include #include #include "utils.h" #include "pixman-inlines.h" /* A trivial reference implementation for * 'bilinear_pad_repeat_get_scanline_bounds' */ static void bilinear_pad_repeat_get_scanline_bounds_ref (int32_t source_image_width, pixman_fixed_t vx_, pixman_fixed_t unit_x, int32_t * left_pad, int32_t * left_tz, int32_t * width, int32_t * right_tz, int32_t * right_pad) { int w = *width; int64_t vx = vx_; *left_pad = 0; *left_tz = 0; *width = 0; *right_tz = 0; *right_pad = 0; while (--w >= 0) { if (vx < 0) { if (vx + pixman_fixed_1 < 0) *left_pad += 1; else *left_tz += 1; } else if (vx + pixman_fixed_1 >= pixman_int_to_fixed (source_image_width)) { if (vx >= pixman_int_to_fixed (source_image_width)) *right_pad += 1; else *right_tz += 1; } else { *width += 1; } vx += unit_x; } } int main (void) { int i; prng_srand (0); for (i = 0; i < 10000; i++) { int32_t left_pad1, left_tz1, width1, right_tz1, right_pad1; int32_t left_pad2, left_tz2, width2, right_tz2, right_pad2; pixman_fixed_t vx = prng_rand_n(10000 << 16) - (3000 << 16); int32_t width = prng_rand_n(10000); int32_t source_image_width = prng_rand_n(10000) + 1; pixman_fixed_t unit_x = prng_rand_n(10 << 16) + 1; width1 = width2 = width; bilinear_pad_repeat_get_scanline_bounds_ref (source_image_width, vx, unit_x, &left_pad1, &left_tz1, &width1, &right_tz1, &right_pad1); bilinear_pad_repeat_get_scanline_bounds (source_image_width, vx, unit_x, 
&left_pad2, &left_tz2, &width2, &right_tz2, &right_pad2); assert (left_pad1 == left_pad2); assert (left_tz1 == left_tz2); assert (width1 == width2); assert (right_tz1 == right_tz2); assert (right_pad1 == right_pad2); } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/scaling-test.c0000664000175000017500000002632414712446423016645 0ustar00mattst88mattst88/* * Test program, which can detect some problems with nearest neighbour * and bilinear scaling in pixman. Testing is done by running lots * of random SRC and OVER compositing operations a8r8g8b8, x8a8r8g8b8 * and r5g6b5 color formats. * * Script 'fuzzer-find-diff.pl' can be used to narrow down the problem in * the case of test failure. */ #include #include #include "utils.h" #define MAX_SRC_WIDTH 48 #define MAX_SRC_HEIGHT 8 #define MAX_DST_WIDTH 48 #define MAX_DST_HEIGHT 8 #define MAX_STRIDE 4 /* * Composite operation with pseudorandom images */ static pixman_format_code_t get_format (int bpp) { if (bpp == 4) { switch (prng_rand_n (4)) { default: case 0: return PIXMAN_a8r8g8b8; case 1: return PIXMAN_x8r8g8b8; case 2: return PIXMAN_a8b8g8r8; case 3: return PIXMAN_x8b8g8r8; } } else { return PIXMAN_r5g6b5; } } uint32_t test_composite (int testnum, int verbose) { int i; pixman_image_t * src_img; pixman_image_t * mask_img; pixman_image_t * dst_img; pixman_transform_t transform; pixman_region16_t clip; int src_width, src_height; int mask_width, mask_height; int dst_width, dst_height; int src_stride, mask_stride, dst_stride; int src_x, src_y; int mask_x, mask_y; int dst_x, dst_y; int src_bpp; int mask_bpp = 1; int dst_bpp; int w, h; pixman_fixed_t scale_x = 65536, scale_y = 65536; pixman_fixed_t translate_x = 0, translate_y = 0; pixman_fixed_t mask_scale_x = 65536, mask_scale_y = 65536; pixman_fixed_t mask_translate_x = 0, mask_translate_y = 0; pixman_op_t op; pixman_repeat_t repeat = PIXMAN_REPEAT_NONE; pixman_repeat_t mask_repeat = 
PIXMAN_REPEAT_NONE; pixman_format_code_t src_fmt, mask_fmt, dst_fmt; uint32_t * srcbuf; uint32_t * dstbuf; uint32_t * maskbuf; uint32_t crc32; FLOAT_REGS_CORRUPTION_DETECTOR_START (); prng_srand (testnum); src_bpp = (prng_rand_n (2) == 0) ? 2 : 4; dst_bpp = (prng_rand_n (2) == 0) ? 2 : 4; switch (prng_rand_n (3)) { case 0: op = PIXMAN_OP_SRC; break; case 1: op = PIXMAN_OP_OVER; break; default: op = PIXMAN_OP_ADD; break; } src_width = prng_rand_n (MAX_SRC_WIDTH) + 1; src_height = prng_rand_n (MAX_SRC_HEIGHT) + 1; if (prng_rand_n (2)) { mask_width = prng_rand_n (MAX_SRC_WIDTH) + 1; mask_height = prng_rand_n (MAX_SRC_HEIGHT) + 1; } else { mask_width = mask_height = 1; } dst_width = prng_rand_n (MAX_DST_WIDTH) + 1; dst_height = prng_rand_n (MAX_DST_HEIGHT) + 1; src_stride = src_width * src_bpp + prng_rand_n (MAX_STRIDE) * src_bpp; mask_stride = mask_width * mask_bpp + prng_rand_n (MAX_STRIDE) * mask_bpp; dst_stride = dst_width * dst_bpp + prng_rand_n (MAX_STRIDE) * dst_bpp; if (src_stride & 3) src_stride += 2; if (mask_stride & 1) mask_stride += 1; if (mask_stride & 2) mask_stride += 2; if (dst_stride & 3) dst_stride += 2; src_x = -(src_width / 4) + prng_rand_n (src_width * 3 / 2); src_y = -(src_height / 4) + prng_rand_n (src_height * 3 / 2); mask_x = -(mask_width / 4) + prng_rand_n (mask_width * 3 / 2); mask_y = -(mask_height / 4) + prng_rand_n (mask_height * 3 / 2); dst_x = -(dst_width / 4) + prng_rand_n (dst_width * 3 / 2); dst_y = -(dst_height / 4) + prng_rand_n (dst_height * 3 / 2); w = prng_rand_n (dst_width * 3 / 2 - dst_x); h = prng_rand_n (dst_height * 3 / 2 - dst_y); srcbuf = (uint32_t *)malloc (src_stride * src_height); maskbuf = (uint32_t *)malloc (mask_stride * mask_height); dstbuf = (uint32_t *)malloc (dst_stride * dst_height); prng_randmemset (srcbuf, src_stride * src_height, 0); prng_randmemset (maskbuf, mask_stride * mask_height, 0); prng_randmemset (dstbuf, dst_stride * dst_height, 0); src_fmt = get_format (src_bpp); mask_fmt = PIXMAN_a8; dst_fmt = 
get_format (dst_bpp); if (prng_rand_n (2)) { srcbuf += (src_stride / 4) * (src_height - 1); src_stride = - src_stride; } if (prng_rand_n (2)) { maskbuf += (mask_stride / 4) * (mask_height - 1); mask_stride = - mask_stride; } if (prng_rand_n (2)) { dstbuf += (dst_stride / 4) * (dst_height - 1); dst_stride = - dst_stride; } src_img = pixman_image_create_bits ( src_fmt, src_width, src_height, srcbuf, src_stride); mask_img = pixman_image_create_bits ( mask_fmt, mask_width, mask_height, maskbuf, mask_stride); dst_img = pixman_image_create_bits ( dst_fmt, dst_width, dst_height, dstbuf, dst_stride); image_endian_swap (src_img); image_endian_swap (dst_img); if (prng_rand_n (4) > 0) { scale_x = -32768 * 3 + prng_rand_n (65536 * 5); scale_y = -32768 * 3 + prng_rand_n (65536 * 5); translate_x = prng_rand_n (65536); translate_y = prng_rand_n (65536); pixman_transform_init_scale (&transform, scale_x, scale_y); pixman_transform_translate (&transform, NULL, translate_x, translate_y); pixman_image_set_transform (src_img, &transform); } if (prng_rand_n (2) > 0) { mask_scale_x = -32768 * 3 + prng_rand_n (65536 * 5); mask_scale_y = -32768 * 3 + prng_rand_n (65536 * 5); mask_translate_x = prng_rand_n (65536); mask_translate_y = prng_rand_n (65536); pixman_transform_init_scale (&transform, mask_scale_x, mask_scale_y); pixman_transform_translate (&transform, NULL, mask_translate_x, mask_translate_y); pixman_image_set_transform (mask_img, &transform); } switch (prng_rand_n (4)) { case 0: mask_repeat = PIXMAN_REPEAT_NONE; break; case 1: mask_repeat = PIXMAN_REPEAT_NORMAL; break; case 2: mask_repeat = PIXMAN_REPEAT_PAD; break; case 3: mask_repeat = PIXMAN_REPEAT_REFLECT; break; default: break; } pixman_image_set_repeat (mask_img, mask_repeat); switch (prng_rand_n (4)) { case 0: repeat = PIXMAN_REPEAT_NONE; break; case 1: repeat = PIXMAN_REPEAT_NORMAL; break; case 2: repeat = PIXMAN_REPEAT_PAD; break; case 3: repeat = PIXMAN_REPEAT_REFLECT; break; default: break; } pixman_image_set_repeat 
(src_img, repeat); if (prng_rand_n (2)) pixman_image_set_filter (src_img, PIXMAN_FILTER_NEAREST, NULL, 0); else pixman_image_set_filter (src_img, PIXMAN_FILTER_BILINEAR, NULL, 0); if (prng_rand_n (2)) pixman_image_set_filter (mask_img, PIXMAN_FILTER_NEAREST, NULL, 0); else pixman_image_set_filter (mask_img, PIXMAN_FILTER_BILINEAR, NULL, 0); if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (src_width); clip_boxes[i].y1 = prng_rand_n (src_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (src_width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (src_height - clip_boxes[i].y1); if (verbose) { printf ("source clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (src_img, &clip); pixman_image_set_source_clipping (src_img, 1); pixman_region_fini (&clip); } if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (mask_width); clip_boxes[i].y1 = prng_rand_n (mask_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (mask_width - clip_boxes[i].x1); clip_boxes[i].y2 = clip_boxes[i].y1 + prng_rand_n (mask_height - clip_boxes[i].y1); if (verbose) { printf ("mask clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (mask_img, &clip); pixman_image_set_source_clipping (mask_img, 1); pixman_region_fini (&clip); } if (prng_rand_n (8) == 0) { pixman_box16_t clip_boxes[2]; int n = prng_rand_n (2) + 1; for (i = 0; i < n; i++) { clip_boxes[i].x1 = prng_rand_n (dst_width); clip_boxes[i].y1 = prng_rand_n (dst_height); clip_boxes[i].x2 = clip_boxes[i].x1 + prng_rand_n (dst_width - clip_boxes[i].x1); clip_boxes[i].y2 
= clip_boxes[i].y1 + prng_rand_n (dst_height - clip_boxes[i].y1); if (verbose) { printf ("destination clip box: [%d,%d-%d,%d]\n", clip_boxes[i].x1, clip_boxes[i].y1, clip_boxes[i].x2, clip_boxes[i].y2); } } pixman_region_init_rects (&clip, clip_boxes, n); pixman_image_set_clip_region (dst_img, &clip); pixman_region_fini (&clip); } if (prng_rand_n (2) == 0) { mask_fmt = PIXMAN_null; pixman_image_unref (mask_img); mask_img = NULL; mask_x = 0; mask_y = 0; } if (verbose) { printf ("op=%s, src_fmt=%s, mask_fmt=%s, dst_fmt=%s\n", operator_name (op), format_name (src_fmt), format_name (mask_fmt), format_name (dst_fmt)); printf ("scale_x=%d, scale_y=%d, repeat=%d, filter=%d\n", scale_x, scale_y, repeat, src_img->common.filter); printf ("translate_x=%d, translate_y=%d\n", translate_x, translate_y); if (mask_fmt != PIXMAN_null) { printf ("mask_scale_x=%d, mask_scale_y=%d, " "mask_repeat=%d, mask_filter=%d\n", mask_scale_x, mask_scale_y, mask_repeat, mask_img->common.filter); printf ("mask_translate_x=%d, mask_translate_y=%d\n", mask_translate_x, mask_translate_y); } printf ("src_width=%d, src_height=%d, src_x=%d, src_y=%d\n", src_width, src_height, src_x, src_y); if (mask_fmt != PIXMAN_null) { printf ("mask_width=%d, mask_height=%d, mask_x=%d, mask_y=%d\n", mask_width, mask_height, mask_x, mask_y); } printf ("dst_width=%d, dst_height=%d, dst_x=%d, dst_y=%d\n", dst_width, dst_height, dst_x, dst_y); printf ("w=%d, h=%d\n", w, h); } pixman_image_composite (op, src_img, mask_img, dst_img, src_x, src_y, mask_x, mask_y, dst_x, dst_y, w, h); crc32 = compute_crc32_for_image (0, dst_img); if (verbose) print_image (dst_img); pixman_image_unref (src_img); if (mask_img != NULL) pixman_image_unref (mask_img); pixman_image_unref (dst_img); if (src_stride < 0) srcbuf += (src_stride / 4) * (src_height - 1); if (mask_stride < 0) maskbuf += (mask_stride / 4) * (mask_height - 1); if (dst_stride < 0) dstbuf += (dst_stride / 4) * (dst_height - 1); free (srcbuf); free (maskbuf); free (dstbuf); 
FLOAT_REGS_CORRUPTION_DETECTOR_FINISH (); return crc32; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM 0x92E0F068 #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM 0x8EFFA1E5 #else #define CHECKSUM 0x00000000 #endif int main (int argc, const char *argv[]) { pixman_disable_out_of_bounds_workaround (); return fuzzer_test_main("scaling", 8000000, CHECKSUM, test_composite, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/solid-test.c0000664000175000017500000002442114712446423016333 0ustar00mattst88mattst88/* * Copyright Âİ 2015 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. 
* * Author: Ben Avison (bavison@riscosopen.org) * */ #include "utils.h" #include #include #include #define WIDTH 32 #define HEIGHT 32 static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, #if 0 /* these use floating point math and are not always bitexact on different platforms */ PIXMAN_OP_SATURATE, PIXMAN_OP_DISJOINT_CLEAR, PIXMAN_OP_DISJOINT_SRC, PIXMAN_OP_DISJOINT_DST, PIXMAN_OP_DISJOINT_OVER, PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_OP_DISJOINT_IN, PIXMAN_OP_DISJOINT_IN_REVERSE, PIXMAN_OP_DISJOINT_OUT, PIXMAN_OP_DISJOINT_OUT_REVERSE, PIXMAN_OP_DISJOINT_ATOP, PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_OP_DISJOINT_XOR, PIXMAN_OP_CONJOINT_CLEAR, PIXMAN_OP_CONJOINT_SRC, PIXMAN_OP_CONJOINT_DST, PIXMAN_OP_CONJOINT_OVER, PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_OP_CONJOINT_IN, PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_OP_CONJOINT_OUT, PIXMAN_OP_CONJOINT_OUT_REVERSE, PIXMAN_OP_CONJOINT_ATOP, PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_OP_CONJOINT_XOR, PIXMAN_OP_COLOR_DODGE, PIXMAN_OP_COLOR_BURN, PIXMAN_OP_SOFT_LIGHT, PIXMAN_OP_HSL_HUE, PIXMAN_OP_HSL_SATURATION, PIXMAN_OP_HSL_COLOR, PIXMAN_OP_HSL_LUMINOSITY, #endif }; /* The first eight format in the list are by far the most widely * used formats, so we test those more than the others */ #define N_MOST_LIKELY_FORMATS 8 static const pixman_format_code_t img_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, PIXMAN_x8b8g8r8, PIXMAN_r5g6b5, PIXMAN_b5g6r5, PIXMAN_a8, PIXMAN_a1, PIXMAN_r3g3b2, PIXMAN_b8g8r8a8, PIXMAN_b8g8r8x8, PIXMAN_r8g8b8a8, PIXMAN_r8g8b8x8, PIXMAN_x14r6g6b6, PIXMAN_r8g8b8, PIXMAN_b8g8r8, 
#if 0 /* These are going to use floating point in the near future */ PIXMAN_x2r10g10b10, PIXMAN_a2r10g10b10, PIXMAN_x2b10g10r10, PIXMAN_a2b10g10r10, #endif PIXMAN_a1r5g5b5, PIXMAN_x1r5g5b5, PIXMAN_a1b5g5r5, PIXMAN_x1b5g5r5, PIXMAN_a4r4g4b4, PIXMAN_x4r4g4b4, PIXMAN_a4b4g4r4, PIXMAN_x4b4g4r4, PIXMAN_r3g3b2, PIXMAN_b2g3r3, PIXMAN_a2r2g2b2, PIXMAN_a2b2g2r2, PIXMAN_c8, PIXMAN_g8, PIXMAN_x4c4, PIXMAN_x4g4, PIXMAN_c4, PIXMAN_g4, PIXMAN_g1, PIXMAN_x4a4, PIXMAN_a4, PIXMAN_r1g2b1, PIXMAN_b1g2r1, PIXMAN_a1r1g1b1, PIXMAN_a1b1g1r1, PIXMAN_null }; static const pixman_format_code_t mask_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_a8, PIXMAN_a4, PIXMAN_a1, PIXMAN_null }; static pixman_indexed_t rgb_palette[9]; static pixman_indexed_t y_palette[9]; static pixman_format_code_t random_format (const pixman_format_code_t *allowed_formats) { int n = 0; while (allowed_formats[n] != PIXMAN_null) n++; if (n > N_MOST_LIKELY_FORMATS && prng_rand_n (4) != 0) n = N_MOST_LIKELY_FORMATS; return allowed_formats[prng_rand_n (n)]; } static pixman_image_t * create_multi_pixel_image (const pixman_format_code_t *allowed_formats, uint32_t *buffer, pixman_format_code_t *used_fmt) { pixman_format_code_t fmt; pixman_image_t *img; int stride; fmt = random_format (allowed_formats); stride = (WIDTH * PIXMAN_FORMAT_BPP (fmt) + 31) / 32 * 4; img = pixman_image_create_bits (fmt, WIDTH, HEIGHT, buffer, stride); if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_COLOR) pixman_image_set_indexed (img, &(rgb_palette[PIXMAN_FORMAT_BPP (fmt)])); else if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_GRAY) pixman_image_set_indexed (img, &(y_palette[PIXMAN_FORMAT_BPP (fmt)])); prng_randmemset (buffer, WIDTH * HEIGHT * 4, 0); image_endian_swap (img); if (used_fmt) *used_fmt = fmt; return img; } static pixman_image_t * create_solid_image (const pixman_format_code_t *allowed_formats, uint32_t *buffer, pixman_format_code_t *used_fmt) { if (prng_rand_n (2)) { /* Use a repeating 1x1 bitmap image for solid */ pixman_format_code_t fmt; 
pixman_image_t *img, *dummy_img; uint32_t bpp, dummy_buf; fmt = random_format (allowed_formats); bpp = PIXMAN_FORMAT_BPP (fmt); img = pixman_image_create_bits (fmt, 1, 1, buffer, 4); pixman_image_set_repeat (img, PIXMAN_REPEAT_NORMAL); if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_COLOR) pixman_image_set_indexed (img, &(rgb_palette[bpp])); else if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_GRAY) pixman_image_set_indexed (img, &(y_palette[bpp])); /* Force the flags to be calculated for image with initial * bitmap contents of 0 or 2^bpp-1 by plotting from it into a * separate throwaway image. It is simplest to write all 0s * or all 1s to the first word irrespective of the colour * depth even though we actually only care about the first * pixel since the stride has to be a whole number of words. */ *buffer = prng_rand_n (2) ? 0xFFFFFFFFu : 0; dummy_img = pixman_image_create_bits (PIXMAN_a8r8g8b8, 1, 1, &dummy_buf, 4); pixman_image_composite (PIXMAN_OP_SRC, img, NULL, dummy_img, 0, 0, 0, 0, 0, 0, 1, 1); pixman_image_unref (dummy_img); /* Now set the bitmap contents to a random value */ prng_randmemset (buffer, 4, 0); image_endian_swap (img); if (used_fmt) *used_fmt = fmt; return img; } else { /* Use a native solid image */ pixman_color_t color; pixman_image_t *img; color.alpha = prng_rand_n (UINT16_MAX + 1); color.red = prng_rand_n (UINT16_MAX + 1); color.green = prng_rand_n (UINT16_MAX + 1); color.blue = prng_rand_n (UINT16_MAX + 1); img = pixman_image_create_solid_fill (&color); if (used_fmt) *used_fmt = PIXMAN_solid; return img; } } static uint32_t test_solid (int testnum, int verbose) { pixman_op_t op; uint32_t src_buf[WIDTH * HEIGHT]; uint32_t dst_buf[WIDTH * HEIGHT]; uint32_t mask_buf[WIDTH * HEIGHT]; pixman_image_t *src_img; pixman_image_t *dst_img; pixman_image_t *mask_img = NULL; pixman_format_code_t src_fmt, dst_fmt, mask_fmt = PIXMAN_null; pixman_bool_t ca = 0; uint32_t crc32; prng_srand (testnum); op = op_list[prng_rand_n (ARRAY_LENGTH (op_list))]; dst_img = 
create_multi_pixel_image (img_fmt_list, dst_buf, &dst_fmt); switch (prng_rand_n (3)) { case 0: /* Solid source, no mask */ src_img = create_solid_image (img_fmt_list, src_buf, &src_fmt); break; case 1: /* Solid source, bitmap mask */ src_img = create_solid_image (img_fmt_list, src_buf, &src_fmt); mask_img = create_multi_pixel_image (mask_fmt_list, mask_buf, &mask_fmt); break; case 2: /* Bitmap image, solid mask */ src_img = create_multi_pixel_image (img_fmt_list, src_buf, &src_fmt); mask_img = create_solid_image (mask_fmt_list, mask_buf, &mask_fmt); break; default: abort (); } if (mask_img) { ca = prng_rand_n (2); pixman_image_set_component_alpha (mask_img, ca); } if (verbose) { printf ("op=%s\n", operator_name (op)); printf ("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n", format_name (src_fmt), format_name (dst_fmt), format_name (mask_fmt)); printf ("src_size=%u, mask_size=%u, component_alpha=%u\n", src_fmt == PIXMAN_solid ? 1 : src_img->bits.width, !mask_img || mask_fmt == PIXMAN_solid ? 1 : mask_img->bits.width, ca); } pixman_image_composite (op, src_img, mask_img, dst_img, 0, 0, 0, 0, 0, 0, WIDTH, HEIGHT); if (verbose) print_image (dst_img); crc32 = compute_crc32_for_image (0, dst_img); pixman_image_unref (src_img); pixman_image_unref (dst_img); if (mask_img) pixman_image_unref (mask_img); return crc32; } int main (int argc, const char *argv[]) { int i; prng_srand (0); for (i = 1; i <= 8; i++) { initialize_palette (&(rgb_palette[i]), i, TRUE); initialize_palette (&(y_palette[i]), i, FALSE); } return fuzzer_test_main ("solid", 500000, 0xC30FD380, test_solid, argc, argv); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/stress-test.c0000664000175000017500000005315614712446423016553 0ustar00mattst88mattst88#include #include #include "utils.h" #include #if 0 #define fence_malloc malloc #define fence_free free #define make_random_bytes malloc #endif static const pixman_format_code_t image_formats[] = { 
PIXMAN_rgba_float, PIXMAN_rgb_float, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, PIXMAN_r5g6b5, PIXMAN_r3g3b2, PIXMAN_a8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, PIXMAN_b8g8r8a8, PIXMAN_b8g8r8x8, PIXMAN_r8g8b8a8, PIXMAN_r8g8b8x8, PIXMAN_x14r6g6b6, PIXMAN_r8g8b8, PIXMAN_b8g8r8, PIXMAN_a8r8g8b8_sRGB, PIXMAN_r8g8b8_sRGB, PIXMAN_r5g6b5, PIXMAN_b5g6r5, PIXMAN_x2r10g10b10, PIXMAN_a2r10g10b10, PIXMAN_x2b10g10r10, PIXMAN_a2b10g10r10, PIXMAN_a1r5g5b5, PIXMAN_x1r5g5b5, PIXMAN_a1b5g5r5, PIXMAN_x1b5g5r5, PIXMAN_a4r4g4b4, PIXMAN_x4r4g4b4, PIXMAN_a4b4g4r4, PIXMAN_x4b4g4r4, PIXMAN_a8, PIXMAN_r3g3b2, PIXMAN_b2g3r3, PIXMAN_a2r2g2b2, PIXMAN_a2b2g2r2, PIXMAN_c8, PIXMAN_g8, PIXMAN_x4c4, PIXMAN_x4g4, PIXMAN_c4, PIXMAN_g4, PIXMAN_g1, PIXMAN_x4a4, PIXMAN_a4, PIXMAN_r1g2b1, PIXMAN_b1g2r1, PIXMAN_a1r1g1b1, PIXMAN_a1b1g1r1, PIXMAN_a1 }; static pixman_filter_t filters[] = { PIXMAN_FILTER_NEAREST, PIXMAN_FILTER_BILINEAR, PIXMAN_FILTER_FAST, PIXMAN_FILTER_GOOD, PIXMAN_FILTER_BEST, PIXMAN_FILTER_CONVOLUTION }; static int get_size (void) { switch (prng_rand_n (28)) { case 0: return 1; case 1: return 2; default: case 2: return prng_rand_n (100); case 4: return prng_rand_n (2000) + 1000; case 5: return 65535; case 6: return 65536; case 7: return prng_rand_n (64000) + 63000; } } static uint32_t real_reader (const void *src, int size); static void *xor_ptr(const void *ptr) { return (void *)(((intptr_t)ptr) ^ (intptr_t)0x8000000080000000); } static void destroy (pixman_image_t *image, void *data) { if (image->type == BITS && image->bits.free_me != image->bits.bits) { uint32_t *bits; if (image->bits.bits != (void *)0x01) { bits = image->bits.bits; if (image->bits.rowstride < 0) bits -= (- image->bits.rowstride * (image->bits.height - 1)); if (image->bits.read_func == real_reader) bits = xor_ptr(bits); fence_free (bits); } } free (data); } static uint32_t real_reader (const void *src, int size) { src = xor_ptr(src); switch (size) { case 1: return *(uint8_t *)src; case 2: return *(uint16_t *)src; case 4: return 
*(uint32_t *)src; default: assert (0); return 0; /* silence MSVC */ } } static void real_writer (void *src, uint32_t value, int size) { src = xor_ptr(src); switch (size) { case 1: *(uint8_t *)src = value; break; case 2: *(uint16_t *)src = value; break; case 4: *(uint32_t *)src = value; break; default: assert (0); break; } } static uint32_t fake_reader (const void *src, int size) { uint32_t r = prng_rand (); assert (size == 1 || size == 2 || size == 4); return r >> (32 - (size * 8)); } static void fake_writer (void *src, uint32_t value, int size) { assert (size == 1 || size == 2 || size == 4); } static int32_t log_rand (void) { uint32_t mask; mask = (1 << prng_rand_n (10)) - 1; return (prng_rand () & mask) - (mask >> 1); } static int32_t rand_x (pixman_image_t *image) { if (image->type == BITS) return prng_rand_n (image->bits.width); else return log_rand (); } static int32_t rand_y (pixman_image_t *image) { if (image->type == BITS) return prng_rand_n (image->bits.height); else return log_rand (); } typedef enum { DONT_CARE, PREFER_ALPHA, REQUIRE_ALPHA } alpha_preference_t; static pixman_format_code_t random_format (alpha_preference_t alpha) { pixman_format_code_t format; int n = prng_rand_n (ARRAY_LENGTH (image_formats)); if (alpha >= PREFER_ALPHA && (alpha == REQUIRE_ALPHA || prng_rand_n (4) != 0)) { do { format = image_formats[n++ % ARRAY_LENGTH (image_formats)]; } while (PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_A); } else { format = image_formats[n]; } return format; } static pixman_image_t * create_random_bits_image (alpha_preference_t alpha_preference) { pixman_format_code_t format; pixman_indexed_t *indexed; pixman_image_t *image; int width, height, stride; uint32_t *bits; pixman_read_memory_func_t read_func = NULL; pixman_write_memory_func_t write_func = NULL; pixman_filter_t filter; pixman_fixed_t *coefficients = NULL; int n_coefficients = 0; int align_add, align_mask; /* format */ format = random_format (alpha_preference); switch (PIXMAN_FORMAT_BPP 
(format)) { case 128: align_mask = 15; align_add = align_mask + prng_rand_n (65); break; default: align_mask = 3; align_add = align_mask + prng_rand_n (17); break; } indexed = NULL; if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_COLOR) { indexed = malloc (sizeof (pixman_indexed_t)); initialize_palette (indexed, PIXMAN_FORMAT_BPP (format), TRUE); } else if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_GRAY) { indexed = malloc (sizeof (pixman_indexed_t)); initialize_palette (indexed, PIXMAN_FORMAT_BPP (format), FALSE); } else { indexed = NULL; } /* size */ width = get_size (); height = get_size (); while ((uint64_t)width * height > 200000) { if (prng_rand_n(2) == 0) height = 200000 / width; else width = 200000 / height; } if (height == 0) height = 1; if (width == 0) width = 1; /* bits */ switch (prng_rand_n (7)) { default: case 0: stride = (width * PIXMAN_FORMAT_BPP (format) + 7) / 8; stride = (stride + align_add) & (~align_mask); if (format == PIXMAN_rgb_float || format == PIXMAN_rgba_float) bits = (uint32_t *)make_random_floats (height * stride); else bits = (uint32_t *)make_random_bytes (height * stride); break; case 1: stride = 0; bits = NULL; break; case 2: /* Zero-filled */ stride = (width * PIXMAN_FORMAT_BPP (format) + 7) / 8; stride = (stride + align_add) & (~align_mask); bits = fence_malloc (height * stride); if (!bits) return NULL; memset (bits, 0, height * stride); break; case 3: /* Filled with 0xFF */ stride = (width * PIXMAN_FORMAT_BPP (format) + 7) / 8; stride = (stride + align_add) & (~align_mask); bits = fence_malloc (height * stride); if (!bits) return NULL; memset (bits, 0xff, height * stride); break; case 4: /* bits is a bad pointer, has read/write functions */ if (PIXMAN_FORMAT_BPP (format) <= 32) { stride = 232; bits = (void *)0x01; read_func = fake_reader; write_func = fake_writer; break; } case 5: /* bits is a real pointer, has read/write functions */ stride = (width * PIXMAN_FORMAT_BPP (format) + 7) / 8; stride = (stride + align_add) & 
(~align_mask); bits = fence_malloc (height * stride); if (!bits) return NULL; memset (bits, 0xff, height * stride); if (PIXMAN_FORMAT_BPP (format) <= 32) { bits = xor_ptr(bits); read_func = real_reader; write_func = real_writer; } break; case 6: /* bits is a real pointer, stride is negative */ stride = (width * PIXMAN_FORMAT_BPP (format) + 7) / 8; stride = (stride + align_add) & (~align_mask); if (format == PIXMAN_rgb_float || format == PIXMAN_rgba_float) bits = (uint32_t *)make_random_floats (height * stride); else bits = (uint32_t *)make_random_bytes (height * stride); if (!bits) return NULL; bits += ((height - 1) * stride) / 4; stride = - stride; break; } /* Filter */ filter = filters[prng_rand_n (ARRAY_LENGTH (filters))]; if (filter == PIXMAN_FILTER_CONVOLUTION) { int width = prng_rand_n (3); int height = prng_rand_n (4); n_coefficients = width * height + 2; coefficients = malloc (n_coefficients * sizeof (pixman_fixed_t)); if (coefficients) { int i; for (i = 0; i < width * height; ++i) coefficients[i + 2] = prng_rand(); coefficients[0] = width << 16; coefficients[1] = height << 16; } else { filter = PIXMAN_FILTER_BEST; } } /* Finally create the image */ image = pixman_image_create_bits (format, width, height, bits, stride); if (!image) return NULL; pixman_image_set_indexed (image, indexed); pixman_image_set_destroy_function (image, destroy, indexed); pixman_image_set_accessors (image, read_func, write_func); pixman_image_set_filter (image, filter, coefficients, n_coefficients); free (coefficients); return image; } static pixman_repeat_t repeats[] = { PIXMAN_REPEAT_NONE, PIXMAN_REPEAT_NORMAL, PIXMAN_REPEAT_REFLECT, PIXMAN_REPEAT_PAD }; static uint32_t absolute (int32_t i) { return i < 0? 
-i : i; } static void set_general_properties (pixman_image_t *image, pixman_bool_t allow_alpha_map) { pixman_repeat_t repeat; /* Set properties that are generic to all images */ /* Repeat */ repeat = repeats[prng_rand_n (ARRAY_LENGTH (repeats))]; pixman_image_set_repeat (image, repeat); /* Alpha map */ if (allow_alpha_map && prng_rand_n (4) == 0) { pixman_image_t *alpha_map; int16_t x, y; alpha_map = create_random_bits_image (DONT_CARE); if (alpha_map) { set_general_properties (alpha_map, FALSE); x = rand_x (image) - image->bits.width / 2; y = rand_y (image) - image->bits.height / 2; pixman_image_set_alpha_map (image, alpha_map, x, y); pixman_image_unref (alpha_map); } } /* Component alpha */ pixman_image_set_component_alpha (image, prng_rand_n (3) == 0); /* Clip region */ if (prng_rand_n (8) < 2) { pixman_region32_t region; int i, n_rects; pixman_region32_init (®ion); switch (prng_rand_n (12)) { case 0: n_rects = 0; break; case 1: case 2: case 3: n_rects = 1; break; case 4: case 5: n_rects = 2; break; case 6: case 7: n_rects = 3; break; default: n_rects = prng_rand_n (100); break; } for (i = 0; i < n_rects; ++i) { uint32_t width, height; int x, y; x = log_rand(); y = log_rand(); width = absolute (log_rand ()) + 1; height = absolute (log_rand ()) + 1; pixman_region32_union_rect ( ®ion, ®ion, x, y, width, height); } if (image->type == BITS && prng_rand_n (8) != 0) { uint32_t width, height; uint32_t x, y; int i; /* Also add a couple of clip rectangles inside the image * so that compositing will actually take place. 
*/ for (i = 0; i < 5; ++i) { x = prng_rand_n (2 * image->bits.width) - image->bits.width; y = prng_rand_n (2 * image->bits.height) - image->bits.height; width = prng_rand_n (image->bits.width) - x + 10; height = prng_rand_n (image->bits.height) - y + 10; if (width + x < x) width = INT32_MAX - x; if (height + y < y) height = INT32_MAX - y; pixman_region32_union_rect ( ®ion, ®ion, x, y, width, height); } } pixman_image_set_clip_region32 (image, ®ion); pixman_region32_fini (®ion); } /* Whether source clipping is enabled */ pixman_image_set_source_clipping (image, !!prng_rand_n (2)); /* Client clip */ pixman_image_set_has_client_clip (image, !!prng_rand_n (2)); /* Transform */ if (prng_rand_n (5) < 2) { pixman_transform_t xform; int i, j, k; uint32_t tx, ty, sx, sy; uint32_t c, s; memset (&xform, 0, sizeof xform); xform.matrix[0][0] = pixman_fixed_1; xform.matrix[1][1] = pixman_fixed_1; xform.matrix[2][2] = pixman_fixed_1; for (k = 0; k < 3; ++k) { switch (prng_rand_n (4)) { case 0: /* rotation */ c = prng_rand_n (2 * 65536) - 65536; s = prng_rand_n (2 * 65536) - 65536; pixman_transform_rotate (&xform, NULL, c, s); break; case 1: /* translation */ tx = prng_rand(); ty = prng_rand(); pixman_transform_translate (&xform, NULL, tx, ty); break; case 2: /* scale */ sx = prng_rand(); sy = prng_rand(); pixman_transform_scale (&xform, NULL, sx, sy); break; case 3: if (prng_rand_n (16) == 0) { /* random */ for (i = 0; i < 3; ++i) for (j = 0; j < 3; ++j) xform.matrix[i][j] = prng_rand(); break; } else if (prng_rand_n (16) == 0) { /* zero */ memset (&xform, 0, sizeof xform); } break; } } pixman_image_set_transform (image, &xform); } } static pixman_color_t random_color (void) { pixman_color_t color = { prng_rand() & 0xffff, prng_rand() & 0xffff, prng_rand() & 0xffff, prng_rand() & 0xffff, }; return color; } static pixman_image_t * create_random_solid_image (void) { pixman_color_t color = random_color(); pixman_image_t *image = pixman_image_create_solid_fill (&color); return image; 
} static pixman_gradient_stop_t * create_random_stops (int *n_stops) { pixman_fixed_t step; pixman_fixed_t s; int i; pixman_gradient_stop_t *stops; *n_stops = prng_rand_n (50) + 1; step = pixman_fixed_1 / *n_stops; stops = malloc (*n_stops * sizeof (pixman_gradient_stop_t)); s = 0; for (i = 0; i < (*n_stops) - 1; ++i) { stops[i].x = s; stops[i].color = random_color(); s += step; } stops[*n_stops - 1].x = pixman_fixed_1; stops[*n_stops - 1].color = random_color(); return stops; } static pixman_point_fixed_t create_random_point (void) { pixman_point_fixed_t p; p.x = log_rand (); p.y = log_rand (); return p; } static pixman_image_t * create_random_linear_image (void) { int n_stops; pixman_gradient_stop_t *stops; pixman_point_fixed_t p1, p2; pixman_image_t *result; stops = create_random_stops (&n_stops); if (!stops) return NULL; p1 = create_random_point (); p2 = create_random_point (); result = pixman_image_create_linear_gradient (&p1, &p2, stops, n_stops); free (stops); return result; } static pixman_image_t * create_random_radial_image (void) { int n_stops; pixman_gradient_stop_t *stops; pixman_point_fixed_t inner_c, outer_c; pixman_fixed_t inner_r, outer_r; pixman_image_t *result; inner_c = create_random_point(); outer_c = create_random_point(); inner_r = prng_rand(); outer_r = prng_rand(); stops = create_random_stops (&n_stops); if (!stops) return NULL; result = pixman_image_create_radial_gradient ( &inner_c, &outer_c, inner_r, outer_r, stops, n_stops); free (stops); return result; } static pixman_image_t * create_random_conical_image (void) { pixman_gradient_stop_t *stops; int n_stops; pixman_point_fixed_t c; pixman_fixed_t angle; pixman_image_t *result; c = create_random_point(); angle = prng_rand(); stops = create_random_stops (&n_stops); if (!stops) return NULL; result = pixman_image_create_conical_gradient (&c, angle, stops, n_stops); free (stops); return result; } static pixman_image_t * create_random_image (void) { pixman_image_t *result; switch (prng_rand_n 
(5)) { default: case 0: result = create_random_bits_image (DONT_CARE); break; case 1: result = create_random_solid_image (); break; case 2: result = create_random_linear_image (); break; case 3: result = create_random_radial_image (); break; case 4: result = create_random_conical_image (); break; } if (result) set_general_properties (result, TRUE); return result; } static void random_line (pixman_line_fixed_t *line, int width, int height) { line->p1.x = prng_rand_n (width) << 16; line->p1.y = prng_rand_n (height) << 16; line->p2.x = prng_rand_n (width) << 16; line->p2.y = prng_rand_n (height) << 16; } static pixman_trapezoid_t * create_random_trapezoids (int *n_traps, int height, int width) { pixman_trapezoid_t *trapezoids; int i; *n_traps = prng_rand_n (16) + 1; trapezoids = malloc (sizeof (pixman_trapezoid_t) * *n_traps); for (i = 0; i < *n_traps; ++i) { pixman_trapezoid_t *t = &(trapezoids[i]); t->top = prng_rand_n (height) << 16; t->bottom = prng_rand_n (height) << 16; random_line (&t->left, height, width); random_line (&t->right, height, width); } return trapezoids; } static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_SATURATE, PIXMAN_OP_DISJOINT_CLEAR, PIXMAN_OP_DISJOINT_SRC, PIXMAN_OP_DISJOINT_DST, PIXMAN_OP_DISJOINT_OVER, PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_OP_DISJOINT_IN, PIXMAN_OP_DISJOINT_IN_REVERSE, PIXMAN_OP_DISJOINT_OUT, PIXMAN_OP_DISJOINT_OUT_REVERSE, PIXMAN_OP_DISJOINT_ATOP, PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_OP_DISJOINT_XOR, PIXMAN_OP_CONJOINT_CLEAR, PIXMAN_OP_CONJOINT_SRC, PIXMAN_OP_CONJOINT_DST, PIXMAN_OP_CONJOINT_OVER, PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_OP_CONJOINT_IN, PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_OP_CONJOINT_OUT, PIXMAN_OP_CONJOINT_OUT_REVERSE, 
PIXMAN_OP_CONJOINT_ATOP, PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_OP_CONJOINT_XOR, PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_COLOR_DODGE, PIXMAN_OP_COLOR_BURN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, PIXMAN_OP_SOFT_LIGHT, PIXMAN_OP_HSL_HUE, PIXMAN_OP_HSL_SATURATION, PIXMAN_OP_HSL_COLOR, PIXMAN_OP_HSL_LUMINOSITY, }; static void run_test (uint32_t seed, pixman_bool_t verbose, uint32_t mod) { pixman_image_t *source, *mask, *dest; pixman_op_t op; if (verbose) { if (mod == 0 || (seed % mod) == 0) printf ("Seed 0x%08x\n", seed); } source = mask = dest = NULL; prng_srand (seed); if (prng_rand_n (8) == 0) { int n_traps; pixman_trapezoid_t *trapezoids; int p = prng_rand_n (3); if (p == 0) dest = create_random_bits_image (DONT_CARE); else dest = create_random_bits_image (REQUIRE_ALPHA); if (!dest) goto out; set_general_properties (dest, TRUE); if (!(trapezoids = create_random_trapezoids ( &n_traps, dest->bits.width, dest->bits.height))) { goto out; } switch (p) { case 0: source = create_random_image (); if (source) { op = op_list [prng_rand_n (ARRAY_LENGTH (op_list))]; pixman_composite_trapezoids ( op, source, dest, random_format (REQUIRE_ALPHA), rand_x (source), rand_y (source), rand_x (dest), rand_y (dest), n_traps, trapezoids); } break; case 1: pixman_rasterize_trapezoid ( dest, &trapezoids[prng_rand_n (n_traps)], rand_x (dest), rand_y (dest)); break; case 2: pixman_add_trapezoids ( dest, rand_x (dest), rand_y (dest), n_traps, trapezoids); break; } free (trapezoids); } else { dest = create_random_bits_image (DONT_CARE); source = create_random_image (); mask = create_random_image (); if (source && mask && dest) { set_general_properties (dest, TRUE); op = op_list [prng_rand_n (ARRAY_LENGTH (op_list))]; pixman_image_composite32 (op, source, mask, dest, rand_x (source), rand_y (source), rand_x (mask), rand_y (mask), 0, 0, dest->bits.width, dest->bits.height); } } out: if (source) 
pixman_image_unref (source); if (mask) pixman_image_unref (mask); if (dest) pixman_image_unref (dest); } static pixman_bool_t get_int (char *s, uint32_t *i) { char *end; int p; p = strtol (s, &end, 0); if (end != s && *end == 0) { *i = p; return TRUE; } return FALSE; } int main (int argc, char **argv) { int verbose = FALSE; uint32_t seed = 1; uint32_t n_tests = 8000; uint32_t mod = 0; pixman_bool_t use_threads = TRUE; int32_t i; pixman_disable_out_of_bounds_workaround (); enable_divbyzero_exceptions(); if (getenv ("VERBOSE") != NULL) verbose = TRUE; for (i = 1; i < argc; ++i) { if (strcmp (argv[i], "-v") == 0) { verbose = TRUE; if (i + 1 < argc) { get_int (argv[i + 1], &mod); i++; } } else if (strcmp (argv[i], "-s") == 0 && i + 1 < argc) { get_int (argv[i + 1], &seed); use_threads = FALSE; i++; } else if (strcmp (argv[i], "-n") == 0 && i + 1 < argc) { get_int (argv[i + 1], &n_tests); i++; } else { if (strcmp (argv[i], "-h") != 0) printf ("Unknown option '%s'\n\n", argv[i]); printf ("Options:\n\n" "-n Number of tests to run\n" "-s Seed of first test (ignored if PIXMAN_RANDOMIZE_TESTS is set)\n" "-v Print out seeds\n" "-v Print out every n'th seed\n\n"); exit (-1); } } if (getenv ("PIXMAN_RANDOMIZE_TESTS")) { seed = get_random_seed(); printf ("First seed: 0x%08x\n", seed); } if (use_threads) { #ifdef USE_OPENMP # pragma omp parallel for default(none) shared(verbose, n_tests, mod, seed) #endif for (i = 0; i < (int32_t)n_tests; ++i) run_test (seed + i, verbose, mod); } else { for (i = 0; i < (int32_t)n_tests; ++i) run_test (seed + i, verbose, mod); } return 0; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/thread-test.c0000664000175000017500000001141614712446423016470 0ustar00mattst88mattst88#include "utils.h" #if !defined (HAVE_PTHREADS) && !defined (_WIN32) int main () { printf ("Skipped thread-test - pthreads or Windows Threads not supported\n"); return 0; } #else #include #ifdef HAVE_PTHREADS # 
include #elif defined (_WIN32) # define WIN32_LEAN_AND_MEAN # include #endif #define THREADS 16 typedef struct { int thread_no; uint32_t *dst_buf; prng_t prng_state; #if defined (_WIN32) && !defined (HAVE_PTHREADS) uint32_t crc32; #endif } info_t; static const pixman_op_t operators[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, }; static const pixman_format_code_t formats[] = { PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, PIXMAN_a8, PIXMAN_a4, PIXMAN_a1, PIXMAN_b5g6r5, PIXMAN_r8g8b8a8, PIXMAN_a4r4g4b4 }; #define N_ROUNDS 8192 #define RAND_ELT(arr) \ arr[prng_rand_r(&info->prng_state) % ARRAY_LENGTH (arr)] #define DEST_WIDTH (7) #ifdef HAVE_PTHREADS static void * thread (void *data) #elif defined (_WIN32) DWORD WINAPI thread (LPVOID data) #endif { info_t *info = data; uint32_t crc32 = 0x0; uint32_t src_buf[64]; pixman_image_t *dst_img, *src_img; int i; prng_srand_r (&info->prng_state, info->thread_no); for (i = 0; i < N_ROUNDS; ++i) { pixman_op_t op; int rand1, rand2; prng_randmemset_r (&info->prng_state, info->dst_buf, DEST_WIDTH * sizeof (uint32_t), 0); prng_randmemset_r (&info->prng_state, src_buf, sizeof (src_buf), 0); src_img = pixman_image_create_bits ( RAND_ELT (formats), 4, 4, src_buf, 16); dst_img = pixman_image_create_bits ( RAND_ELT (formats), DEST_WIDTH, 1, info->dst_buf, DEST_WIDTH * sizeof (uint32_t)); image_endian_swap (src_img); image_endian_swap (dst_img); rand2 = prng_rand_r (&info->prng_state) % 4; rand1 = prng_rand_r (&info->prng_state) % 4; op = RAND_ELT (operators); pixman_image_composite32 ( op, src_img, NULL, dst_img, rand1, rand2, 0, 0, 0, 0, DEST_WIDTH, 1); crc32 = 
compute_crc32_for_image (crc32, dst_img); pixman_image_unref (src_img); pixman_image_unref (dst_img); } #ifdef HAVE_PTHREADS return (void *)(uintptr_t)crc32; #elif defined (_WIN32) info->crc32 = crc32; return 0; #endif } static inline uint32_t byteswap32 (uint32_t x) { return ((x & ((uint32_t)0xFF << 24)) >> 24) | ((x & ((uint32_t)0xFF << 16)) >> 8) | ((x & ((uint32_t)0xFF << 8)) << 8) | ((x & ((uint32_t)0xFF << 0)) << 24); } int main (void) { uint32_t dest[THREADS * DEST_WIDTH]; info_t info[THREADS] = { { 0 } }; #ifdef HAVE_PTHREADS pthread_t threads[THREADS]; void *retvals[THREADS]; #elif defined (_WIN32) HANDLE hThreadArray[THREADS]; DWORD dwThreadIdArray[THREADS]; #endif uint32_t crc32s[THREADS], crc32; int i; for (i = 0; i < THREADS; ++i) { info[i].thread_no = i; info[i].dst_buf = &dest[i * DEST_WIDTH]; } #ifdef HAVE_PTHREADS for (i = 0; i < THREADS; ++i) pthread_create (&threads[i], NULL, thread, &info[i]); for (i = 0; i < THREADS; ++i) pthread_join (threads[i], &retvals[i]); for (i = 0; i < THREADS; ++i) { crc32s[i] = (uintptr_t)retvals[i]; if (is_little_endian()) crc32s[i] = byteswap32 (crc32s[i]); } #elif defined (_WIN32) for (i = 0; i < THREADS; ++i) { hThreadArray[i] = CreateThread(NULL, 0, thread, &info[i], 0, &dwThreadIdArray[i]); if (hThreadArray[i] == NULL) { printf ("Windows thread creation failed!\n"); return 1; } } for (i = 0; i < THREADS; ++i) { WaitForSingleObject (hThreadArray[i], INFINITE); CloseHandle(hThreadArray[i]); } for (i = 0; i < THREADS; ++i) { crc32s[i] = info[i].crc32; if (is_little_endian()) crc32s[i] = byteswap32 (crc32s[i]); } #endif crc32 = compute_crc32 (0, crc32s, sizeof crc32s); #define EXPECTED 0x82C4D9FB if (crc32 != EXPECTED) { printf ("thread-test failed. 
Got checksum 0x%08X, expected 0x%08X\n", crc32, EXPECTED); return 1; } return 0; } #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/tolerance-test.c0000664000175000017500000002432414712446423017177 0ustar00mattst88mattst88#include #include #include #include #include #include "utils.h" #define MAX_WIDTH 16 #define MAX_HEIGHT 16 #define MAX_STRIDE 4 static const pixman_format_code_t formats[] = { PIXMAN_a2r10g10b10, PIXMAN_x2r10g10b10, PIXMAN_a8r8g8b8, PIXMAN_a4r4g4b4, PIXMAN_a2r2g2b2, PIXMAN_r5g6b5, PIXMAN_r3g3b2, }; static const pixman_op_t operators[] = { PIXMAN_OP_CLEAR, PIXMAN_OP_SRC, PIXMAN_OP_DST, PIXMAN_OP_OVER, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN, PIXMAN_OP_IN_REVERSE, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_ATOP, PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_XOR, PIXMAN_OP_ADD, PIXMAN_OP_SATURATE, PIXMAN_OP_DISJOINT_CLEAR, PIXMAN_OP_DISJOINT_SRC, PIXMAN_OP_DISJOINT_DST, PIXMAN_OP_DISJOINT_OVER, PIXMAN_OP_DISJOINT_OVER_REVERSE, PIXMAN_OP_DISJOINT_IN, PIXMAN_OP_DISJOINT_IN_REVERSE, PIXMAN_OP_DISJOINT_OUT, PIXMAN_OP_DISJOINT_OUT_REVERSE, PIXMAN_OP_DISJOINT_ATOP, PIXMAN_OP_DISJOINT_ATOP_REVERSE, PIXMAN_OP_DISJOINT_XOR, PIXMAN_OP_CONJOINT_CLEAR, PIXMAN_OP_CONJOINT_SRC, PIXMAN_OP_CONJOINT_DST, PIXMAN_OP_CONJOINT_OVER, PIXMAN_OP_CONJOINT_OVER_REVERSE, PIXMAN_OP_CONJOINT_IN, PIXMAN_OP_CONJOINT_IN_REVERSE, PIXMAN_OP_CONJOINT_OUT, PIXMAN_OP_CONJOINT_OUT_REVERSE, PIXMAN_OP_CONJOINT_ATOP, PIXMAN_OP_CONJOINT_ATOP_REVERSE, PIXMAN_OP_CONJOINT_XOR, PIXMAN_OP_MULTIPLY, PIXMAN_OP_SCREEN, PIXMAN_OP_OVERLAY, PIXMAN_OP_DARKEN, PIXMAN_OP_LIGHTEN, PIXMAN_OP_COLOR_DODGE, PIXMAN_OP_COLOR_BURN, PIXMAN_OP_HARD_LIGHT, PIXMAN_OP_SOFT_LIGHT, PIXMAN_OP_DIFFERENCE, PIXMAN_OP_EXCLUSION, }; static const pixman_dither_t dithers[] = { PIXMAN_DITHER_ORDERED_BAYER_8, PIXMAN_DITHER_ORDERED_BLUE_NOISE_64, }; #define RANDOM_ELT(array) \ (array[prng_rand_n (ARRAY_LENGTH (array))]) static void free_bits (pixman_image_t *image, void 
*data) { free (image->bits.bits); } static pixman_image_t * create_image (pixman_image_t **clone) { pixman_format_code_t format = RANDOM_ELT (formats); pixman_image_t *image; int width = prng_rand_n (MAX_WIDTH); int height = prng_rand_n (MAX_HEIGHT); int stride = ((width * (PIXMAN_FORMAT_BPP (format) / 8)) + 3) & ~3; uint32_t *bytes = malloc (stride * height); prng_randmemset (bytes, stride * height, RANDMEMSET_MORE_00_AND_FF); image = pixman_image_create_bits ( format, width, height, bytes, stride); pixman_image_set_destroy_function (image, free_bits, NULL); assert (image); if (clone) { uint32_t *bytes_dup = malloc (stride * height); memcpy (bytes_dup, bytes, stride * height); *clone = pixman_image_create_bits ( format, width, height, bytes_dup, stride); pixman_image_set_destroy_function (*clone, free_bits, NULL); } return image; } static pixman_bool_t access (pixman_image_t *image, int x, int y, uint32_t *pixel) { int bytes_per_pixel; int stride; uint8_t *location; if (x < 0 || x >= image->bits.width || y < 0 || y >= image->bits.height) return FALSE; bytes_per_pixel = PIXMAN_FORMAT_BPP (image->bits.format) / 8; stride = image->bits.rowstride * 4; location = (uint8_t *)image->bits.bits + y * stride + x * bytes_per_pixel; if (bytes_per_pixel == 4) *pixel = *(uint32_t *)location; else if (bytes_per_pixel == 2) *pixel = *(uint16_t *)location; else if (bytes_per_pixel == 1) *pixel = *(uint8_t *)location; else assert (0); return TRUE; } static void get_color (pixel_checker_t *checker, pixman_image_t *image, int x, int y, color_t *color, uint32_t *pixel) { if (!access (image, x, y, pixel)) { color->a = 0.0; color->r = 0.0; color->g = 0.0; color->b = 0.0; } else { pixel_checker_convert_pixel_to_color ( checker, *pixel, color); } } static pixman_bool_t verify (int test_no, pixman_op_t op, pixman_image_t *source, pixman_image_t *mask, pixman_image_t *dest, pixman_image_t *orig_dest, int x, int y, int width, int height, pixman_bool_t component_alpha, pixman_dither_t dither) 
{ pixel_checker_t dest_checker, src_checker, mask_checker; int i, j; pixel_checker_init (&src_checker, source->bits.format); pixel_checker_init (&dest_checker, dest->bits.format); pixel_checker_init (&mask_checker, mask->bits.format); if (dest->bits.dither != PIXMAN_DITHER_NONE) pixel_checker_allow_dither (&dest_checker); assert (dest->bits.format == orig_dest->bits.format); for (j = y; j < y + height; ++j) { for (i = x; i < x + width; ++i) { color_t src_color, mask_color, orig_dest_color, result; uint32_t dest_pixel, orig_dest_pixel, src_pixel, mask_pixel; access (dest, i, j, &dest_pixel); get_color (&src_checker, source, i - x, j - y, &src_color, &src_pixel); get_color (&mask_checker, mask, i - x, j - y, &mask_color, &mask_pixel); get_color (&dest_checker, orig_dest, i, j, &orig_dest_color, &orig_dest_pixel); do_composite (op, &src_color, &mask_color, &orig_dest_color, &result, component_alpha); if (!pixel_checker_check (&dest_checker, dest_pixel, &result)) { int a, r, g, b; printf ("--------- Test 0x%x failed ---------\n", test_no); printf (" operator: %s (%s alpha)\n", operator_name (op), component_alpha? 
"component" : "unified"); printf (" dither: %s\n", dither_name (dither)); printf (" dest_x, dest_y: %d %d\n", x, y); printf (" width, height: %d %d\n", width, height); printf (" source: format: %-14s size: %2d x %2d\n", format_name (source->bits.format), source->bits.width, source->bits.height); printf (" mask: format: %-14s size: %2d x %2d\n", format_name (mask->bits.format), mask->bits.width, mask->bits.height); printf (" dest: format: %-14s size: %2d x %2d\n", format_name (dest->bits.format), dest->bits.width, dest->bits.height); printf (" -- Failed pixel: (%d, %d) --\n", i, j); printf (" source ARGB: %f %f %f %f (pixel: %x)\n", src_color.a, src_color.r, src_color.g, src_color.b, src_pixel); printf (" mask ARGB: %f %f %f %f (pixel: %x)\n", mask_color.a, mask_color.r, mask_color.g, mask_color.b, mask_pixel); printf (" dest ARGB: %f %f %f %f (pixel: %x)\n", orig_dest_color.a, orig_dest_color.r, orig_dest_color.g, orig_dest_color.b, orig_dest_pixel); printf (" expected ARGB: %f %f %f %f\n", result.a, result.r, result.g, result.b); pixel_checker_get_min (&dest_checker, &result, &a, &r, &g, &b); printf (" min acceptable: %8d %8d %8d %8d\n", a, r, g, b); pixel_checker_split_pixel (&dest_checker, dest_pixel, &a, &r, &g, &b); printf (" got: %8d %8d %8d %8d (pixel: %x)\n", a, r, g, b, dest_pixel); pixel_checker_get_max (&dest_checker, &result, &a, &r, &g, &b); printf (" max acceptable: %8d %8d %8d %8d\n", a, r, g, b); printf ("\n"); printf (" { %s,\n", operator_name (op)); printf (" PIXMAN_%s,\t0x%x,\n", format_name (source->bits.format), src_pixel); printf (" PIXMAN_%s,\t0x%x,\n", format_name (mask->bits.format), mask_pixel); printf (" PIXMAN_%s,\t0x%x\n", format_name (dest->bits.format), orig_dest_pixel); printf (" },\n"); return FALSE; } } } return TRUE; } static pixman_bool_t do_check (int i) { pixman_image_t *source, *dest, *mask; pixman_op_t op; int x, y, width, height; pixman_image_t *dest_copy; pixman_bool_t result = TRUE; pixman_bool_t component_alpha; 
pixman_dither_t dither = PIXMAN_DITHER_NONE; prng_srand (i); op = RANDOM_ELT (operators); x = prng_rand_n (MAX_WIDTH); y = prng_rand_n (MAX_HEIGHT); width = prng_rand_n (MAX_WIDTH) + 4; height = prng_rand_n (MAX_HEIGHT) + 4; source = create_image (NULL); mask = create_image (NULL); dest = create_image (&dest_copy); if (x >= dest->bits.width) x = dest->bits.width / 2; if (y >= dest->bits.height) y = dest->bits.height / 2; if (x + width > dest->bits.width) width = dest->bits.width - x; if (y + height > dest->bits.height) height = dest->bits.height - y; if (prng_rand_n (2)) { dither = RANDOM_ELT (dithers); pixman_image_set_dither (dest, dither); } component_alpha = prng_rand_n (2); pixman_image_set_component_alpha (mask, component_alpha); pixman_image_composite32 (op, source, mask, dest, 0, 0, 0, 0, x, y, width, height); if (!verify (i, op, source, mask, dest, dest_copy, x, y, width, height, component_alpha, dither)) { result = FALSE; } pixman_image_unref (source); pixman_image_unref (mask); pixman_image_unref (dest); pixman_image_unref (dest_copy); return result; } #define N_TESTS 10000000 int main (int argc, const char *argv[]) { int i; int result = 0; if (argc == 2) { if (strcmp (argv[1], "--forever") == 0) { uint32_t n; prng_srand (time (0)); n = prng_rand(); for (;;) do_check (n++); } else { do_check (strtol (argv[1], NULL, 0)); } } else { #ifdef USE_OPENMP # pragma omp parallel for default(none) reduction(|:result) #endif for (i = 0; i < N_TESTS; ++i) { if (!do_check (i)) result |= 1; } } return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/trap-crasher.c0000664000175000017500000000110114712446423016625 0ustar00mattst88mattst88#include #include "utils.h" int main() { pixman_image_t *dst; pixman_trapezoid_t traps[] = { { 2147483646, 2147483647, { { 0, 0 }, { 0, 2147483647 } }, { { 65536, 0 }, { 0, 2147483647 } } }, { 32768, - 2147483647, { { 0, 0 }, { 0, 2147483647 } }, { { 65536, 0 }, { 
0, 2147483647 } } }, }; dst = pixman_image_create_bits (PIXMAN_a8, 1, 1, NULL, -1); pixman_add_trapezoids (dst, 0, 0, ARRAY_LENGTH (traps), traps); pixman_image_unref (dst); return (0); } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/utils/0000775000175000017500000000000014712446423015235 5ustar00mattst88mattst88././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/utils/meson.build0000664000175000017500000000252114712446423017377 0ustar00mattst88mattst88# Copyright Âİ 2018 Intel Corporation # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
libtestutils = static_library( 'testutils', ['utils.c', 'utils-prng.c', config_h], dependencies : [idep_pixman, dep_openmp, dep_m, dep_png], ) libtestutils_dep = declare_dependency( link_with: libtestutils, include_directories: include_directories('.'), ) ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/utils/utils-prng.c0000664000175000017500000002753014712446423017514 0ustar00mattst88mattst88/* * Copyright Âİ 2012 Siarhei Siamashka * * Based on the public domain implementation of small noncryptographic PRNG * authored by Bob Jenkins: http://burtleburtle.net/bob/rand/smallprng.html * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "utils.h" #include "utils-prng.h" #if defined(HAVE_GCC_VECTOR_EXTENSIONS) && defined(__SSE2__) #include #endif void smallprng_srand_r (smallprng_t *x, uint32_t seed) { uint32_t i; x->a = 0xf1ea5eed, x->b = x->c = x->d = seed; for (i = 0; i < 20; ++i) smallprng_rand_r (x); } /* * Set a 32-bit seed for PRNG * * LCG is used here for generating independent seeds for different * smallprng instances (in the case if smallprng is also used for * generating these seeds, "Big Crush" test from TestU01 detects * some problems in the glued 'prng_rand_128_r' output data). * Actually we might be even better using some cryptographic * hash for this purpose, but LCG seems to be also enough for * passing "Big Crush". */ void prng_srand_r (prng_t *x, uint32_t seed) { #ifdef HAVE_GCC_VECTOR_EXTENSIONS int i; prng_rand_128_data_t dummy; smallprng_srand_r (&x->p0, seed); x->a[0] = x->a[1] = x->a[2] = x->a[3] = 0xf1ea5eed; x->b[0] = x->c[0] = x->d[0] = (seed = seed * 1103515245 + 12345); x->b[1] = x->c[1] = x->d[1] = (seed = seed * 1103515245 + 12345); x->b[2] = x->c[2] = x->d[2] = (seed = seed * 1103515245 + 12345); x->b[3] = x->c[3] = x->d[3] = (seed = seed * 1103515245 + 12345); for (i = 0; i < 20; ++i) prng_rand_128_r (x, &dummy); #else smallprng_srand_r (&x->p0, seed); smallprng_srand_r (&x->p1, (seed = seed * 1103515245 + 12345)); smallprng_srand_r (&x->p2, (seed = seed * 1103515245 + 12345)); smallprng_srand_r (&x->p3, (seed = seed * 1103515245 + 12345)); smallprng_srand_r (&x->p4, (seed = seed * 1103515245 + 12345)); #endif } static force_inline void store_rand_128_data (void *addr, prng_rand_128_data_t *d, int aligned) { #ifdef HAVE_GCC_VECTOR_EXTENSIONS if (aligned) { *(uint8x16 *)addr = d->vb; return; } else { #ifdef __SSE2__ /* workaround for http://gcc.gnu.org/PR55614 */ _mm_storeu_si128 (addr, _mm_loadu_si128 ((__m128i *)d)); return; #endif } #endif /* we could try something better for unaligned writes (packed attribute), * but GCC is not very reliable: 
http://gcc.gnu.org/PR55454 */ memcpy (addr, d, 16); } /* * Helper function and the actual code for "prng_randmemset_r" function */ static force_inline void randmemset_internal (prng_t *prng, uint8_t *buf, size_t size, prng_randmemset_flags_t flags, int aligned) { prng_t local_prng = *prng; prng_rand_128_data_t randdata; size_t i; while (size >= 16) { prng_rand_128_data_t t; if (flags == 0) { prng_rand_128_r (&local_prng, &randdata); } else { prng_rand_128_r (&local_prng, &t); prng_rand_128_r (&local_prng, &randdata); #ifdef HAVE_GCC_VECTOR_EXTENSIONS if (flags & RANDMEMSET_MORE_FF) { const uint8x16 const_C0 = { 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0 }; randdata.vb |= (t.vb >= const_C0); } if (flags & RANDMEMSET_MORE_00) { const uint8x16 const_40 = { 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; randdata.vb &= (t.vb >= const_40); } if (flags & RANDMEMSET_MORE_FFFFFFFF) { const uint32x4 const_C0000000 = { 0xC0000000, 0xC0000000, 0xC0000000, 0xC0000000 }; randdata.vw |= ((t.vw << 30) >= const_C0000000); } if (flags & RANDMEMSET_MORE_00000000) { const uint32x4 const_40000000 = { 0x40000000, 0x40000000, 0x40000000, 0x40000000 }; randdata.vw &= ((t.vw << 30) >= const_40000000); } #else #define PROCESS_ONE_LANE(i) \ if (flags & RANDMEMSET_MORE_FF) \ { \ uint32_t mask_ff = (t.w[i] & (t.w[i] << 1)) & 0x80808080; \ mask_ff |= mask_ff >> 1; \ mask_ff |= mask_ff >> 2; \ mask_ff |= mask_ff >> 4; \ randdata.w[i] |= mask_ff; \ } \ if (flags & RANDMEMSET_MORE_00) \ { \ uint32_t mask_00 = (t.w[i] | (t.w[i] << 1)) & 0x80808080; \ mask_00 |= mask_00 >> 1; \ mask_00 |= mask_00 >> 2; \ mask_00 |= mask_00 >> 4; \ randdata.w[i] &= mask_00; \ } \ if (flags & RANDMEMSET_MORE_FFFFFFFF) \ { \ int32_t mask_ff = ((t.w[i] << 30) & (t.w[i] << 31)) & \ 0x80000000; \ randdata.w[i] |= mask_ff >> 31; \ } \ if (flags & RANDMEMSET_MORE_00000000) \ { \ int32_t mask_00 = ((t.w[i] << 30) | (t.w[i] 
<< 31)) & \ 0x80000000; \ randdata.w[i] &= mask_00 >> 31; \ } PROCESS_ONE_LANE (0) PROCESS_ONE_LANE (1) PROCESS_ONE_LANE (2) PROCESS_ONE_LANE (3) #endif } if (is_little_endian ()) { store_rand_128_data (buf, &randdata, aligned); buf += 16; } else { #ifndef __has_builtin #define __has_builtin(x) 0 #endif #ifdef HAVE_GCC_VECTOR_EXTENSIONS # if __has_builtin(__builtin_shufflevector) randdata.vb = __builtin_shufflevector (randdata.vb, randdata.vb, 3, 2, 1, 0, 7, 6 , 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); # else static const uint8x16 bswap_shufflemask = { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }; randdata.vb = __builtin_shuffle (randdata.vb, bswap_shufflemask); # endif store_rand_128_data (buf, &randdata, aligned); buf += 16; #else uint8_t t1, t2, t3, t4; #define STORE_ONE_LANE(i) \ t1 = randdata.b[i * 4 + 3]; \ t2 = randdata.b[i * 4 + 2]; \ t3 = randdata.b[i * 4 + 1]; \ t4 = randdata.b[i * 4 + 0]; \ *buf++ = t1; \ *buf++ = t2; \ *buf++ = t3; \ *buf++ = t4; STORE_ONE_LANE (0) STORE_ONE_LANE (1) STORE_ONE_LANE (2) STORE_ONE_LANE (3) #endif } size -= 16; } i = 0; while (i < size) { uint8_t randbyte = prng_rand_r (&local_prng) & 0xFF; if (flags != 0) { uint8_t t = prng_rand_r (&local_prng) & 0xFF; if ((flags & RANDMEMSET_MORE_FF) && (t >= 0xC0)) randbyte = 0xFF; if ((flags & RANDMEMSET_MORE_00) && (t < 0x40)) randbyte = 0x00; if (i % 4 == 0 && i + 4 <= size) { t = prng_rand_r (&local_prng) & 0xFF; if ((flags & RANDMEMSET_MORE_FFFFFFFF) && (t >= 0xC0)) { memset(&buf[i], 0xFF, 4); i += 4; continue; } if ((flags & RANDMEMSET_MORE_00000000) && (t < 0x40)) { memset(&buf[i], 0x00, 4); i += 4; continue; } } } buf[i] = randbyte; i++; } *prng = local_prng; } /* * Fill memory buffer with random data. 
Flags argument may be used * to tweak some statistics properties: * RANDMEMSET_MORE_00 - set ~25% of bytes to 0x00 * RANDMEMSET_MORE_FF - set ~25% of bytes to 0xFF * RANDMEMSET_MORE_00000000 - ~25% chance for 00000000 4-byte clusters * RANDMEMSET_MORE_FFFFFFFF - ~25% chance for FFFFFFFF 4-byte clusters */ void prng_randmemset_r (prng_t *prng, void *voidbuf, size_t size, prng_randmemset_flags_t flags) { uint8_t *buf = (uint8_t *)voidbuf; if ((uintptr_t)buf & 15) { /* unaligned buffer */ if (flags == 0) randmemset_internal (prng, buf, size, 0, 0); else if (flags == RANDMEMSET_MORE_00_AND_FF) randmemset_internal (prng, buf, size, RANDMEMSET_MORE_00_AND_FF, 0); else randmemset_internal (prng, buf, size, flags, 0); } else { /* aligned buffer */ if (flags == 0) randmemset_internal (prng, buf, size, 0, 1); else if (flags == RANDMEMSET_MORE_00_AND_FF) randmemset_internal (prng, buf, size, RANDMEMSET_MORE_00_AND_FF, 1); else randmemset_internal (prng, buf, size, flags, 1); } } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/utils/utils-prng.h0000664000175000017500000001366614712446423017526 0ustar00mattst88mattst88/* * Copyright Âİ 2012 Siarhei Siamashka * * Based on the public domain implementation of small noncryptographic PRNG * authored by Bob Jenkins: http://burtleburtle.net/bob/rand/smallprng.html * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. 
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __UTILS_PRNG_H__
#define __UTILS_PRNG_H__

/*
 * This file provides a fast SIMD-optimized noncryptographic PRNG (pseudorandom
 * number generator), with the output good enough to pass "Big Crush" tests
 * from TestU01 (http://en.wikipedia.org/wiki/TestU01).
 *
 * SIMD code uses http://gcc.gnu.org/onlinedocs/gcc/Vector-Extensions.html
 * which is a GCC specific extension. There is also a slower alternative
 * code path, which should work with any C compiler.
 *
 * The "prng_t" structure keeps the internal state of the random number
 * generator. It is possible to have multiple instances of the random number
 * generator active at the same time, in this case each of them needs to have
 * its own "prng_t". All the functions take a pointer to "prng_t"
 * as the first argument.
 *
 * Functions:
 *
 * ----------------------------------------------------------------------------
 * void prng_srand_r (prng_t *prng, uint32_t seed);
 *
 * Initialize the pseudorandom number generator. The sequence of pseudorandom
 * numbers is deterministic and only depends on "seed". Any two generators
 * initialized with the same seed will produce exactly the same sequence.
 *
 * ----------------------------------------------------------------------------
 * uint32_t prng_rand_r (prng_t *prng);
 *
 * Generate a single uniformly distributed 32-bit pseudorandom value.
*
 * ----------------------------------------------------------------------------
 * void prng_randmemset_r (prng_t                  *prng,
 *                         void                    *buffer,
 *                         size_t                   size,
 *                         prng_randmemset_flags_t  flags);
 *
 * Fills the memory buffer "buffer" with "size" bytes of pseudorandom data.
 * The "flags" argument may be used to tweak some statistics properties:
 *     RANDMEMSET_MORE_00 - set ~25% of bytes to 0x00
 *     RANDMEMSET_MORE_FF - set ~25% of bytes to 0xFF
 * The flags can be combined. This allows a bit better simulation of typical
 * pixel data, which normally contains a lot of fully transparent or fully
 * opaque pixels.
 */

#ifdef HAVE_CONFIG_H
#include /* NOTE(review): header name lost in extraction -- likely <config.h>; restore */
#endif

#include "pixman-private.h"

/*****************************************************************************/

#ifdef HAVE_GCC_VECTOR_EXTENSIONS
/* GCC generic vector types: four 32-bit lanes / sixteen 8-bit lanes. */
typedef uint32_t uint32x4 __attribute__ ((vector_size(16)));
typedef uint8_t  uint8x16 __attribute__ ((vector_size(16)));
#endif

/* State of one scalar Bob Jenkins "small PRNG" instance. */
typedef struct
{
    uint32_t a, b, c, d;
} smallprng_t;

/* Full generator state: either one 4-lane SIMD state, or four scalar
 * generators, plus a separate scalar generator p0 that serves single
 * 32-bit outputs (prng_rand_r). */
typedef struct
{
#ifdef HAVE_GCC_VECTOR_EXTENSIONS
    uint32x4 a, b, c, d;
#else
    smallprng_t p1, p2, p3, p4;
#endif
    smallprng_t p0;
} prng_t;

/* 128 bits of PRNG output, viewable as bytes, 32-bit words or vectors. */
typedef union
{
    uint8_t  b[16];
    uint32_t w[4];
#ifdef HAVE_GCC_VECTOR_EXTENSIONS
    uint8x16 vb;
    uint32x4 vw;
#endif
} prng_rand_128_data_t;

/*****************************************************************************/

/* One iteration of Bob Jenkins' small noncryptographic PRNG: a 32-bit
 * shift/add/xor mixing step over the four state words; returns the next
 * 32-bit pseudorandom value (x->d). */
static force_inline uint32_t
smallprng_rand_r (smallprng_t *x)
{
    uint32_t e = x->a - ((x->b << 27) + (x->b >> (32 - 27)));
    x->a = x->b ^ ((x->c << 17) ^ (x->c >> (32 - 17)));
    x->b = x->c + x->d;
    x->c = x->d + e;
    x->d = e + x->a;
    return x->d;
}

/* Generate 4 bytes (32-bits) of random data */
static force_inline uint32_t
prng_rand_r (prng_t *x)
{
    return smallprng_rand_r (&x->p0);
}

/* Generate 16 bytes (128-bits) of random data */
static force_inline void
prng_rand_128_r (prng_t *x, prng_rand_128_data_t *data)
{
#ifdef HAVE_GCC_VECTOR_EXTENSIONS
    /* Same mixing step as smallprng_rand_r, applied to all four lanes. */
    uint32x4 e = x->a - ((x->b << 27) + (x->b >> (32 - 27)));
    x->a = x->b ^ ((x->c << 17) ^ (x->c >> (32 - 17)));
    x->b = x->c + x->d;
x->c = x->d + e; x->d = e + x->a; data->vw = x->d; #else data->w[0] = smallprng_rand_r (&x->p1); data->w[1] = smallprng_rand_r (&x->p2); data->w[2] = smallprng_rand_r (&x->p3); data->w[3] = smallprng_rand_r (&x->p4); #endif } typedef enum { RANDMEMSET_MORE_00 = 1, /* ~25% chance for 0x00 bytes */ RANDMEMSET_MORE_FF = 2, /* ~25% chance for 0xFF bytes */ RANDMEMSET_MORE_00000000 = 4, /* ~25% chance for 0x00000000 clusters */ RANDMEMSET_MORE_FFFFFFFF = 8, /* ~25% chance for 0xFFFFFFFF clusters */ RANDMEMSET_MORE_00_AND_FF = (RANDMEMSET_MORE_00 | RANDMEMSET_MORE_00000000 | RANDMEMSET_MORE_FF | RANDMEMSET_MORE_FFFFFFFF) } prng_randmemset_flags_t; /* Set the 32-bit seed for PRNG */ void prng_srand_r (prng_t *prng, uint32_t seed); /* Fill memory buffer with random data */ void prng_randmemset_r (prng_t *prng, void *buffer, size_t size, prng_randmemset_flags_t flags); #endif ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/utils/utils.c0000664000175000017500000015411614712446423016551 0ustar00mattst88mattst88#define _GNU_SOURCE #include "utils.h" #include #include #include #include #include #include #ifdef HAVE_GETTIMEOFDAY #include #else #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_MMAN_H #include #endif #ifdef HAVE_FENV_H #include #endif #ifdef HAVE_LIBPNG #include #endif #define ROUND_UP(x, mult) (((x) + (mult) - 1) / (mult) * (mult)) /* Random number generator state */ prng_t prng_state_data = {0}; prng_t *prng_state = NULL; /*----------------------------------------------------------------------------*\ * CRC-32 version 2.0.0 by Craig Bruce, 2006-04-29. * * This program generates the CRC-32 values for the files named in the * command-line arguments. These are the same CRC-32 values used by GZIP, * PKZIP, and ZMODEM. The Crc32_ComputeBuf () can also be detached and * used independently. * * THIS PROGRAM IS PUBLIC-DOMAIN SOFTWARE. 
* * Based on the byte-oriented implementation "File Verification Using CRC" * by Mark R. Nelson in Dr. Dobb's Journal, May 1992, pp. 64-67. * * v1.0.0: original release. * v1.0.1: fixed printf formats. * v1.0.2: fixed something else. * v1.0.3: replaced CRC constant table by generator function. * v1.0.4: reformatted code, made ANSI C. 1994-12-05. * v2.0.0: rewrote to use memory buffer & static table, 2006-04-29. \*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*\ * NAME: * Crc32_ComputeBuf () - computes the CRC-32 value of a memory buffer * DESCRIPTION: * Computes or accumulates the CRC-32 value for a memory buffer. * The 'inCrc32' gives a previously accumulated CRC-32 value to allow * a CRC to be generated for multiple sequential buffer-fuls of data. * The 'inCrc32' for the first buffer must be zero. * ARGUMENTS: * inCrc32 - accumulated CRC-32 value, must be 0 on first call * buf - buffer to compute CRC-32 value for * bufLen - number of bytes in buffer * RETURNS: * crc32 - computed CRC-32 value * ERRORS: * (no errors are possible) \*----------------------------------------------------------------------------*/ uint32_t compute_crc32 (uint32_t in_crc32, const void *buf, size_t buf_len) { static const uint32_t crc_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 
0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 
0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; uint32_t crc32; unsigned char * byte_buf; size_t i; /* accumulate crc32 for buffer */ crc32 = in_crc32 ^ 0xFFFFFFFF; byte_buf = (unsigned char*) buf; for (i = 0; i < buf_len; i++) crc32 = (crc32 >> 8) ^ crc_table[(crc32 ^ byte_buf[i]) & 0xFF]; return (crc32 ^ 0xFFFFFFFF); } static uint32_t compute_crc32_for_image_internal (uint32_t crc32, pixman_image_t *img, pixman_bool_t remove_alpha, pixman_bool_t remove_rgb) { pixman_format_code_t fmt = pixman_image_get_format (img); uint32_t *data = pixman_image_get_data (img); int stride = pixman_image_get_stride (img); int height = pixman_image_get_height (img); uint32_t mask = 0xffffffff; int i; if (stride < 0) { data += (stride / 4) * (height - 1); stride = - stride; } /* mask unused 'x' part */ if (PIXMAN_FORMAT_BPP (fmt) - PIXMAN_FORMAT_DEPTH (fmt) && PIXMAN_FORMAT_DEPTH (fmt) != 0) { uint32_t m = (1 << PIXMAN_FORMAT_DEPTH (fmt)) - 1; if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_BGRA || PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_RGBA) { m <<= (PIXMAN_FORMAT_BPP (fmt) - PIXMAN_FORMAT_DEPTH (fmt)); } mask &= m; } /* mask alpha channel */ if (remove_alpha && PIXMAN_FORMAT_A (fmt)) { uint32_t m; if (PIXMAN_FORMAT_BPP (fmt) == 32) m = 0xffffffff; else m = (1 << PIXMAN_FORMAT_BPP (fmt)) - 1; m >>= PIXMAN_FORMAT_A (fmt); if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_BGRA || PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_RGBA || PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_A) { /* Alpha is at the bottom of the pixel */ m <<= PIXMAN_FORMAT_A (fmt); } mask &= m; } /* mask rgb channels */ if (remove_rgb && 
PIXMAN_FORMAT_RGB (fmt)) { uint32_t m = ((uint32_t)~0) >> (32 - PIXMAN_FORMAT_BPP (fmt)); uint32_t size = PIXMAN_FORMAT_R (fmt) + PIXMAN_FORMAT_G (fmt) + PIXMAN_FORMAT_B (fmt); m &= ~((1 << size) - 1); if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_BGRA || PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_RGBA) { /* RGB channels are at the top of the pixel */ m >>= size; } mask &= m; } for (i = 0; i * PIXMAN_FORMAT_BPP (fmt) < 32; i++) mask |= mask << (i * PIXMAN_FORMAT_BPP (fmt)); for (i = 0; i < stride * height / 4; i++) data[i] &= mask; /* swap endiannes in order to provide identical results on both big * and litte endian systems */ image_endian_swap (img); return compute_crc32 (crc32, data, stride * height); } uint32_t compute_crc32_for_image (uint32_t crc32, pixman_image_t *img) { if (img->common.alpha_map) { crc32 = compute_crc32_for_image_internal (crc32, img, TRUE, FALSE); crc32 = compute_crc32_for_image_internal ( crc32, (pixman_image_t *)img->common.alpha_map, FALSE, TRUE); } else { crc32 = compute_crc32_for_image_internal (crc32, img, FALSE, FALSE); } return crc32; } void print_image (pixman_image_t *image) { int i, j; int width, height, stride; pixman_format_code_t format; uint8_t *buffer; int s; width = pixman_image_get_width (image); height = pixman_image_get_height (image); stride = pixman_image_get_stride (image); format = pixman_image_get_format (image); buffer = (uint8_t *)pixman_image_get_data (image); s = (stride >= 0)? 
stride : - stride; printf ("---\n"); for (i = 0; i < height; i++) { for (j = 0; j < s; j++) { if (j == (width * PIXMAN_FORMAT_BPP (format) + 7) / 8) printf ("| "); printf ("%02X ", *((uint8_t *)buffer + i * stride + j)); } printf ("\n"); } printf ("---\n"); } /* perform endian conversion of pixel data */ void image_endian_swap (pixman_image_t *img) { int stride = pixman_image_get_stride (img); uint32_t *data = pixman_image_get_data (img); int height = pixman_image_get_height (img); int bpp = PIXMAN_FORMAT_BPP (pixman_image_get_format (img)); int i, j; /* swap bytes only on big endian systems */ if (is_little_endian()) return; if (bpp == 8) return; for (i = 0; i < height; i++) { uint8_t *line_data = (uint8_t *)data + stride * i; int s = (stride >= 0)? stride : - stride; switch (bpp) { case 1: for (j = 0; j < s; j++) { line_data[j] = ((line_data[j] & 0x80) >> 7) | ((line_data[j] & 0x40) >> 5) | ((line_data[j] & 0x20) >> 3) | ((line_data[j] & 0x10) >> 1) | ((line_data[j] & 0x08) << 1) | ((line_data[j] & 0x04) << 3) | ((line_data[j] & 0x02) << 5) | ((line_data[j] & 0x01) << 7); } break; case 4: for (j = 0; j < s; j++) { line_data[j] = (line_data[j] >> 4) | (line_data[j] << 4); } break; case 16: for (j = 0; j + 2 <= s; j += 2) { char t1 = line_data[j + 0]; char t2 = line_data[j + 1]; line_data[j + 1] = t1; line_data[j + 0] = t2; } break; case 24: for (j = 0; j + 3 <= s; j += 3) { char t1 = line_data[j + 0]; char t2 = line_data[j + 1]; char t3 = line_data[j + 2]; line_data[j + 2] = t1; line_data[j + 1] = t2; line_data[j + 0] = t3; } break; case 32: for (j = 0; j + 4 <= s; j += 4) { char t1 = line_data[j + 0]; char t2 = line_data[j + 1]; char t3 = line_data[j + 2]; char t4 = line_data[j + 3]; line_data[j + 3] = t1; line_data[j + 2] = t2; line_data[j + 1] = t3; line_data[j + 0] = t4; } break; default: assert (FALSE); break; } } } #define N_LEADING_PROTECTED 10 #define N_TRAILING_PROTECTED 10 typedef struct { void *addr; uint32_t len; uint8_t *trailing; int n_bytes; } 
info_t;

#if FENCE_MALLOC_ACTIVE

unsigned long
fence_get_page_size ()
{
    /* You can fake a page size here, if you want to test e.g. 64 kB
     * pages on a 4 kB page system. Just put a multiplier below.
     */
    return getpagesize ();
}

/* This is apparently necessary on at least OS X */
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Allocate "len" usable bytes surrounded by PROT_NONE guard pages.
 *
 * Layout of the mmap'ed region:
 *   [alignment slack][info page][N_LEADING_PROTECTED guard pages]
 *   [payload: len rounded up to page size][N_TRAILING_PROTECTED guard pages]
 * The info page records the real mmap address and size so fence_free()
 * can unmap the whole region given only the payload pointer.  Any access
 * just before or just after the payload faults.
 * Returns NULL on mmap/mprotect failure; aborts if len < 0. */
void *
fence_malloc (int64_t len)
{
    unsigned long page_size = fence_get_page_size ();
    unsigned long page_mask = page_size - 1;
    uint32_t n_payload_bytes = (len + page_mask) & ~page_mask;
    uint32_t n_bytes =
        (page_size * (N_LEADING_PROTECTED + N_TRAILING_PROTECTED + 2) +
         n_payload_bytes) & ~page_mask;
    uint8_t *initial_page;
    uint8_t *leading_protected;
    uint8_t *trailing_protected;
    uint8_t *payload;
    uint8_t *addr;

    if (len < 0)
        abort();

    addr = mmap (NULL, n_bytes, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (addr == MAP_FAILED)
    {
        printf ("mmap failed on %lld %u\n", (long long int)len, n_bytes);
        return NULL;
    }

    /* Round up to the next page boundary inside the mapping; that first
     * whole page holds the bookkeeping info_t. */
    initial_page = (uint8_t *)(((uintptr_t)addr + page_mask) & ~page_mask);
    leading_protected = initial_page + page_size;
    payload = leading_protected + N_LEADING_PROTECTED * page_size;
    trailing_protected = payload + n_payload_bytes;

    ((info_t *)initial_page)->addr = addr;
    ((info_t *)initial_page)->len = len;
    ((info_t *)initial_page)->trailing = trailing_protected;
    ((info_t *)initial_page)->n_bytes = n_bytes;

    /* Arm the guard pages on both sides of the payload. */
    if ((mprotect (leading_protected, N_LEADING_PROTECTED * page_size,
                   PROT_NONE) == -1) ||
        (mprotect (trailing_protected, N_TRAILING_PROTECTED * page_size,
                   PROT_NONE) == -1))
    {
        munmap (addr, n_bytes);
        return NULL;
    }

    return payload;
}

/* Release a buffer obtained from fence_malloc(): walk back from the
 * payload pointer to the info page and munmap the entire region. */
void
fence_free (void *data)
{
    uint32_t page_size = fence_get_page_size ();
    uint8_t *payload = data;
    uint8_t *leading_protected = payload - N_LEADING_PROTECTED * page_size;
    uint8_t *initial_page = leading_protected - page_size;
    info_t *info = (info_t *)initial_page;

    munmap (info->addr, info->n_bytes);
}

/* pixman destroy callback: frees the fence-allocated pixel buffer. */
static void
fence_image_destroy (pixman_image_t *image, void *data)
{
    fence_free (data);
} /* Create an image with fence pages. * * Creates an image, where the data area is allocated with fence_malloc (). * Each row has an additional page in the stride. * * min_width is only a minimum width for the image. The width is aligned up * for the row size to be divisible by both page size and pixel size. * * If stride_fence is true, the additional page on each row will be * armed to cause SIGSEGV or SIGBUS on all accesses. This should catch * all accesses outside the valid row pixels. */ pixman_image_t * fence_image_create_bits (pixman_format_code_t format, int min_width, int height, pixman_bool_t stride_fence) { unsigned page_size = fence_get_page_size (); unsigned page_mask = page_size - 1; unsigned bitspp = PIXMAN_FORMAT_BPP (format); unsigned bits_boundary; unsigned row_bits; int width; /* pixels */ unsigned stride; /* bytes */ void *pixels; pixman_image_t *image; int i; /* must be power of two */ assert (page_size && (page_size & page_mask) == 0); if (bitspp < 1 || min_width < 1 || height < 1) abort (); /* least common multiple between page size * 8 and bitspp */ bits_boundary = bitspp; while (! 
(bits_boundary & 1)) bits_boundary >>= 1; bits_boundary *= page_size * 8; /* round up to bits_boundary */ row_bits = ROUND_UP ( (unsigned)min_width * bitspp, bits_boundary); width = row_bits / bitspp; stride = row_bits / 8; if (stride_fence) stride += page_size; /* add fence page */ if (UINT_MAX / stride < (unsigned)height) abort (); pixels = fence_malloc (stride * (unsigned)height); if (!pixels) return NULL; if (stride_fence) { uint8_t *guard = (uint8_t *)pixels + stride - page_size; /* arm row end fence pages */ for (i = 0; i < height; i++) { if (mprotect (guard + i * stride, page_size, PROT_NONE) == -1) goto out_fail; } } assert (width >= min_width); image = pixman_image_create_bits_no_clear (format, width, height, pixels, stride); if (!image) goto out_fail; pixman_image_set_destroy_function (image, fence_image_destroy, pixels); return image; out_fail: fence_free (pixels); return NULL; } #else /* FENCE_MALLOC_ACTIVE */ void * fence_malloc (int64_t len) { return malloc (len); } void fence_free (void *data) { free (data); } pixman_image_t * fence_image_create_bits (pixman_format_code_t format, int min_width, int height, pixman_bool_t stride_fence) { return pixman_image_create_bits (format, min_width, height, NULL, 0); /* Implicitly allocated storage does not need a destroy function * to get freed on refcount hitting zero. 
*/ } unsigned long fence_get_page_size () { return 0; } #endif /* FENCE_MALLOC_ACTIVE */ uint8_t * make_random_bytes (int n_bytes) { uint8_t *bytes = fence_malloc (n_bytes); if (!bytes) return NULL; prng_randmemset (bytes, n_bytes, 0); return bytes; } float * make_random_floats (int n_bytes) { uint8_t *bytes = fence_malloc (n_bytes); float *vals = (float *)bytes; if (!bytes) return 0; for (n_bytes /= 4; n_bytes; vals++, n_bytes--) *vals = (float)rand() / (float)RAND_MAX; return (float *)bytes; } void a8r8g8b8_to_rgba_np (uint32_t *dst, uint32_t *src, int n_pixels) { uint8_t *dst8 = (uint8_t *)dst; int i; for (i = 0; i < n_pixels; ++i) { uint32_t p = src[i]; uint8_t a, r, g, b; a = (p & 0xff000000) >> 24; r = (p & 0x00ff0000) >> 16; g = (p & 0x0000ff00) >> 8; b = (p & 0x000000ff) >> 0; if (a != 0) { #define DIVIDE(c, a) \ do \ { \ int t = ((c) * 255) / a; \ (c) = t < 0? 0 : t > 255? 255 : t; \ } while (0) DIVIDE (r, a); DIVIDE (g, a); DIVIDE (b, a); } *dst8++ = r; *dst8++ = g; *dst8++ = b; *dst8++ = a; } } #ifdef HAVE_LIBPNG pixman_bool_t write_png (pixman_image_t *image, const char *filename) { int width = pixman_image_get_width (image); int height = pixman_image_get_height (image); int stride = width * 4; uint32_t *data = malloc (height * stride); pixman_image_t *copy; png_struct *write_struct; png_info *info_struct; pixman_bool_t result = FALSE; FILE *f = fopen (filename, "wb"); png_bytep *row_pointers; int i; if (!f) return FALSE; row_pointers = malloc (height * sizeof (png_bytep)); copy = pixman_image_create_bits ( PIXMAN_a8r8g8b8, width, height, data, stride); pixman_image_composite32 ( PIXMAN_OP_SRC, image, NULL, copy, 0, 0, 0, 0, 0, 0, width, height); a8r8g8b8_to_rgba_np (data, data, height * width); for (i = 0; i < height; ++i) row_pointers[i] = (png_bytep)(data + i * width); if (!(write_struct = png_create_write_struct ( PNG_LIBPNG_VER_STRING, NULL, NULL, NULL))) goto out1; if (!(info_struct = png_create_info_struct (write_struct))) goto out2; png_init_io 
(write_struct, f); png_set_IHDR (write_struct, info_struct, width, height, 8, PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE); png_write_info (write_struct, info_struct); png_write_image (write_struct, row_pointers); png_write_end (write_struct, NULL); result = TRUE; out2: png_destroy_write_struct (&write_struct, &info_struct); out1: if (fclose (f) != 0) result = FALSE; pixman_image_unref (copy); free (row_pointers); free (data); return result; } #else /* no libpng */ pixman_bool_t write_png (pixman_image_t *image, const char *filename) { return FALSE; } #endif static void color8_to_color16 (uint32_t color8, pixman_color_t *color16) { color16->alpha = ((color8 & 0xff000000) >> 24); color16->red = ((color8 & 0x00ff0000) >> 16); color16->green = ((color8 & 0x0000ff00) >> 8); color16->blue = ((color8 & 0x000000ff) >> 0); color16->alpha |= color16->alpha << 8; color16->red |= color16->red << 8; color16->blue |= color16->blue << 8; color16->green |= color16->green << 8; } void draw_checkerboard (pixman_image_t *image, int check_size, uint32_t color1, uint32_t color2) { pixman_color_t check1, check2; pixman_image_t *c1, *c2; int n_checks_x, n_checks_y; int i, j; color8_to_color16 (color1, &check1); color8_to_color16 (color2, &check2); c1 = pixman_image_create_solid_fill (&check1); c2 = pixman_image_create_solid_fill (&check2); n_checks_x = ( pixman_image_get_width (image) + check_size - 1) / check_size; n_checks_y = ( pixman_image_get_height (image) + check_size - 1) / check_size; for (j = 0; j < n_checks_y; j++) { for (i = 0; i < n_checks_x; i++) { pixman_image_t *src; if (((i ^ j) & 1)) src = c1; else src = c2; pixman_image_composite32 (PIXMAN_OP_SRC, src, NULL, image, 0, 0, 0, 0, i * check_size, j * check_size, check_size, check_size); } } } static uint32_t call_test_function (uint32_t (*test_function)(int testnum, int verbose), int testnum, int verbose) { uint32_t retval; #if defined (__GNUC__) && defined (_WIN32) && 
(defined (__i386) || defined (__i386__)) __asm__ ( /* Deliberately avoid aligning the stack to 16 bytes */ "pushl %1\n\t" "pushl %2\n\t" "call *%3\n\t" "addl $8, %%esp\n\t" : "=a" (retval) : "r" (verbose), "r" (testnum), "r" (test_function) : "edx", "ecx"); /* caller save registers */ #else retval = test_function (testnum, verbose); #endif return retval; } /* * A function, which can be used as a core part of the test programs, * intended to detect various problems with the help of fuzzing input * to pixman API (according to some templates, aka "smart" fuzzing). * Some general information about such testing can be found here: * http://en.wikipedia.org/wiki/Fuzz_testing * * It may help detecting: * - crashes on bad handling of valid or reasonably invalid input to * pixman API. * - deviations from the behavior of older pixman releases. * - deviations from the behavior of the same pixman release, but * configured in a different way (for example with SIMD optimizations * disabled), or running on a different OS or hardware. * * The test is performed by calling a callback function a huge number * of times. The callback function is expected to run some snippet of * pixman code with pseudorandom variations to the data feeded to * pixman API. A result of running each callback function should be * some deterministic value which depends on test number (test number * can be used as a seed for PRNG). When 'verbose' argument is nonzero, * callback function is expected to print to stdout some information * about what it does. * * Return values from many small tests are accumulated together and * used as final checksum, which can be compared to some expected * value. Running the tests not individually, but in a batch helps * to reduce process start overhead and also allows to parallelize * testing and utilize multiple CPU cores. * * The resulting executable can be run without any arguments. 
In * this case it runs a batch of tests starting from 1 and up to * 'default_number_of_iterations'. The resulting checksum is * compared with 'expected_checksum' and FAIL or PASS verdict * depends on the result of this comparison. * * If the executable is run with 2 numbers provided as command line * arguments, they specify the starting and ending numbers for a test * batch. * * If the executable is run with only one number provided as a command * line argument, then this number is used to call the callback function * once, and also with verbose flag set. */ int fuzzer_test_main (const char *test_name, int default_number_of_iterations, uint32_t expected_checksum, uint32_t (*test_function)(int testnum, int verbose), int argc, const char *argv[]) { int i, n1 = 1, n2 = 0; uint32_t checksum = 0; int verbose = getenv ("VERBOSE") != NULL; if (argc >= 3) { n1 = atoi (argv[1]); n2 = atoi (argv[2]); if (n2 < n1) { printf ("invalid test range\n"); return 1; } } else if (argc >= 2) { n2 = atoi (argv[1]); checksum = call_test_function (test_function, n2, 1); printf ("%d: checksum=%08X\n", n2, checksum); return 0; } else { n1 = 1; n2 = default_number_of_iterations; } #ifdef USE_OPENMP #pragma omp parallel for reduction(+:checksum) default(none) \ shared(n1, n2, test_function, verbose) #endif for (i = n1; i <= n2; i++) { uint32_t crc = call_test_function (test_function, i, 0); if (verbose) printf ("%d: %08X\n", i, crc); checksum += crc; } if (n1 == 1 && n2 == default_number_of_iterations) { if (checksum == expected_checksum) { printf ("%s test passed (checksum=%08X)\n", test_name, checksum); } else { printf ("%s test failed! 
(checksum=%08X, expected %08X)\n", test_name, checksum, expected_checksum); return 1; } } else { printf ("%d-%d: checksum=%08X\n", n1, n2, checksum); } return 0; } /* Try to obtain current time in seconds */ double gettime (void) { #ifdef HAVE_GETTIMEOFDAY struct timeval tv; gettimeofday (&tv, NULL); return (double)((int64_t)tv.tv_sec * 1000000 + tv.tv_usec) / 1000000.; #else return (double)clock() / (double)CLOCKS_PER_SEC; #endif } uint32_t get_random_seed (void) { union { double d; uint32_t u32; } t; t.d = gettime(); prng_srand (t.u32); return prng_rand (); } #ifdef HAVE_SIGACTION #ifdef HAVE_ALARM static const char *global_msg; static void on_alarm (int signo) { printf ("%s\n", global_msg); exit (1); } #endif #endif void fail_after (int seconds, const char *msg) { #ifdef HAVE_SIGACTION #ifdef HAVE_ALARM struct sigaction action; global_msg = msg; memset (&action, 0, sizeof (action)); action.sa_handler = on_alarm; alarm (seconds); sigaction (SIGALRM, &action, NULL); #endif #endif } void enable_divbyzero_exceptions (void) { #ifdef HAVE_FENV_H #ifdef HAVE_FEENABLEEXCEPT #ifdef HAVE_FEDIVBYZERO feenableexcept (FE_DIVBYZERO); #endif #endif #endif } void enable_invalid_exceptions (void) { #ifdef HAVE_FENV_H #ifdef HAVE_FEENABLEEXCEPT #ifdef FE_INVALID feenableexcept (FE_INVALID); #endif #endif #endif } void * aligned_malloc (size_t align, size_t size) { void *result; #ifdef HAVE_POSIX_MEMALIGN if (posix_memalign (&result, align, size) != 0) result = NULL; #else result = malloc (size); #endif return result; } #define CONVERT_15(c, is_rgb) \ (is_rgb? 
\ ((((c) >> 3) & 0x001f) | \ (((c) >> 6) & 0x03e0) | \ (((c) >> 9) & 0x7c00)) : \ (((((c) >> 16) & 0xff) * 153 + \ (((c) >> 8) & 0xff) * 301 + \ (((c) ) & 0xff) * 58) >> 2)) double convert_srgb_to_linear (double c) { if (c <= 0.04045) return c / 12.92; else return pow ((c + 0.055) / 1.055, 2.4); } double convert_linear_to_srgb (double c) { if (c <= 0.0031308) return c * 12.92; else return 1.055 * pow (c, 1.0/2.4) - 0.055; } void initialize_palette (pixman_indexed_t *palette, uint32_t depth, int is_rgb) { int i; uint32_t mask = (1 << depth) - 1; for (i = 0; i < 32768; ++i) palette->ent[i] = prng_rand() & mask; memset (palette->rgba, 0, sizeof (palette->rgba)); for (i = 0; i < mask + 1; ++i) { uint32_t rgba24; pixman_bool_t retry; uint32_t i15; /* We filled the rgb->index map with random numbers, but we * do need the ability to round trip, that is if some indexed * color expands to an argb24, then the 15 bit version of that * color must map back to the index. Anything else, we don't * care about too much. */ do { uint32_t old_idx; rgba24 = prng_rand(); i15 = CONVERT_15 (rgba24, is_rgb); old_idx = palette->ent[i15]; if (CONVERT_15 (palette->rgba[old_idx], is_rgb) == i15) retry = 1; else retry = 0; } while (retry); palette->rgba[i] = rgba24; palette->ent[i15] = i; } for (i = 0; i < mask + 1; ++i) { assert (palette->ent[CONVERT_15 (palette->rgba[i], is_rgb)] == i); } } struct operator_entry { pixman_op_t op; const char *name; pixman_bool_t is_alias; }; typedef struct operator_entry operator_entry_t; static const operator_entry_t op_list[] = { #define ENTRY(op) \ { PIXMAN_OP_##op, "PIXMAN_OP_" #op, FALSE } #define ALIAS(op, nam) \ { PIXMAN_OP_##op, nam, TRUE } /* operator_name () will return the first hit in this table, * so keep the list properly ordered between entries and aliases. * Aliases are not listed by list_operators (). 
 */
    ENTRY (CLEAR),
    ENTRY (SRC),
    ENTRY (DST),
    ENTRY (OVER),
    ENTRY (OVER_REVERSE),
    ALIAS (OVER_REVERSE, "overrev"),
    ENTRY (IN),
    ENTRY (IN_REVERSE),
    ALIAS (IN_REVERSE, "inrev"),
    ENTRY (OUT),
    ENTRY (OUT_REVERSE),
    ALIAS (OUT_REVERSE, "outrev"),
    ENTRY (ATOP),
    ENTRY (ATOP_REVERSE),
    ALIAS (ATOP_REVERSE, "atoprev"),
    ENTRY (XOR),
    ENTRY (ADD),
    ENTRY (SATURATE),

    /* Disjoint variants of the Porter-Duff operators */
    ENTRY (DISJOINT_CLEAR),
    ENTRY (DISJOINT_SRC),
    ENTRY (DISJOINT_DST),
    ENTRY (DISJOINT_OVER),
    ENTRY (DISJOINT_OVER_REVERSE),
    ENTRY (DISJOINT_IN),
    ENTRY (DISJOINT_IN_REVERSE),
    ENTRY (DISJOINT_OUT),
    ENTRY (DISJOINT_OUT_REVERSE),
    ENTRY (DISJOINT_ATOP),
    ENTRY (DISJOINT_ATOP_REVERSE),
    ENTRY (DISJOINT_XOR),

    /* Conjoint variants of the Porter-Duff operators */
    ENTRY (CONJOINT_CLEAR),
    ENTRY (CONJOINT_SRC),
    ENTRY (CONJOINT_DST),
    ENTRY (CONJOINT_OVER),
    ENTRY (CONJOINT_OVER_REVERSE),
    ENTRY (CONJOINT_IN),
    ENTRY (CONJOINT_IN_REVERSE),
    ENTRY (CONJOINT_OUT),
    ENTRY (CONJOINT_OUT_REVERSE),
    ENTRY (CONJOINT_ATOP),
    ENTRY (CONJOINT_ATOP_REVERSE),
    ENTRY (CONJOINT_XOR),

    /* PDF-style separable and HSL blend modes */
    ENTRY (MULTIPLY),
    ENTRY (SCREEN),
    ENTRY (OVERLAY),
    ENTRY (DARKEN),
    ENTRY (LIGHTEN),
    ENTRY (COLOR_DODGE),
    ENTRY (COLOR_BURN),
    ENTRY (HARD_LIGHT),
    ENTRY (SOFT_LIGHT),
    ENTRY (DIFFERENCE),
    ENTRY (EXCLUSION),
    ENTRY (HSL_HUE),
    ENTRY (HSL_SATURATION),
    ENTRY (HSL_COLOR),
    ENTRY (HSL_LUMINOSITY),

    ALIAS (NONE, "")

#undef ENTRY
#undef ALIAS
};

/* One row of the dither lookup table: code, printable name, and whether
 * the row is an alias (aliases are matched on input but never printed).
 */
typedef struct
{
    pixman_dither_t dither;
    const char *name;
    pixman_bool_t is_alias;
} dither_entry_t;

static const dither_entry_t dither_list[] =
{
#define ENTRY(dither)                                                   \
    { PIXMAN_DITHER_##dither, "PIXMAN_DITHER_" #dither, FALSE }
#define ALIAS(dither, nam)                                              \
    { PIXMAN_DITHER_##dither, nam, TRUE }

    /* dither_name () will return the first hit in this table,
     * so keep the list properly ordered between entries and aliases.
     * Aliases are not listed by list_dithers ().
     */
    ENTRY (ORDERED_BAYER_8),
    ENTRY (ORDERED_BLUE_NOISE_64),
    ENTRY (NONE),

#undef ENTRY
#undef ALIAS
};

/* One row of the pixel-format lookup table: format code, printable name,
 * and whether the row is an alias.
 */
struct format_entry
{
    pixman_format_code_t format;
    const char *name;
    pixman_bool_t is_alias;
};

typedef struct format_entry format_entry_t;

static const format_entry_t format_list[] =
{
#define ENTRY(f)                                                        \
    { PIXMAN_##f, #f, FALSE }
#define ALIAS(f, nam)                                                   \
    { PIXMAN_##f, nam, TRUE }

    /* format_name () will return the first hit in this table,
     * so keep the list properly ordered between entries and aliases.
     * Aliases are not listed by list_formats ().
     */

    /* 128bpp formats */
    ENTRY (rgba_float),
    /* 96bpp formats */
    ENTRY (rgb_float),

    /* 32bpp formats */
    ENTRY (a8r8g8b8),
    ALIAS (a8r8g8b8, "8888"),
    ENTRY (x8r8g8b8),
    ALIAS (x8r8g8b8, "x888"),
    ENTRY (a8b8g8r8),
    ENTRY (x8b8g8r8),
    ENTRY (b8g8r8a8),
    ENTRY (b8g8r8x8),
    ENTRY (r8g8b8a8),
    ENTRY (r8g8b8x8),
    ENTRY (x14r6g6b6),
    ENTRY (x2r10g10b10),
    ALIAS (x2r10g10b10, "2x10"),
    ENTRY (a2r10g10b10),
    ALIAS (a2r10g10b10, "2a10"),
    ENTRY (x2b10g10r10),
    ENTRY (a2b10g10r10),

    /* sRGB formats */
    ENTRY (a8r8g8b8_sRGB),
    ENTRY (r8g8b8_sRGB),

    /* 24bpp formats */
    ENTRY (r8g8b8),
    ALIAS (r8g8b8, "0888"),
    ENTRY (b8g8r8),

    /* 16 bpp formats */
    ENTRY (r5g6b5),
    ALIAS (r5g6b5, "0565"),
    ENTRY (b5g6r5),
    ENTRY (a1r5g5b5),
    ALIAS (a1r5g5b5, "1555"),
    ENTRY (x1r5g5b5),
    ENTRY (a1b5g5r5),
    ENTRY (x1b5g5r5),
    ENTRY (a4r4g4b4),
    ALIAS (a4r4g4b4, "4444"),
    ENTRY (x4r4g4b4),
    ENTRY (a4b4g4r4),
    ENTRY (x4b4g4r4),

    /* 8bpp formats */
    ENTRY (a8),
    ALIAS (a8, "8"),
    ENTRY (r3g3b2),
    ENTRY (b2g3r3),
    ENTRY (a2r2g2b2),
    ALIAS (a2r2g2b2, "2222"),
    ENTRY (a2b2g2r2),

    ALIAS (c8, "x4c4 / c8"),
    /* ENTRY (c8), */
    ALIAS (g8, "x4g4 / g8"),
    /* ENTRY (g8), */

    ENTRY (x4a4),

    /* These format codes are identical to c8 and g8, respectively.
     */
    /* ENTRY (x4c4), */
    /* ENTRY (x4g4), */

    /* 4 bpp formats */
    ENTRY (a4),
    ENTRY (r1g2b1),
    ENTRY (b1g2r1),
    ENTRY (a1r1g1b1),
    ENTRY (a1b1g1r1),
    ALIAS (c4, "c4"),
    /* ENTRY (c4), */
    ALIAS (g4, "g4"),
    /* ENTRY (g4), */

    /* 1bpp formats */
    ENTRY (a1),
    ALIAS (g1, "g1"),
    /* ENTRY (g1), */

    /* YUV formats */
    ALIAS (yuy2, "yuy2"),
    /* ENTRY (yuy2), */
    ALIAS (yv12, "yv12"),
    /* ENTRY (yv12), */

    /* Fake formats, not in pixman_format_code_t enum */
    ALIAS (null, "null"),
    ALIAS (solid, "solid"),
    ALIAS (solid, "n"),
    ALIAS (pixbuf, "pixbuf"),
    ALIAS (rpixbuf, "rpixbuf"),
    ALIAS (unknown, "unknown"),

#undef ENTRY
#undef ALIAS
};

/* Case-insensitive lookup of a format by name or alias.
 * Returns PIXMAN_null when the string matches no table row.
 */
pixman_format_code_t
format_from_string (const char *s)
{
    int i;

    for (i = 0; i < ARRAY_LENGTH (format_list); ++i)
    {
        const format_entry_t *ent = &format_list[i];

        if (strcasecmp (ent->name, s) == 0)
            return ent->format;
    }

    return PIXMAN_null;
}

/* Print one comma-separated token, wrapping the output line once the
 * running character count (*n_chars) exceeds 60 columns.
 */
static void
emit (const char *s, int *n_chars)
{
    *n_chars += printf ("%s,", s);
    if (*n_chars > 60)
    {
        printf ("\n ");
        *n_chars = 0;
    }
    else
    {
        printf (" ");
        (*n_chars)++;
    }
}

/* Print all non-alias format names to stdout. */
void
list_formats (void)
{
    int n_chars;
    int i;

    printf ("Formats:\n ");

    n_chars = 0;
    for (i = 0; i < ARRAY_LENGTH (format_list); ++i)
    {
        const format_entry_t *ent = &format_list[i];

        if (ent->is_alias)
            continue;

        emit (ent->name, &n_chars);
    }

    printf ("\n\n");
}

/* Print all non-alias operator names, lowercased and with the
 * "PIXMAN_OP_" prefix stripped.
 */
void
list_operators (void)
{
    char short_name [128] = { 0 };
    int i, n_chars;

    printf ("Operators:\n ");

    n_chars = 0;
    for (i = 0; i < ARRAY_LENGTH (op_list); ++i)
    {
        const operator_entry_t *ent = &op_list[i];
        int j;

        if (ent->is_alias)
            continue;

        snprintf (short_name, sizeof (short_name) - 1, "%s",
                  ent->name + strlen ("PIXMAN_OP_"));

        for (j = 0; short_name[j] != '\0'; ++j)
            short_name[j] = tolower (short_name[j]);

        emit (short_name, &n_chars);
    }

    printf ("\n\n");
}

/* Print all non-alias dither names to stdout. */
void
list_dithers (void)
{
    int n_chars;
    int i;

    printf ("Dithers:\n ");

    n_chars = 0;
    for (i = 0; i < ARRAY_LENGTH (dither_list); ++i)
    {
        const dither_entry_t *ent = &dither_list[i];

        if (ent->is_alias)
            continue;

        emit (ent->name, &n_chars);
    }

    printf
("\n\n"); } pixman_op_t operator_from_string (const char *s) { int i; for (i = 0; i < ARRAY_LENGTH (op_list); ++i) { const operator_entry_t *ent = &op_list[i]; if (ent->is_alias) { if (strcasecmp (ent->name, s) == 0) return ent->op; } else { if (strcasecmp (ent->name + strlen ("PIXMAN_OP_"), s) == 0) return ent->op; } } return PIXMAN_OP_NONE; } pixman_dither_t dither_from_string (const char *s) { int i; for (i = 0; i < ARRAY_LENGTH (dither_list); ++i) { const dither_entry_t *ent = &dither_list[i]; if (strcasecmp (ent->name, s) == 0) return ent->dither; } return PIXMAN_DITHER_NONE; } const char * operator_name (pixman_op_t op) { int i; for (i = 0; i < ARRAY_LENGTH (op_list); ++i) { const operator_entry_t *ent = &op_list[i]; if (ent->op == op) return ent->name; } return ""; } const char * format_name (pixman_format_code_t format) { int i; for (i = 0; i < ARRAY_LENGTH (format_list); ++i) { const format_entry_t *ent = &format_list[i]; if (ent->format == format) return ent->name; } return ""; }; const char * dither_name (pixman_dither_t dither) { int i; for (i = 0; i < ARRAY_LENGTH (dither_list); ++i) { const dither_entry_t *ent = &dither_list[i]; if (ent->dither == dither) return ent->name; } return ""; } #define IS_ZERO(f) (-DBL_MIN < (f) && (f) < DBL_MIN) typedef double (* blend_func_t) (double as, double s, double ad, double d); static force_inline double blend_multiply (double sa, double s, double da, double d) { return d * s; } static force_inline double blend_screen (double sa, double s, double da, double d) { return d * sa + s * da - s * d; } static force_inline double blend_overlay (double sa, double s, double da, double d) { if (2 * d < da) return 2 * s * d; else return sa * da - 2 * (da - d) * (sa - s); } static force_inline double blend_darken (double sa, double s, double da, double d) { s = s * da; d = d * sa; if (s > d) return d; else return s; } static force_inline double blend_lighten (double sa, double s, double da, double d) { s = s * da; d = d * sa; 
    if (s > d)
        return s;
    else
        return d;
}

static force_inline double
blend_color_dodge (double sa, double s, double da, double d)
{
    if (IS_ZERO (d))
        return 0.0f;
    else if (d * sa >= sa * da - s * da)
        return sa * da;
    else if (IS_ZERO (sa - s))
        return sa * da;
    else
        return sa * sa * d / (sa - s);
}

static force_inline double
blend_color_burn (double sa, double s, double da, double d)
{
    if (d >= da)
        return sa * da;
    else if (sa * (da - d) >= s * da)
        return 0.0f;
    else if (IS_ZERO (s))
        return 0.0f;
    else
        return sa * (da - sa * (da - d) / s);
}

static force_inline double
blend_hard_light (double sa, double s, double da, double d)
{
    /* Same formula as overlay, but the branch tests the source
     * channel instead of the destination channel. */
    if (2 * s < sa)
        return 2 * s * d;
    else
        return sa * da - 2 * (da - d) * (sa - s);
}

static force_inline double
blend_soft_light (double sa, double s, double da, double d)
{
    if (2 * s <= sa)
    {
        if (IS_ZERO (da))
            return d * sa;
        else
            return d * sa - d * (da - d) * (sa - 2 * s) / da;
    }
    else
    {
        if (IS_ZERO (da))
        {
            return d * sa;
        }
        else
        {
            /* Piecewise polynomial below da/4, square root above. */
            if (4 * d <= da)
                return d * sa + (2 * s - sa) * d * ((16 * d / da - 12) * d / da + 3);
            else
                return d * sa + (sqrt (d * da) - d) * (2 * s - sa);
        }
    }
}

static force_inline double
blend_difference (double sa, double s, double da, double d)
{
    double dsa = d * sa;
    double sda = s * da;

    /* Absolute difference of the cross-multiplied channels. */
    if (sda < dsa)
        return dsa - sda;
    else
        return sda - dsa;
}

static force_inline double
blend_exclusion (double sa, double s, double da, double d)
{
    return s * da + d * sa - 2 * d * s;
}

/* Clamp a channel value to [0.0, 1.0]. */
static double
clamp (double d)
{
    if (d > 1.0)
        return 1.0;
    else if (d < 0.0)
        return 0.0;
    else
        return d;
}

/* Apply a separable blend function to one channel: the (1 - ad) and
 * (1 - as) terms composite the parts of source and destination that do
 * not overlap, and blend () handles the overlapping part.
 */
static double
blend_channel (double as, double s, double ad, double d,
               blend_func_t blend)
{
    return clamp ((1 - ad) * s + (1 - as) * d + blend (as, s, ad, d));
}

/* Reference per-channel evaluation of the Porter-Duff operators
 * (plain, disjoint and conjoint variants).  src/dst are premultiplied
 * channel values; srca/dsta are the corresponding alphas.
 */
static double
calc_op (pixman_op_t op, double src, double dst, double srca, double dsta)
{
/* Fa scales the source term, Fb the destination term; the sum is
 * clamped to 1.0. */
#define mult_chan(src, dst, Fa, Fb) MIN ((src) * (Fa) + (dst) * (Fb), 1.0)

    double Fa, Fb;

    switch (op)
    {
    case PIXMAN_OP_CLEAR:
    case PIXMAN_OP_DISJOINT_CLEAR:
    case PIXMAN_OP_CONJOINT_CLEAR:
        return mult_chan (src, dst, 0.0, 0.0);

    case PIXMAN_OP_SRC:
    case PIXMAN_OP_DISJOINT_SRC:
    case PIXMAN_OP_CONJOINT_SRC:
        return mult_chan (src, dst, 1.0, 0.0);

    case PIXMAN_OP_DST:
    case PIXMAN_OP_DISJOINT_DST:
    case PIXMAN_OP_CONJOINT_DST:
        return mult_chan (src, dst, 0.0, 1.0);

    case PIXMAN_OP_OVER:
        return mult_chan (src, dst, 1.0, 1.0 - srca);

    case PIXMAN_OP_OVER_REVERSE:
        return mult_chan (src, dst, 1.0 - dsta, 1.0);

    case PIXMAN_OP_IN:
        return mult_chan (src, dst, dsta, 0.0);

    case PIXMAN_OP_IN_REVERSE:
        return mult_chan (src, dst, 0.0, srca);

    case PIXMAN_OP_OUT:
        return mult_chan (src, dst, 1.0 - dsta, 0.0);

    case PIXMAN_OP_OUT_REVERSE:
        return mult_chan (src, dst, 0.0, 1.0 - srca);

    case PIXMAN_OP_ATOP:
        return mult_chan (src, dst, dsta, 1.0 - srca);

    case PIXMAN_OP_ATOP_REVERSE:
        return mult_chan (src, dst, 1.0 - dsta, srca);

    case PIXMAN_OP_XOR:
        return mult_chan (src, dst, 1.0 - dsta, 1.0 - srca);

    case PIXMAN_OP_ADD:
        return mult_chan (src, dst, 1.0, 1.0);

    /* The disjoint/conjoint factors guard every division by a zero
     * alpha explicitly, falling back to the limit value. */
    case PIXMAN_OP_SATURATE:
    case PIXMAN_OP_DISJOINT_OVER_REVERSE:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        return mult_chan (src, dst, Fa, 1.0);

    case PIXMAN_OP_DISJOINT_OVER:
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, 1.0, Fb);

    case PIXMAN_OP_DISJOINT_IN:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - (1.0 - dsta) / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_DISJOINT_IN_REVERSE:
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - (1.0 - srca) / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_DISJOINT_OUT:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_DISJOINT_OUT_REVERSE:
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_DISJOINT_ATOP:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - (1.0 - dsta) / srca);
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_DISJOINT_ATOP_REVERSE:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - (1.0 - srca) / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_DISJOINT_XOR:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_CONJOINT_OVER:
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, 1.0, Fb);

    case PIXMAN_OP_CONJOINT_OVER_REVERSE:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        return mult_chan (src, dst, Fa, 1.0);

    case PIXMAN_OP_CONJOINT_IN:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, dsta / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_CONJOINT_IN_REVERSE:
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, srca / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_CONJOINT_OUT:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_CONJOINT_OUT_REVERSE:
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_CONJOINT_ATOP:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, dsta / srca);
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_CONJOINT_ATOP_REVERSE:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, srca / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_CONJOINT_XOR:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, Fa, Fb);

    /* The blend modes are handled by do_composite (), never here. */
    case PIXMAN_OP_MULTIPLY:
    case PIXMAN_OP_SCREEN:
    case PIXMAN_OP_OVERLAY:
    case PIXMAN_OP_DARKEN:
    case PIXMAN_OP_LIGHTEN:
    case PIXMAN_OP_COLOR_DODGE:
    case PIXMAN_OP_COLOR_BURN:
    case PIXMAN_OP_HARD_LIGHT:
    case PIXMAN_OP_SOFT_LIGHT:
    case PIXMAN_OP_DIFFERENCE:
    case PIXMAN_OP_EXCLUSION:
    case PIXMAN_OP_HSL_HUE:
    case PIXMAN_OP_HSL_SATURATION:
    case PIXMAN_OP_HSL_COLOR:
    case PIXMAN_OP_HSL_LUMINOSITY:
    default:
        abort();
        return 0; /* silence MSVC */
    }
#undef mult_chan
}

/* Reference implementation of a composite: apply @mask to @src (either
 * per-component or by alpha only), then combine with @dst using @op.
 * Blend-mode operators go through blend_channel (); the blend_funcs
 * table order must match the pixman_op_t enum from PIXMAN_OP_MULTIPLY
 * onward, since the operator is used as an index below.
 */
void
do_composite (pixman_op_t op,
              const color_t *src,
              const color_t *mask,
              const color_t *dst,
              color_t *result,
              pixman_bool_t component_alpha)
{
    color_t srcval, srcalpha;

    static const blend_func_t blend_funcs[] =
    {
        blend_multiply,
        blend_screen,
        blend_overlay,
        blend_darken,
        blend_lighten,
        blend_color_dodge,
        blend_color_burn,
        blend_hard_light,
        blend_soft_light,
        blend_difference,
        blend_exclusion,
    };

    if (mask == NULL)
    {
        srcval = *src;

        srcalpha.r = src->a;
        srcalpha.g = src->a;
        srcalpha.b = src->a;
        srcalpha.a = src->a;
    }
    else if (component_alpha)
    {
        /* Component alpha: each mask channel scales the matching
         * source channel, and the effective alpha differs per channel. */
        srcval.r = src->r * mask->r;
        srcval.g = src->g * mask->g;
        srcval.b = src->b * mask->b;
        srcval.a = src->a * mask->a;

        srcalpha.r = src->a * mask->r;
        srcalpha.g = src->a * mask->g;
        srcalpha.b = src->a * mask->b;
        srcalpha.a = src->a * mask->a;
    }
    else
    {
        /* Unified alpha: only the mask's alpha channel is used. */
        srcval.r = src->r * mask->a;
        srcval.g = src->g * mask->a;
        srcval.b = src->b * mask->a;
        srcval.a = src->a * mask->a;

        srcalpha.r = src->a * mask->a;
        srcalpha.g = src->a * mask->a;
        srcalpha.b = src->a * mask->a;
        srcalpha.a = src->a * mask->a;
    }

    if (op >= PIXMAN_OP_MULTIPLY)
    {
        blend_func_t func = blend_funcs[op - PIXMAN_OP_MULTIPLY];

        result->a = srcalpha.a + dst->a - srcalpha.a * dst->a;
        result->r = blend_channel (srcalpha.r, srcval.r, dst->a, dst->r, func);
        result->g = blend_channel (srcalpha.g, srcval.g, dst->a, dst->g, func);
        result->b = blend_channel (srcalpha.b, srcval.b, dst->a, dst->b, func);
    }
    else
    {
        result->r = calc_op (op, srcval.r, dst->r, srcalpha.r, dst->a);
        result->g = calc_op (op, srcval.g, dst->g, srcalpha.g, dst->a);
        result->b = calc_op (op, srcval.b, dst->b, srcalpha.b, dst->a);
        result->a = calc_op (op, srcval.a, dst->a, srcalpha.a,
dst->a);
    }
}

/* Quantize channel value @p (in [0, 1]) to an m-bit channel and convert
 * it back to a double in [0, 1].
 */
static double
round_channel (double p, int m)
{
    int t;
    double r;

    t = p * ((1 << m));
    t -= t >> m;

    r = t / (double)((1 << m) - 1);

    return r;
}

/* Quantize all channels of @color to the channel widths of @format.
 * Formats with no red channel zero the RGB channels; formats with no
 * alpha channel force alpha to fully opaque.
 */
void
round_color (pixman_format_code_t format, color_t *color)
{
    if (PIXMAN_FORMAT_R (format) == 0)
    {
        color->r = 0.0;
        color->g = 0.0;
        color->b = 0.0;
    }
    else
    {
        color->r = round_channel (color->r, PIXMAN_FORMAT_R (format));
        color->g = round_channel (color->g, PIXMAN_FORMAT_G (format));
        color->b = round_channel (color->b, PIXMAN_FORMAT_B (format));
    }

    if (PIXMAN_FORMAT_A (format) == 0)
        color->a = 1;
    else
        color->a = round_channel (color->a, PIXMAN_FORMAT_A (format));
}

/* The acceptable deviation in units of [0.0, 1.0]
 */
#define DEVIATION (0.0128)

/* Check whether @pixel is a valid quantization of the a, r, g, b
 * parameters. Some slack is permitted.
 */
void
pixel_checker_init (pixel_checker_t *checker, pixman_format_code_t format)
{
    assert (PIXMAN_FORMAT_VIS (format));

    checker->format = format;

    /* Floating point formats need no shift/mask bookkeeping. */
    if (format == PIXMAN_rgba_float ||
        format == PIXMAN_rgb_float)
        return;

    switch (PIXMAN_FORMAT_TYPE (format))
    {
    case PIXMAN_TYPE_A:
        checker->bs = 0;
        checker->gs = 0;
        checker->rs = 0;
        checker->as = 0;
        break;

    case PIXMAN_TYPE_ARGB:
    case PIXMAN_TYPE_ARGB_SRGB:
        checker->bs = 0;
        checker->gs = checker->bs + PIXMAN_FORMAT_B (format);
        checker->rs = checker->gs + PIXMAN_FORMAT_G (format);
        checker->as = checker->rs + PIXMAN_FORMAT_R (format);
        break;

    case PIXMAN_TYPE_ABGR:
        checker->rs = 0;
        checker->gs = checker->rs + PIXMAN_FORMAT_R (format);
        checker->bs = checker->gs + PIXMAN_FORMAT_G (format);
        checker->as = checker->bs + PIXMAN_FORMAT_B (format);
        break;

    case PIXMAN_TYPE_BGRA:
        /* With BGRA formats we start counting at the high end of the pixel */
        /* NOTE(review): each next shift subtracts the PREVIOUS field's
         * width rather than the next field's own width; this only gives
         * the right answer because all BGRA/RGBA formats here have equal
         * channel widths — confirm before adding formats with unequal
         * widths. */
        checker->bs = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_B (format);
        checker->gs = checker->bs - PIXMAN_FORMAT_B (format);
        checker->rs = checker->gs - PIXMAN_FORMAT_G (format);
        checker->as = checker->rs - PIXMAN_FORMAT_R (format);
        break;

    case PIXMAN_TYPE_RGBA:
        /* With BGRA formats we start counting at the high end of the pixel */
        checker->rs = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_R (format);
        checker->gs = checker->rs - PIXMAN_FORMAT_R (format);
        checker->bs = checker->gs - PIXMAN_FORMAT_G (format);
        checker->as = checker->bs - PIXMAN_FORMAT_B (format);
        break;

    default:
        assert (0);
        break;
    }

    /* Per-channel extraction masks, already shifted into position. */
    checker->am = ((1U << PIXMAN_FORMAT_A (format)) - 1) << checker->as;
    checker->rm = ((1U << PIXMAN_FORMAT_R (format)) - 1) << checker->rs;
    checker->gm = ((1U << PIXMAN_FORMAT_G (format)) - 1) << checker->gs;
    checker->bm = ((1U << PIXMAN_FORMAT_B (format)) - 1) << checker->bs;

    checker->aw = PIXMAN_FORMAT_A (format);
    checker->rw = PIXMAN_FORMAT_R (format);
    checker->gw = PIXMAN_FORMAT_G (format);
    checker->bw = PIXMAN_FORMAT_B (format);

    checker->ad = DEVIATION;
    checker->rd = DEVIATION;
    checker->gd = DEVIATION;
    checker->bd = DEVIATION;
}

/* When dithering is enabled, we allow one extra pixel of tolerance
 */
void
pixel_checker_allow_dither (pixel_checker_t *checker)
{
    checker->ad += 1 / (double)((1 << checker->aw) - 1);
    checker->rd += 1 / (double)((1 << checker->rw) - 1);
    checker->gd += 1 / (double)((1 << checker->gw) - 1);
    checker->bd += 1 / (double)((1 << checker->bw) - 1);
}

/* The mask/shift helpers below only make sense for packed-integer
 * formats, never for the float formats. */
static void
pixel_checker_require_uint32_format (const pixel_checker_t *checker)
{
    assert (checker->format != PIXMAN_rgba_float &&
            checker->format != PIXMAN_rgb_float);
}

/* Extract the raw integer channel values from a packed pixel. */
void
pixel_checker_split_pixel (const pixel_checker_t *checker, uint32_t pixel,
                           int *a, int *r, int *g, int *b)
{
    pixel_checker_require_uint32_format(checker);

    *a = (pixel & checker->am) >> checker->as;
    *r = (pixel & checker->rm) >> checker->rs;
    *g = (pixel & checker->gm) >> checker->gs;
    *b = (pixel & checker->bm) >> checker->bs;
}

/* Report the per-channel extraction masks; any output pointer may be NULL. */
void
pixel_checker_get_masks (const pixel_checker_t *checker,
                         uint32_t *am, uint32_t *rm, uint32_t *gm, uint32_t *bm)
{
    pixel_checker_require_uint32_format(checker);

    if (am)
        *am = checker->am;
    if (rm)
        *rm = checker->rm;
    if (gm)
        *gm = checker->gm;
    if (bm)
        *bm = checker->bm;
}

/* Convert a packed pixel to normalized doubles in [0, 1], treating a
 * missing alpha channel as opaque and undoing sRGB encoding for sRGB
 * formats. */
void
pixel_checker_convert_pixel_to_color (const pixel_checker_t *checker,
                                      uint32_t pixel, color_t *color)
{
    int a, r, g, b;

    pixel_checker_require_uint32_format(checker);

    pixel_checker_split_pixel (checker, pixel, &a, &r, &g, &b);

    if (checker->am == 0)
        color->a = 1.0;
    else
        color->a = a / (double)(checker->am >> checker->as);

    if (checker->rm == 0)
        color->r = 0.0;
    else
        color->r = r / (double)(checker->rm >> checker->rs);

    if (checker->gm == 0)
        color->g = 0.0;
    else
        color->g = g / (double)(checker->gm >> checker->gs);

    if (checker->bm == 0)
        color->b = 0.0;
    else
        color->b = b / (double)(checker->bm >> checker->bs);

    if (PIXMAN_FORMAT_TYPE (checker->format) == PIXMAN_TYPE_ARGB_SRGB)
    {
        color->r = convert_srgb_to_linear (color->r);
        color->g = convert_srgb_to_linear (color->g);
        color->b = convert_srgb_to_linear (color->b);
    }
}

/* Quantize a [0, 1] channel value to a width-bit integer; channels with
 * an empty mask use @def instead of @v. */
static int32_t
convert (double v, uint32_t width, uint32_t mask, uint32_t shift, double def)
{
    int32_t r;

    if (!mask)
        v = def;

    r = (v * ((mask >> shift) + 1));
    r -= r >> width;

    return r;
}

/* Compute the quantized channel bound at (color + sign * deviation),
 * converting back to sRGB first for sRGB formats. */
static void
get_limits (const pixel_checker_t *checker, double sign,
            color_t *color,
            int *ao, int *ro, int *go, int *bo)
{
    color_t tmp;

    if (PIXMAN_FORMAT_TYPE (checker->format) == PIXMAN_TYPE_ARGB_SRGB)
    {
        tmp.a = color->a;
        tmp.r = convert_linear_to_srgb (color->r);
        tmp.g = convert_linear_to_srgb (color->g);
        tmp.b = convert_linear_to_srgb (color->b);

        color = &tmp;
    }

    *ao = convert (color->a + sign * checker->ad,
                   checker->aw, checker->am, checker->as, 1.0);
    *ro = convert (color->r + sign * checker->rd,
                   checker->rw, checker->rm, checker->rs, 0.0);
    *go = convert (color->g + sign * checker->gd,
                   checker->gw, checker->gm, checker->gs, 0.0);
    *bo = convert (color->b + sign * checker->bd,
                   checker->bw, checker->bm, checker->bs, 0.0);
}

/* Upper acceptable channel bounds for @color. */
void
pixel_checker_get_max (const pixel_checker_t *checker, color_t *color,
                       int *am, int *rm, int *gm, int *bm)
{
    pixel_checker_require_uint32_format(checker);

    get_limits (checker, 1, color, am, rm, gm, bm);
}

/* Lower acceptable channel bounds for @color. */
void
pixel_checker_get_min (const
pixel_checker_t *checker, color_t *color, int *am, int *rm, int *gm, int *bm) { pixel_checker_require_uint32_format(checker); get_limits (checker, - 1, color, am, rm, gm, bm); } pixman_bool_t pixel_checker_check (const pixel_checker_t *checker, uint32_t pixel, color_t *color) { int32_t a_lo, a_hi, r_lo, r_hi, g_lo, g_hi, b_lo, b_hi; int32_t ai, ri, gi, bi; pixman_bool_t result; pixel_checker_require_uint32_format(checker); pixel_checker_get_min (checker, color, &a_lo, &r_lo, &g_lo, &b_lo); pixel_checker_get_max (checker, color, &a_hi, &r_hi, &g_hi, &b_hi); pixel_checker_split_pixel (checker, pixel, &ai, &ri, &gi, &bi); result = a_lo <= ai && ai <= a_hi && r_lo <= ri && ri <= r_hi && g_lo <= gi && gi <= g_hi && b_lo <= bi && bi <= b_hi; return result; } static void color_limits (const pixel_checker_t *checker, double limit, const color_t *color, color_t *out) { if (PIXMAN_FORMAT_A(checker->format)) out->a = color->a + limit; else out->a = 1.; out->r = color->r + limit; out->g = color->g + limit; out->b = color->b + limit; } pixman_bool_t pixel_checker_check_color (const pixel_checker_t *checker, const color_t *actual, const color_t *reference) { color_t min, max; pixman_bool_t result; color_limits(checker, -DEVIATION, reference, &min); color_limits(checker, DEVIATION, reference, &max); result = actual->a >= min.a && actual->a <= max.a && actual->r >= min.r && actual->r <= max.r && actual->g >= min.g && actual->g <= max.g && actual->b >= min.b && actual->b <= max.b; return result; } ././@PaxHeader0000000000000000000000000000002600000000000010213 xustar0022 mtime=1730825491.0 pixman-0.44.0/test/utils/utils.h0000664000175000017500000001774714712446423016566 0ustar00mattst88mattst88#ifdef HAVE_CONFIG_H #include #endif #include #include "pixman-private.h" /* For 'inline' definition */ #include "utils-prng.h" #if defined(_MSC_VER) #define snprintf _snprintf #define strcasecmp _stricmp #endif #define ARRAY_LENGTH(A) ((int) (sizeof (A) / sizeof ((A) [0]))) /* A primitive 
pseudorandom number generator, * taken from POSIX.1-2001 example */ extern prng_t prng_state_data; extern prng_t *prng_state; #ifdef USE_OPENMP #pragma omp threadprivate(prng_state_data) #pragma omp threadprivate(prng_state) #endif static inline uint32_t prng_rand (void) { return prng_rand_r (prng_state); } static inline void prng_srand (uint32_t seed) { if (!prng_state) { /* Without setting a seed, PRNG does not work properly (is just * returning zeros). So we only initialize the pointer here to * make sure that 'prng_srand' is always called before any * other 'prng_*' function. The wrongdoers violating this order * will get a segfault. */ prng_state = &prng_state_data; } prng_srand_r (prng_state, seed); } static inline uint32_t prng_rand_n (int max) { return prng_rand () % max; } static inline void prng_randmemset (void *buffer, size_t size, prng_randmemset_flags_t flags) { prng_randmemset_r (prng_state, buffer, size, flags); } /* CRC 32 computation */ uint32_t compute_crc32 (uint32_t in_crc32, const void *buf, size_t buf_len); uint32_t compute_crc32_for_image (uint32_t in_crc32, pixman_image_t *image); /* Print the image in hexadecimal */ void print_image (pixman_image_t *image); /* Returns TRUE if running on a little endian system */ static force_inline pixman_bool_t is_little_endian (void) { unsigned long endian_check_var = 1; return *(unsigned char *)&endian_check_var == 1; } /* perform endian conversion of pixel data */ void image_endian_swap (pixman_image_t *img); #if defined (HAVE_MPROTECT) && defined (HAVE_GETPAGESIZE) && \ defined (HAVE_SYS_MMAN_H) && defined (HAVE_MMAP) /* fence_malloc and friends have working fence implementation. * Without this, fence_malloc still allocs but does not catch * out-of-bounds accesses. 
*/ #define FENCE_MALLOC_ACTIVE 1 #else #define FENCE_MALLOC_ACTIVE 0 #endif /* Allocate memory that is bounded by protected pages, * so that out-of-bounds access will cause segfaults */ void * fence_malloc (int64_t len); void fence_free (void *data); pixman_image_t * fence_image_create_bits (pixman_format_code_t format, int min_width, int height, pixman_bool_t stride_fence); /* Return the page size if FENCE_MALLOC_ACTIVE, or zero otherwise */ unsigned long fence_get_page_size (); /* Generate n_bytes random bytes in fence_malloced memory */ uint8_t * make_random_bytes (int n_bytes); float * make_random_floats (int n_bytes); /* Return current time in seconds */ double gettime (void); uint32_t get_random_seed (void); /* main body of the fuzzer test */ int fuzzer_test_main (const char *test_name, int default_number_of_iterations, uint32_t expected_checksum, uint32_t (*test_function)(int testnum, int verbose), int argc, const char *argv[]); void fail_after (int seconds, const char *msg); /* If possible, enable traps for floating point exceptions */ void enable_divbyzero_exceptions(void); void enable_invalid_exceptions(void); /* Converts a8r8g8b8 pixels to pixels that * - are not premultiplied, * - are stored in this order in memory: R, G, B, A, regardless of * the endianness of the computer. * It is allowed for @src and @dst to point to the same memory buffer. */ void a8r8g8b8_to_rgba_np (uint32_t *dst, uint32_t *src, int n_pixels); pixman_bool_t write_png (pixman_image_t *image, const char *filename); void draw_checkerboard (pixman_image_t *image, int check_size, uint32_t color1, uint32_t color2); /* A pair of macros which can help to detect corruption of * floating point registers after a function call. This may * happen if _mm_empty() call is forgotten in MMX/SSE2 fast * path code, or ARM NEON assembly optimized function forgets * to save/restore d8-d15 registers before use. 
*/ #define FLOAT_REGS_CORRUPTION_DETECTOR_START() \ static volatile double frcd_volatile_constant1 = 123451; \ static volatile double frcd_volatile_constant2 = 123452; \ static volatile double frcd_volatile_constant3 = 123453; \ static volatile double frcd_volatile_constant4 = 123454; \ static volatile double frcd_volatile_constant5 = 123455; \ static volatile double frcd_volatile_constant6 = 123456; \ static volatile double frcd_volatile_constant7 = 123457; \ static volatile double frcd_volatile_constant8 = 123458; \ double frcd_canary_variable1 = frcd_volatile_constant1; \ double frcd_canary_variable2 = frcd_volatile_constant2; \ double frcd_canary_variable3 = frcd_volatile_constant3; \ double frcd_canary_variable4 = frcd_volatile_constant4; \ double frcd_canary_variable5 = frcd_volatile_constant5; \ double frcd_canary_variable6 = frcd_volatile_constant6; \ double frcd_canary_variable7 = frcd_volatile_constant7; \ double frcd_canary_variable8 = frcd_volatile_constant8; #define FLOAT_REGS_CORRUPTION_DETECTOR_FINISH() \ assert (frcd_canary_variable1 == frcd_volatile_constant1); \ assert (frcd_canary_variable2 == frcd_volatile_constant2); \ assert (frcd_canary_variable3 == frcd_volatile_constant3); \ assert (frcd_canary_variable4 == frcd_volatile_constant4); \ assert (frcd_canary_variable5 == frcd_volatile_constant5); \ assert (frcd_canary_variable6 == frcd_volatile_constant6); \ assert (frcd_canary_variable7 == frcd_volatile_constant7); \ assert (frcd_canary_variable8 == frcd_volatile_constant8); /* Try to get an aligned memory chunk */ void * aligned_malloc (size_t align, size_t size); double convert_srgb_to_linear (double component); double convert_linear_to_srgb (double component); void initialize_palette (pixman_indexed_t *palette, uint32_t depth, int is_rgb); pixman_format_code_t format_from_string (const char *s); void list_formats (void); void list_operators (void); void list_dithers (void); pixman_op_t operator_from_string (const char *s); pixman_dither_t 
dither_from_string (const char *s); const char * operator_name (pixman_op_t op); const char * format_name (pixman_format_code_t format); const char * dither_name (pixman_dither_t dither); typedef struct { double r, g, b, a; } color_t; void do_composite (pixman_op_t op, const color_t *src, const color_t *mask, const color_t *dst, color_t *result, pixman_bool_t component_alpha); void round_color (pixman_format_code_t format, color_t *color); typedef struct { pixman_format_code_t format; uint32_t am, rm, gm, bm; uint32_t as, rs, gs, bs; uint32_t aw, rw, gw, bw; float ad, rd, gd, bd; } pixel_checker_t; void pixel_checker_init (pixel_checker_t *checker, pixman_format_code_t format); void pixel_checker_allow_dither (pixel_checker_t *checker); void pixel_checker_split_pixel (const pixel_checker_t *checker, uint32_t pixel, int *a, int *r, int *g, int *b); void pixel_checker_get_max (const pixel_checker_t *checker, color_t *color, int *a, int *r, int *g, int *b); void pixel_checker_get_min (const pixel_checker_t *checker, color_t *color, int *a, int *r, int *g, int *b); pixman_bool_t pixel_checker_check (const pixel_checker_t *checker, uint32_t pixel, color_t *color); void pixel_checker_convert_pixel_to_color (const pixel_checker_t *checker, uint32_t pixel, color_t *color); void pixel_checker_get_masks (const pixel_checker_t *checker, uint32_t *am, uint32_t *rm, uint32_t *gm, uint32_t *bm);