ipu7-drivers-0~git202407090310.f40d5a70/.github/workflows/test.yml

name: test

on:
  push:
  pull_request:
  schedule:
    - cron: '30 1 * * *' # every day at 1:30am

permissions: {}

jobs:
  Ubuntu-2404-dkms:
    runs-on: ubuntu-latest
    container: ubuntu:24.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Prepare environment
        shell: bash
        run: |
          sed -i 's/noble-updates/noble-updates noble-proposed/' /etc/apt/sources.list.d/ubuntu.sources
          apt-get update --quiet
          apt-get install --yes --no-install-recommends dkms gpg-agent kmod software-properties-common sudo
      - name: Download header files
        shell: bash
        run: |
          # latest generic kernel headers
          apt-get install --yes linux-headers-generic linux-headers-generic-hwe-24.04-edge
          # latest oem kernels
          apt-get install --yes linux-headers-oem-24.04a linux-headers-oem-24.04b
      - name: Register with dkms
        shell: bash
        run: |
          dkms add .
      - name: Compile driver
        shell: bash
        run: |
          # run dkms build against all available kernel headers
          failed=""
          succeeded=""
          for kver in /lib/modules/*/build; do
            # Skip the kernel headers from the azure kernel. These kernel
            # headers are preinstalled and of no interest.
            if [[ "$kver" == *"azure"* ]]; then
              echo "Skipping $kver - this is the kernel of the GitHub runner."
              continue
            fi
            test -d "$kver" || continue
            kver=${kver%/build}
            kver=${kver##*/}
            echo "=== Testing ${kver} ==="
            echo "running: dkms build -m ipu7-drivers/0.0.0 -k ${kver}"
            ret=$(sudo dkms build -m ipu7-drivers/0.0.0 -k ${kver} >&2; echo $?)
            if [ ${ret} -eq 0 ]; then
              succeeded="${succeeded} ${kver}"
            else
              echo "#### Unexpected build failure on ${kver}"
              failed="${failed} ${kver}"
            fi
          done
          if [ "x${failed}" != "x" ]; then
            echo "#### Failed kernels: ${failed}"
            exit 1
          fi
          echo "#### Successful builds for kernels: ${succeeded}"

ipu7-drivers-0~git202407090310.f40d5a70/CODE_OF_CONDUCT.md

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances
  of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
  without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail
address, posting via an official social media account, or acting as an
appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
CommunityCodeOfConduct AT intel DOT com. All complaints will be reviewed and
investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time.
No public or private interaction with the people involved, including
unsolicited interaction with those enforcing the Code of Conduct, is allowed
during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

ipu7-drivers-0~git202407090310.f40d5a70/CONTRIBUTING.md

# Contributing

### License

ipu7-drivers is licensed under the terms in [LICENSE]. By contributing to the
project, you agree to the license and copyright terms therein and release your
contribution under these terms.

### Sign your work

Please use the sign-off line at the end of the patch. Your signature certifies
that you wrote the patch or otherwise have the right to pass it on as an
open-source patch. The rules are pretty simple: if you can certify the below
(from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

ipu7-drivers-0~git202407090310.f40d5a70/Makefile

# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2022 Intel Corporation.

KERNELRELEASE ?= $(shell uname -r)
KERNEL_SRC ?= /lib/modules/$(KERNELRELEASE)/build
MODSRC := $(shell pwd)

export EXTERNAL_BUILD = 1
export CONFIG_VIDEO_INTEL_IPU7 = m
export CONFIG_IPU_BRIDGE = y

obj-y += drivers/media/pci/intel/ipu7/

subdir-ccflags-y += -I$(src)/include
subdir-ccflags-$(CONFIG_IPU_BRIDGE) += \
	-DCONFIG_IPU_BRIDGE
subdir-ccflags-y += $(subdir-ccflags-m)

all:
	$(MAKE) -C $(KERNEL_SRC) M=$(MODSRC) modules

modules_install:
	$(MAKE) INSTALL_MOD_DIR=updates -C $(KERNEL_SRC) M=$(MODSRC) modules_install

clean:
	$(MAKE) -C $(KERNEL_SRC) M=$(MODSRC) clean

ipu7-drivers-0~git202407090310.f40d5a70/README.md

# ipu7-drivers

This repository supports MIPI cameras through the IPU7 on the Intel Lunar Lake
platform. There are 4 repositories that provide the complete setup:

- https://github.com/intel/ipu7-drivers - kernel drivers for the IPU and sensors
- https://github.com/intel/ipu7-camera-bins - IPU firmware and proprietary image processing libraries
- https://github.com/intel/ipu7-camera-hal - HAL for processing of images in userspace
- https://github.com/intel/icamerasrc/tree/icamerasrc_slim_api (branch: icamerasrc_slim_api) - GStreamer src plugin

## Content of this repository

- IPU7 kernel driver
- Kernel patches needed

## Dependencies

- Kernel config IPU_BRIDGE should be enabled when the kernel version is >= v6.6.
- Kernel config INTEL_SKL_INT3472 should be enabled if the camera sensor driver
  uses discrete power control logic, like ov13b10.c.
- The camera sensor drivers you are using should be enabled in the kernel config.
- For other camera sensor drivers and related ipu-bridge changes, please
  [go to the IPU6 release repository](https://github.com/intel/ipu6-drivers).

## Build instructions

Three ways are available:

1. Build with the kernel source code tree (in-tree build)
2. Build with kernel headers only (out-of-tree build)
3. Build and install by dkms (DKMS build)

### 1. In-tree build

- Tested with kernel v6.8 ~ v6.11-rc2

1. Check out the kernel source code.
2. Patch the kernel source code, using the patches under `patch/` and
   `patch/<kernel-version>/in-tree-build`, depending on what you need.
3. Copy the `drivers` and `include` folders to the kernel source code.
4. Enable the following settings in .config:
   ```conf
   CONFIG_VIDEO_INTEL_IPU7=m
   CONFIG_IPU_BRIDGE=m
   ```
   And other components, depending on what you need:
   ```conf
   CONFIG_PINCTRL_INTEL_PLATFORM=m
   CONFIG_INTEL_SKL_INT3472=m
   CONFIG_VIDEO_OV13B10=m
   CONFIG_VIDEO_OV02C10=m
   CONFIG_VIDEO_OV05C10=m
   ```
5. Build your kernel.

### 2. Out-of-tree build

- Requires kernel headers installed on the build machine
- Requires the dependencies enabled in the kernel, and the patches under
  `patch/` (not `in-tree-build`) applied to the kernel you will run the
  modules on
- To build and install:
  ```sh
  make -j`nproc` && sudo make modules_install && sudo depmod -a
  ```

### 3. Build with dkms

- Register, build and auto install:
  ```sh
  sudo dkms add .
  sudo dkms autoinstall ipu7-drivers/0.0.0
  ```

ipu7-drivers-0~git202407090310.f40d5a70/SECURITY.md

# Security Policy

Intel is committed to rapidly addressing security vulnerabilities affecting our
customers and providing clear guidance on the solution, impact, severity and
mitigation.

## Reporting a Vulnerability

Please report any security vulnerabilities in this project utilizing the
guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html).

ipu7-drivers-0~git202407090310.f40d5a70/dkms.conf

PACKAGE_NAME=ipu7-drivers
PACKAGE_VERSION=0.0.0
MAKE="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir"
CLEAN="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean"
AUTOINSTALL="yes"

BUILT_MODULE_NAME[0]="intel-ipu7"
BUILT_MODULE_LOCATION[0]="drivers/media/pci/intel/ipu7"
DEST_MODULE_LOCATION[0]="/updates"
BUILT_MODULE_NAME[1]="intel-ipu7-isys"
BUILT_MODULE_LOCATION[1]="drivers/media/pci/intel/ipu7"
DEST_MODULE_LOCATION[1]="/updates"
BUILT_MODULE_NAME[2]="intel-ipu7-psys"
BUILT_MODULE_LOCATION[2]="drivers/media/pci/intel/ipu7/psys"
DEST_MODULE_LOCATION[2]="/updates"

ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/Kconfig

config VIDEO_INTEL_IPU7
	tristate "Intel IPU7 driver"
	depends on ACPI
	depends on MEDIA_SUPPORT
	depends on MEDIA_PCI_SUPPORT
	depends on X86_64
	select IOMMU_API
	select IOMMU_IOVA
	select X86_DEV_DMA_OPS
	select VIDEOBUF2_DMA_CONTIG
	select V4L2_FWNODE
	select PHYS_ADDR_T_64BIT
	select COMMON_CLK
	help
	  This is the Intel imaging processing unit, found in Intel SoCs and
	  used for capturing images and video from a camera sensor.

	  To compile this driver, say Y here! It contains 3 modules -
	  intel_ipu7, intel_ipu7_isys and intel_ipu7_psys.

config VIDEO_INTEL_IPU7_MGC
	bool "Compile for IPU7 MGC driver"
	depends on VIDEO_INTEL_IPU7
	help
	  If selected, MGC device nodes will be created. Recommended for
	  driver developers only.

	  Select this option if you want the MGC devices exposed to userspace
	  as media entities; otherwise, leave it unselected.

ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/Makefile

# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2017 - 2024 Intel Corporation.

is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi)
ifeq ($(is_kernel_lt_6_10), 1)
ifneq ($(EXTERNAL_BUILD), 1)
src := $(srctree)/$(src)
endif
endif

intel-ipu7-objs += ipu7.o \
		   ipu7-bus.o \
		   ipu7-dma.o \
		   ipu7-mmu.o \
		   ipu7-buttress.o \
		   ipu7-cpd.o \
		   ipu7-syscom.o \
		   ipu7-boot.o

obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7.o

intel-ipu7-isys-objs += ipu7-isys.o \
			ipu7-isys-csi2.o \
			ipu7-isys-csi-phy.o \
			ipu7-fw-isys.o \
			ipu7-isys-video.o \
			ipu7-isys-queue.o \
			ipu7-isys-subdev.o

ifdef CONFIG_VIDEO_INTEL_IPU7_MGC
intel-ipu7-isys-objs += ipu7-isys-tpg.o
endif

obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-isys.o

obj-$(CONFIG_VIDEO_INTEL_IPU7) += psys/

ccflags-y += -I$(src)/

ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_boot_abi.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - 2024 Intel Corporation
 */

#ifndef IPU7_FW_BOOT_ABI_H
#define IPU7_FW_BOOT_ABI_H

#include "ipu7_fw_common_abi.h"
#include "ipu7_fw_syscom_abi.h"

/* Logger severity levels */
/** critical severity level */
#define IA_GOFO_FWLOG_SEVERITY_CRIT (0U)
/** error severity level */
#define IA_GOFO_FWLOG_SEVERITY_ERROR (1U)
/** warning severity level */
#define IA_GOFO_FWLOG_SEVERITY_WARNING (2U)
/** info severity level */
#define IA_GOFO_FWLOG_SEVERITY_INFO (3U)
/** debug severity level */
#define IA_GOFO_FWLOG_SEVERITY_DEBUG (4U)
/** verbose severity level */
#define IA_GOFO_FWLOG_SEVERITY_VERBOSE (5U)

/** Max number of logger sources used by FW. Set to 64 (instead of 32) for extensibility. */
#define IA_GOFO_FWLOG_MAX_LOGGER_SOURCES (64U)

/* Logger boot-time channel enablement/disablement configuration defines */
#define LOGGER_CONFIG_CHANNEL_ENABLE_HWPRINTF_BITMASK (1U << 0U)
#define LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK (1U << 1U)
#define LOGGER_CONFIG_CHANNEL_ENABLE_ALL_BITMASK ( \
	LOGGER_CONFIG_CHANNEL_ENABLE_HWPRINTF_BITMASK | \
	LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK \
	)

/** Logger boot time configuration */
struct ia_gofo_logger_config {
	/**
	 * Set to "true" to use the source severity as defined in this struct.
	 * Set to "false" to ignore the source severity and use defaults.
	 */
	uint8_t use_source_severity;
	/**
	 * Set the severity for each source.
	 * Based on the IA_GOFO_FWLOG_SEVERITY_* defines.
	 */
	uint8_t source_severity[IA_GOFO_FWLOG_MAX_LOGGER_SOURCES];
	/**
	 * Set to "true" to use the channels enable bitmask as defined in this struct.
	 * Set to "false" to ignore the channels enable bitmask and use defaults (per compilation).
	 */
	uint8_t use_channels_enable_bitmask;
	/**
	 * Selects which logger channels to enable.
	 * Note: channel enablement will be dictated based on the compiled channels.
	 * Note: the buttress channel is not configurable and cannot be disabled.
	 *
	 * bitmask: see the LOGGER_CONFIG_CHANNEL defines
	 */
	uint8_t channels_enable_bitmask;
	/** Padding to align */
	uint8_t padding[1];
	/**
	 * HW-PRINTF's buffer base address in DDR.
	 * Note: in secure mode, this buffer should be in the untrusted memory range.
	 */
	ia_gofo_addr_t hw_printf_buffer_base_addr;
	/**
	 * HW-PRINTF's buffer size in DDR.
	 * Cache line aligned.
	 */
	uint32_t hw_printf_buffer_size_bytes;
};
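/*
 * Illustrative sketch only (not part of the ABI): one way a host driver might
 * fill struct ia_gofo_logger_config before kicking the firmware. The function
 * name, the buffer parameters and the choice of INFO severity are hypothetical
 * placeholders; only the fields and macros used come from this header.
 */
static inline void example_fill_logger_config(struct ia_gofo_logger_config *cfg,
					      ia_gofo_addr_t buf_addr,
					      uint32_t buf_size_bytes)
{
	uint32_t i;

	cfg->use_source_severity = 1U;
	for (i = 0U; i < IA_GOFO_FWLOG_MAX_LOGGER_SOURCES; i++)
		cfg->source_severity[i] = IA_GOFO_FWLOG_SEVERITY_INFO;

	cfg->use_channels_enable_bitmask = 1U;
	cfg->channels_enable_bitmask = LOGGER_CONFIG_CHANNEL_ENABLE_ALL_BITMASK;

	/* HW-PRINTF buffer: cache line aligned; untrusted range in secure mode */
	cfg->hw_printf_buffer_base_addr = buf_addr;
	cfg->hw_printf_buffer_size_bytes = buf_size_bytes;
	cfg->padding[0] = 0U;
}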
/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_logger_config_abi_test_func(void)
{
	CHECK_ALIGN32(struct ia_gofo_logger_config);
}

/*
 * The watchdog handler, written in ASM, uses this file to get the value to be
 * stored in a boot status register. The rest of the file is ignored by the
 * assembler to prevent it compiling with C syntax, resulting in many errors.
 */
#ifndef __ASSEMBLER__

#pragma pack(push, 1)

/**
 * @addtogroup ia_gofo_boot_abi
 * @{
 */

/** Number of FW Boot_Param Registers per application */
#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP ((uint32_t)IA_GOFO_FW_BOOT_ID_MAX)

/** Offset to the IS set of boot parameter registers */
#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET (0U)

/** Offset to the PS set of boot parameter registers */
#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PS_OFFSET ((IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET) + \
	(uint32_t)(IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP))

/**
 * Offset to the primary set of boot parameter registers from the subsystem register base.
 * See IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET and IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PS_OFFSET
 */
#define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PRIMARY_OFFSET (0U)

/**
 * Offset to the secondary set of boot parameter registers from the subsystem
 * register base.
 *
 * New in IPU7, the secondary registers are in their own memory page to allow
 * access controls to be defined separately for each security context. Primary
 * context registers are now directly accessible to the untrusted SW and only
 * the secondary context registers are limited to the trusted SW. Previously,
 * all FW_BOOT_PARAMS registers were accessible only to the trusted SW.
 *
 * The value here is in units of register indexes from
 * IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PRIMARY_OFFSET.
 *
 * See IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET and IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PS_OFFSET
 */
#define IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET (0x3000U / 4U)

/**
 * Offset to the stand-alone SoC (HKR) IPU secondary set of boot parameter
 * registers from the subsystem register base. The value here is in units of
 * register indexes from IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PRIMARY_OFFSET.
 */
#define IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 2U)

/**
 * Offset to the stand-alone SoC (HKR) HIF secondary set of boot parameter
 * registers from the subsystem register base. The value here is in units of
 * register indexes from IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PRIMARY_OFFSET.
 */
#define IA_GOFO_HKR_HIF_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP)

/** Number of FW Boot Param Registers in the Intel SoC (CCG) */
#define IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 4U)

/** Number of FW Boot Param Registers in the stand-alone SoC (HKR) */
#define IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 4U)

/** Size of reserved words in the boot structure */
#define IA_GOFO_BOOT_RESERVED_SIZE (58U)

/** Size of reserved words in the secondary boot structure */
#define IA_GOFO_BOOT_SECONDARY_RESERVED_SIZE (IA_GOFO_BOOT_RESERVED_SIZE)

/**
 * Size of reserved fields in the secondary boot structure in bytes.
 * Those fields are reserved for future use and to allow unity with the
 * primary boot configuration structure.
 */
#define IA_GOFO_BOOT_SECONDARY_RESERVED_FIELDS (sizeof(ia_gofo_addr_t) + sizeof(ia_gofo_addr_t) + sizeof(uint32_t))

/**
 * Boot parameter register index (offset) in register units (4 bytes each).
 * The offset is relative to a base per subsystem and boot context.
 */
enum ia_gofo_buttress_reg_id {
	/** Pass boot (mostly syscom) configuration to SPC */
	IA_GOFO_FW_BOOT_CONFIG_ID = 0,

	/** Syscom state - initialized by host before uC kick, modified by firmware afterwards */
	IA_GOFO_FW_BOOT_STATE_ID = 1,

	/**
	 * Reserved for some future use. Secondary context only, occupying the
	 * same index as STATE, which is used only in the primary context as
	 * there is only one system state and one owner at the host.
	 *
	 * Host should initialize to zero in the secondary context, but only
	 * if there is a secondary context. When there is no secondary
	 * context, this register must NOT be written or read. It may not even
	 * be implemented physically in hardware that does not support
	 * security via dual contexts.
	 *
	 * See IA_GOFO_FW_BOOT_STATE_ID
	 */
	IA_GOFO_FW_BOOT_RESERVED1_ID = IA_GOFO_FW_BOOT_STATE_ID,

	/**
	 * Location of indices for syscom queues. Set by firmware, read by host.
	 * This is the base of a contiguous array of syscom index pairs.
	 * @note Not valid until IA_GOFO_FW_BOOT_STATE is set to
	 * IA_GOFO_FW_BOOT_STATE_READY or at least
	 * IA_GOFO_FW_BOOT_STATE_QUEUE_INIT_DONE
	 */
	IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID = 2,

#ifndef REDUCED_FW_BOOT_PARAMS
	/**
	 * Syscom untrusted addr min - defines the range of shared buffer
	 * addresses valid for the primary (untrusted) device context.
	 * Initialized by host before uC kick and read by firmware.
	 *
	 * Usage in the firmware is:
	 *
	 * bool is_addr_untrusted = addr >= untrusted_addr_min;
	 *
	 * where *is_addr_untrusted* must evaluate to true for all buffers
	 * (start and end of each one) passed to the primary context and false
	 * for all buffers passed to the secondary context.
	 *
	 * Alignment constraint: the value of this register must be aligned to
	 * 4KB pages.
	 *
	 * This register usage has meaning only in the secondary (trusted)
	 * context parameter set, despite it being directly relevant to the
	 * primary (untrusted) context, because only in the secondary context
	 * do we trust the value as it comes from the trusted host entity. The
	 * corresponding register in the primary context may be assigned a
	 * different use in the future.
	 *
	 * When there is no secondary context, this register must NOT be
	 * written or read. It may not even be implemented physically in
	 * hardware that does not support security via dual contexts.
	 */
	IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID = 3,

	/**
	 * Reserved for some future use. Primary context only, occupying the
	 * same index as UNTRUSTED_ADDR_MIN, which is used only in the
	 * secondary context.
	 *
	 * Host should initialize to zero (in the primary context).
	 *
	 * See IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID
	 */
	IA_GOFO_FW_BOOT_RESERVED0_ID = IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID,

	/**
	 * Messaging version as declared by firmware.
	 * This is the highest version determined to be compatible with the
	 * host's capabilities. If no compatible version can be found,
	 * firmware will set its highest supported version.
	 * @note Not valid until IA_GOFO_FW_BOOT_STATE is set to
	 * IA_GOFO_FW_BOOT_STATE_READY or at least
	 * IA_GOFO_FW_BOOT_STATE_QUEUE_INIT_DONE
	 */
	IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID = 4,
#else
	/*
	 * Functional safety SoC products may have fewer FW_BOOT registers,
	 * but on the other hand, they do not need the UNTRUSTED_ADDR_MIN
	 * register.
	 * To get their boot context to fit into the available registers,
	 * remove the RESERVED0/UNTRUSTED_ADDR_MIN assignment from the
	 * definition.
	 */
	IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID = 3,
#endif

	/** Number of register ID's */
	IA_GOFO_FW_BOOT_ID_MAX
};

#ifdef REDUCED_FW_BOOT_PARAMS
#define IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID (0xFFU)
#endif

/**
 * Evaluates to true if the boot state is critical
 * @see enum ia_gofo_boot_state
 */
#define IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(boot_state) (0xDEAD0000U == ((boot_state) & 0xFFFF0000U))

/**
 * Very minimalistic boot parameters containing the minimum required to set up
 * messaging. Any further configuration is handled in the messages.
 */
struct ia_gofo_boot_config {
	/** Length of this structure in bytes, including the variable number of queue configs */
	uint32_t length;
	/** Semantic version number of this configuration structure */
	struct ia_gofo_version_s config_version;
	/**
	 * Semantic messaging protocol versions supported by host.
	 * Host will thus declare the protocol versions it supports,
	 * in order of preference.
	 */
	struct ia_gofo_msg_version_list client_version_support;
	/**
	 * Package directory location in the IPU address space.
	 * Used only in unsecured boot. The secure boot location is hard coded to IMR.
	 */
	ia_gofo_addr_t pkg_dir;
	/**
	 * Configuration specific to a subsystem.
	 * Optional. Currently used for debug purposes.
	 */
	ia_gofo_addr_t subsys_config;
	/** Frequency in MHz */
	uint32_t uc_tile_frequency_mhz;
	/**
	 * 16-bit checksum value.
	 * Used only in the INSYS stand-alone SoC (HKR).
	 * Calculation: 1's complement 16-bit checksum, calculated over the whole
	 * boot configuration struct (including the syscom metadata). The algorithm
	 * is explained in RFC 1071 - https://www.rfc-editor.org/rfc/rfc1071
	 */
	uint16_t checksum;
	/** Padding bytes used for alignment */
	uint8_t padding[2];
	/** Reserved for future use. Set to zero. */
	uint32_t reserved[IA_GOFO_BOOT_RESERVED_SIZE];
	/** Configuration of syscom queues */
	struct syscom_config_s syscom_context_config[1];
};

/**
 * Very minimalistic boot parameters containing the minimum required to set up
 * messaging on the secondary syscom channel. Any further configuration is
 * handled in the messages.
 */
struct ia_gofo_secondary_boot_config {
	/** Length of this structure in bytes, including the variable number of queue configs */
	uint32_t length;
	/** Semantic version number of this configuration structure */
	struct ia_gofo_version_s config_version;
	/**
	 * Semantic messaging protocol versions supported by host.
	 * Host will thus declare the protocol versions it supports,
	 * in order of preference.
	 */
	struct ia_gofo_msg_version_list client_version_support;
	/** Reserved fields for future use and to keep this secondary structure aligned with the primary structure */
	uint8_t reserved1[IA_GOFO_BOOT_SECONDARY_RESERVED_FIELDS];
	/**
	 * 16-bit checksum value.
	 * Used only in the INSYS stand-alone SoC (HKR).
	 * Calculation: 1's complement 16-bit checksum, calculated over the whole
	 * boot configuration struct (including the syscom metadata). The algorithm
	 * is explained in RFC 1071 - https://www.rfc-editor.org/rfc/rfc1071
	 */
	uint16_t checksum;
	/** Padding bytes used for alignment */
	uint8_t padding[2];
	/** Reserved for future use. Set to zero. */
	uint32_t reserved2[IA_GOFO_BOOT_SECONDARY_RESERVED_SIZE];
	/** Configuration of secondary syscom queues */
	struct syscom_config_s syscom_context_config[1];
};
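/*
 * Illustrative sketch only (not part of the ABI): a 16-bit one's-complement
 * checksum in the style of RFC 1071, as referenced by the checksum fields
 * above (INSYS stand-alone SoC/HKR). The byte-pairing order and final
 * inversion follow the RFC's reference algorithm, but this helper is an
 * assumption, not the firmware's authoritative implementation; the checksum
 * field itself is taken as zero while the sum is computed.
 */
static inline uint16_t example_rfc1071_checksum(const void *data, uint32_t len)
{
	const uint8_t *bytes = (const uint8_t *)data;
	uint32_t sum = 0U;

	while (len > 1U) {
		sum += ((uint32_t)bytes[0] << 8) | (uint32_t)bytes[1];
		bytes += 2;
		len -= 2U;
	}
	if (len > 0U)
		sum += (uint32_t)bytes[0] << 8;	/* pad trailing odd byte */

	/* fold the carries back into the low 16 bits (end-around carry) */
	while ((sum >> 16) != 0U)
		sum = (sum & 0xFFFFU) + (sum >> 16);

	return (uint16_t)(~sum & 0xFFFFU);
}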
/**
 * Calculate the allocation size of the boot configuration structure as a
 * function of the number of queues.
 */
#define FW_BOOT_CONFIG_ALLOC_SIZE(num_queues) \
	((sizeof(struct ia_gofo_boot_config) + \
	(sizeof(struct syscom_queue_params_config) * num_queues)))

#pragma pack(pop)

/** @} */

/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_boot_config_abi_test_func(void)
{
	CHECK_ALIGN32(struct ia_gofo_boot_config);
	CHECK_ALIGN32(struct ia_gofo_secondary_boot_config);
}
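/*
 * Illustrative sketch only (not part of the ABI): sizing and minimal
 * initialization of the primary boot configuration for a given number of
 * syscom queues. The function name and the 1.0.0.0/2.0.0.0 version values
 * are hypothetical placeholders; real values come from the firmware release.
 * The queue configs, reserved words, frequency and (where used) checksum
 * must also be filled by the host before kicking the firmware.
 */
static inline void example_init_boot_config(struct ia_gofo_boot_config *cfg,
					    uint32_t num_queues,
					    ia_gofo_addr_t pkg_dir_addr)
{
	struct ia_gofo_version_s cfg_version = IA_GOFO_MSG_VERSION_INIT(1U, 0U, 0U, 0U);
	struct ia_gofo_version_s msg_version = IA_GOFO_MSG_VERSION_INIT(2U, 0U, 0U, 0U);

	cfg->length = (uint32_t)FW_BOOT_CONFIG_ALLOC_SIZE(num_queues);
	cfg->config_version = cfg_version;
	cfg->client_version_support.num_versions = 1U;
	cfg->client_version_support.versions[0] = msg_version;
	cfg->pkg_dir = pkg_dir_addr;	/* unsecured boot only; secure boot uses IMR */
	cfg->subsys_config = IA_GOFO_ADDR_NULL;
}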
#endif /* __ASSEMBLER__ */

/*
 * The following boot status value defines are used in ASM code (can't use an
 * enum); these values are also part of the boot status enum.
 */

/** Boot status register value upon WDT timeout */
#define IA_GOFO_WDT_TIMEOUT_ERR 0xDEAD0401U
/** Boot status register value upon memory errors: double memory error - occurred while handling another error */
#define IA_GOFO_MEM_FATAL_DME_ERR 0xDEAD0801U
/** Boot status register value upon memory errors: unrecoverable memory error - in DMEM */
#define IA_GOFO_MEM_UNCORRECTABLE_LOCAL_ERR 0xDEAD0802U
/** Boot status register value upon memory errors: error - in dirty dcache data */
#define IA_GOFO_MEM_UNCORRECTABLE_DIRTY_ERR 0xDEAD0803U
/** Boot status register value upon memory errors: error - in dcache tag */
#define IA_GOFO_MEM_UNCORRECTABLE_DTAG_ERR 0xDEAD0804U
/** Boot status register value upon memory errors: uncorrectable memory error - in icache and dcache (not dirty) */
#define IA_GOFO_MEM_UNCORRECTABLE_CACHE_ERR 0xDEAD0805U
/** Boot status register value upon double exception */
#define IA_GOFO_DOUBLE_EXCEPTION_ERR 0xDEAD0806U
/** Boot status register value upon DMEM BIST failure */
#define IA_GOFO_BIST_DMEM_FAULT_DETECTION_ERR 0xDEAD1000U
/** Boot status register value upon DATA INTEGRITY BIST failure */
#define IA_GOFO_BIST_DATA_INTEGRITY_FAILURE 0xDEAD1010U

#ifndef __ASSEMBLER__
/** Boot status values for the register at IA_GOFO_FW_BOOT_STATE_ID */
enum ia_gofo_boot_state {
	/**
	 * @note This value is only used for FuSa systems.
	 *
	 * SW sets this value to sync between safe (FuSa) SW and non-safe
	 * feature SW. It should not be set or encountered by FW.
	 *
	 * This state is set when the secondary boot params have been
	 * configured by the safety SW and therefore the functional SW can
	 * kick FW. If this value is not set, the functional SW must wait for
	 * the safety SW to set it before kicking FW.
	 *
	 * Before kicking FW, the functional SW should change the boot state
	 * to IA_GOFO_FW_BOOT_STATE_UNINIT.
	 */
	IA_GOFO_FW_BOOT_STATE_SECONDARY_BOOT_CONFIG_READY = 0x57A7B000U,

	/**
	 * Set by SW before the uC firmware kick.
	 * FW can either busy-wait poll until it sees this value at
	 * buttress_registers_s::boot_state or wait for an interrupt and then
	 * check for this value.
	 */
	IA_GOFO_FW_BOOT_STATE_UNINIT = 0x57A7E000U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at early
	 * boot (ASAP) - confirms firmware program start.
	 * @note Informational/Debug only
	 */
	IA_GOFO_FW_BOOT_STATE_STARTING_0 = 0x57A7D000U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at early
	 * boot after DMEM global/static variable initialization is done.
	 * @note Informational/Debug only
	 */
	IA_GOFO_FW_BOOT_STATE_MEM_INIT_DONE = 0x57A7D100U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at the start
	 * of late boot when boot parameters (content of
	 * buttress_registers_s::p_boot_params) are being processed and used
	 * to direct the rest of firmware boot.
	 * @note Informational/Debug only
	 */
	IA_GOFO_FW_BOOT_STATE_BOOT_CONFIG_START = 0x57A7D200U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state during late
	 * boot when the log output queue has been initialized and logging is
	 * now active. At this point, host may pop log messages from the log
	 * output queue. If the firmware boot is stuck (i.e. never reaches
	 * IA_GOFO_FW_BOOT_STATE_READY), this queue may have important
	 * information.
	 * @note Informational/Debug only - only meaningful if a log queue is
	 * defined at the higher messaging layer
	 */
	IA_GOFO_FW_BOOT_STATE_QUEUE_INIT_DONE = 0x57A7D300U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when the
	 * boot process is complete, including readiness to receive and
	 * process syscom command messages.
	 * SW can either busy-wait poll on this value, or wait for an
	 * interrupt and then check for this value.
	 *
	 * The boot_state register must be polled and checked for this value:
	 * - At any boot or communication timeout
	 * - Before every syscom queue scan start, no matter if that scan is
	 *   interrupt driven or polling-based
	 */
	IA_GOFO_FW_BOOT_STATE_READY = 0x57A7E100U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at any time
	 * where a critical (== unrecoverable) error is detected which is not
	 * covered by another error state defined in this enumeration.
	 * @note While this value is defined as part of the boot state
	 * enumeration, it may be driven by firmware at any time that firmware
	 * is active and not just during the boot process.
	 */
	IA_GOFO_FW_BOOT_STATE_CRIT_UNSPECIFIED = 0xDEAD0001U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when the
	 * boot configuration structure pointer at register
	 * IA_GOFO_FW_BOOT_CONFIG_ID is not valid. Note that firmware has
	 * limited capability to detect this, but some cases (e.g. NULL,
	 * IPU HW address space, alignment errors) are possible.
	 */
	IA_GOFO_FW_BOOT_STATE_CRIT_CFG_PTR = 0xDEAD0101U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when the
	 * boot configuration structure version specified is not supported by
	 * the firmware.
	 */
	IA_GOFO_FW_BOOT_STATE_CRIT_CFG_VERSION = 0xDEAD0201U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when the
	 * messaging protocol version is not supported by the firmware.
	 */
	IA_GOFO_FW_BOOT_STATE_CRIT_MSG_VERSION = 0xDEAD0301U,

	/** FW sets this value to buttress_registers_s::boot_state when a HW WDT timeout happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_WDT_TIMEOUT = IA_GOFO_WDT_TIMEOUT_ERR,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at early
	 * boot to indicate that the data section unpacking process failed.
	 * @note Informational/Debug only
	 */
	IA_GOFO_FW_BOOT_STATE_WRONG_DATA_SECTION_UNPACKING = 0xDEAD0501U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at early
	 * boot to indicate that the rodata section unpacking process failed.
	 * @note Informational/Debug only
	 */
	IA_GOFO_FW_BOOT_STATE_WRONG_RO_DATA_SECTION_UNPACKING = 0xDEAD0601U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state at boot to
	 * indicate that the value of the boot parameter register at index
	 * IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID is not valid.
	 *
	 * See IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID for the definition of
	 * valid values.
	 */
	IA_GOFO_FW_BOOT_STATE_INVALID_UNTRUSTED_ADDR_MIN = 0xDEAD0701U,

	/** FW sets this value to buttress_registers_s::boot_state when a memory error happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_MEM_FATAL_DME = IA_GOFO_MEM_FATAL_DME_ERR,

	/** FW sets this value to buttress_registers_s::boot_state when a memory error happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_LOCAL = IA_GOFO_MEM_UNCORRECTABLE_LOCAL_ERR,

	/** FW sets this value to buttress_registers_s::boot_state when a memory error happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_DIRTY = IA_GOFO_MEM_UNCORRECTABLE_DIRTY_ERR,

	/** FW sets this value to buttress_registers_s::boot_state when a memory error happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_DTAG = IA_GOFO_MEM_UNCORRECTABLE_DTAG_ERR,

	/** FW sets this value to buttress_registers_s::boot_state when a memory error happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_CACHE = IA_GOFO_MEM_UNCORRECTABLE_CACHE_ERR,

	/** FW sets this value to buttress_registers_s::boot_state when a double exception happens */
	IA_GOFO_FW_BOOT_STATE_CRIT_DOUBLE_EXCEPTION = IA_GOFO_DOUBLE_EXCEPTION_ERR,

	/**
	 * HOST sets this value to buttress_registers_s::boot_state when it
	 * wants to signal FW to begin an orderly shutdown. This value is
	 * exceptional as it is the only one that HOST may write when firmware
	 * is active, and it is not really a state, but rather a command.
	 *
	 * IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD may only be written by the host
	 * AFTER the host ensures that the FW state is
	 * IA_GOFO_FW_BOOT_STATE_READY.
	 */
	IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD = 0x57A7F001U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when it has
	 * begun a shutdown sequence.
	 * @note Informational/Debug only
	 */
	IA_GOFO_FW_BOOT_STATE_SHUTDOWN_START = 0x57A7E200U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when it
	 * completes its shutdown sequence. At this point, FW is halted.
	 */
	IA_GOFO_FW_BOOT_STATE_INACTIVE = 0x57A7E300U,

	/**
	 * FW sets this value to buttress_registers_s::boot_state when it
	 * discovers a HW timeout on cb cmd execution. At this point, FW is
	 * halted.
	 */
	IA_GOFO_FW_BOOT_HW_CMD_ACK_TIMEOUT = 0x57A7E400U
};
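/*
 * Illustrative sketch only (not part of the ABI): host-side wait for firmware
 * boot completion as described for IA_GOFO_FW_BOOT_STATE_READY. The register
 * access is abstracted as a plain volatile pointer here; a real driver would
 * use its own MMIO helpers and add a timeout rather than spin forever.
 */
static inline bool example_wait_fw_boot_ready(const volatile uint32_t *boot_state_reg)
{
	uint32_t state;

	do {
		state = *boot_state_reg;
		if (IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(state))
			return false;	/* unrecoverable boot error reported by FW */
	} while (state != (uint32_t)IA_GOFO_FW_BOOT_STATE_READY);

	return true;
}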
#endif /* __ASSEMBLER__ */

#endif

ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_common_abi.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - 2024 Intel Corporation
 */

#ifndef IPU7_FW_COMMON_ABI_H
#define IPU7_FW_COMMON_ABI_H

#include <linux/types.h>

/**
 * Generic macro to trap size alignment violations at compile time.
 * A violation will result in a compilation error.
 * @param struct_type The structure type to check
 * @param alignment Byte alignment to enforce
 */
#define CHECK_SIZE_ALIGNMENT(struct_type, alignment) do { \
	const uint8_t arr[((sizeof(struct_type) % (alignment)) == 0U) ? 1 : -1]; (void)arr; \
} while (false)

/** Macro to trap 16 bit size alignment violations at compile time */
#define CHECK_ALIGN16(struct_type) CHECK_SIZE_ALIGNMENT(struct_type, sizeof(uint16_t))

/** Macro to trap 32 bit size alignment violations at compile time */
#define CHECK_ALIGN32(struct_type) CHECK_SIZE_ALIGNMENT(struct_type, sizeof(uint32_t))

/** Macro to trap 64 bit size alignment violations at compile time */
#define CHECK_ALIGN64(struct_type) CHECK_SIZE_ALIGNMENT(struct_type, sizeof(uint64_t))

/**
 * Cache line size in bytes
 * @todo Should probably get the cache line size from a per-platform header
 */
#define IA_GOFO_CL_SIZE (64U)

/** Macro to trap cache line alignment violations at compile time */
#define CHECK_ALIGN_CL(struct_type) CHECK_SIZE_ALIGNMENT(struct_type, IA_GOFO_CL_SIZE)

/**
 * Memory page size in bytes
 * @todo Should probably get the page size from a per-platform header
 */
#define IA_GOFO_PAGE_SIZE (4096U)
#define IA_GOFO_SRAM_PAGE_SIZE (256U)

/** Macro to trap memory page violations at compile time */
#define CHECK_ALIGN_PAGE(struct_type) CHECK_SIZE_ALIGNMENT(struct_type, IA_GOFO_PAGE_SIZE)

/** Compile time test of two constant values */
#define CHECK_EQUAL(lval, rval) do { const uint8_t arr[((lval) == (rval)) ? 1 : -1]; (void)arr; } while (false)

/** Compile time test of two constant values */
#define CHECK_SMALLER_OR_EQUAL(lval, rval) do { const uint8_t arr[((lval) <= (rval)) ? 1 : -1]; (void)arr; } while (false)

/** Test the size (in bytes) of a structure */
#define CHECK_SIZE(struct_type, size) CHECK_EQUAL(sizeof(struct_type), size)

/**
 * @defgroup ia_gofo_common_abi Common Elements for IPU ABI's
 * The most basic and common ABI definitions used by a variety of ABI modules
 * @{
 */

#pragma pack(push, 1)

/**
 * @file
 * This file contains the most basic and common ABI definitions
 * used by Host and IPU firmware(s) for communication.
 */

#ifndef IA_GOFO_PTR_AS_NATIVE
/**
 * IPU address space pointer type. Note that its size is likely different than
 * the host's own pointers.
 */
typedef uint32_t ia_gofo_addr_t;
#else
/**
 * IPU address space pointer type - defined to be the same as the host's -
 * useful for simulated unit tests or if somehow firmware and host code run on
 * the same processor.
 */
typedef uintptr_t ia_gofo_addr_t;
#endif

/** IPU NULL address */
#define IA_GOFO_ADDR_NULL (0U)

/**
 * Host pointer (as integer)
 * @todo This could change if we work with a small "host"
 */
typedef uint64_t host_addr_t;

/**
 * Version structure for semantic versioning. See semver.org.
 * In the IPU protocols, the version is not valid if the major and minor
 * components are BOTH zero. That is, any variant of the pattern 0.0.x.x is
 * not valid.
 * @note The order of fields is optimized to allow comparison of the entire
 * structure contents as a little-endian unsigned integer.
 * @see IA_GOFO_MSG_VERSION_INIT
 */
struct ia_gofo_version_s {
	/** Patch number. Not sure if we'll actually use this... */
	uint8_t patch;
	/** Subminor version number. A change means a non-breaking bug fix. */
	uint8_t subminor;
	/** Minor version number. A change means a non-breaking change (still backwards compatible). */
	uint8_t minor;
	/** Major version number. A change means a compatibility break. */
	uint8_t major;
};
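/*
 * Illustrative sketch only (not part of the ABI): because of the field
 * ordering above, a version compares as a little-endian unsigned integer.
 * This hypothetical helper builds that integer explicitly so it works on any
 * host endianness; e.g. example_version_as_u32(&a) >= example_version_as_u32(&b)
 * orders two versions by precedence.
 */
static inline uint32_t example_version_as_u32(const struct ia_gofo_version_s *v)
{
	return ((uint32_t)v->major << 24) | ((uint32_t)v->minor << 16) |
	       ((uint32_t)v->subminor << 8) | (uint32_t)v->patch;
}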
/**
 * Convenience macro to initialize the fields of a version structure.
 * This macro is recommended for static initialization, as initialization by
 * order is not intuitive due to the ordering of the fields.
 */
#define IA_GOFO_MSG_VERSION_INIT(major_val, minor_val, subminor_val, patch_val) \
	{.major = (major_val), .minor = (minor_val), .subminor = (subminor_val), .patch = (patch_val)}

/** Convenience macro to determine if a version value is valid */
#define IA_GOFO_MSG_VERSION_IS_VALID(version_struct) \
	(!(((version_struct).major == 0U) && ((version_struct).minor == 0U)))

/**
 * Maximum number of versions in the version list
 * @see ia_gofo_msg_version_list
 */
#define IA_GOFO_MSG_VERSION_LIST_MAX_ENTRIES (3U)

/** Size of the reserved field in the version list */
#define IA_GOFO_MSG_RESERVED_SIZE (3U)

/** List of versions */
struct ia_gofo_msg_version_list {
	/** Number of elements in the versions field */
	uint8_t num_versions;
	/** Reserved for alignment. Set to zero. */
	uint8_t reserved[IA_GOFO_MSG_RESERVED_SIZE];
	/** Version array */
	struct ia_gofo_version_s versions[IA_GOFO_MSG_VERSION_LIST_MAX_ENTRIES];
};

#pragma pack(pop)

/** @} */

/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_common_abi_test_func(void)
{
	CHECK_ALIGN32(struct ia_gofo_version_s);
	CHECK_ALIGN32(struct ia_gofo_msg_version_list);
}

/** Reserved TLV types for all parsing contexts */
#define TLV_TYPE_PADDING (0U)

#pragma pack(push, 1)

/** Applies the sizeof operator to a member field */
#define IA_GOFO_ABI_SIZEOF_FIELD(struct_type, field) (sizeof(((struct_type *)0)->field))

/**
 * Number of bits in a byte. Bytes here are assumed to be octets.
 * This macro is for clarity, to make expressions more expressive.
 */
#define IA_GOFO_ABI_BITS_PER_BYTE (8U)

/**
 * Header for type-length-value fields.
 * Placement must be aligned to TLV_MSG_ALIGNMENT.
 * Total length must be a multiple of TLV_ITEM_ALIGNMENT bytes
 * (and not necessarily a multiple of the stricter TLV_MSG_ALIGNMENT).
 */
struct ia_gofo_tlv_header {
	/** Type of this field. Subject to parsing context. */
	uint16_t tlv_type;
	/**
	 * Size of this field in TLV_ITEM_ALIGNMENT elements, such that the
	 * base of this structure + (tlv_len32 * TLV_ITEM_ALIGNMENT) will be
	 * the next tlv field in a list.
	 *
	 * See TLV_MAX_LEN and TLV_ITEM_ALIGNMENT
	 */
	uint16_t tlv_len32;
};

/**
 * Maximum number of *bytes* a TLV item can contain.
 * Note: there is a (uint32_t) cast for (1U) as a WA for the KW checker
 * MISRA.SHIFT.RANGE.201, which interprets 1U's type as unsigned char rather
 * than unsigned int.
 */
#define TLV_MAX_LEN \
	((((uint32_t)(1U)) << (IA_GOFO_ABI_SIZEOF_FIELD(struct ia_gofo_tlv_header, tlv_len32) * IA_GOFO_ABI_BITS_PER_BYTE)) \
	* TLV_ITEM_ALIGNMENT)

/**
 * Root of a list of TLV fields
 * See TLV_LIST_ALIGNMENT
 */
struct ia_gofo_tlv_list {
	/** Number of elements in the TLV list */
	uint16_t num_elems;
	/**
	 * Offset from the base of *this* root structure (not the surrounding
	 * structure), in bytes, pointing to the first element in the TLV list.
	 * @note Must be aligned to TLV_LIST_ALIGNMENT
	 */
	uint16_t head_offset;
};

/**
 * Minimum alignment for all TLV items, both for starting address and size.
 * But see also TLV_MSG_ALIGNMENT.
 */
#define TLV_ITEM_ALIGNMENT ((uint32_t)sizeof(uint32_t))

/**
 * Minimum alignment for all top level TLV items (messages), both for starting
 * address and size. This is needed because those items are allowed to have
 * 64-bit integers within, and we need to remove the possibility of misaligning
 * the fields of an otherwise correctly built structure.
 */
#define TLV_MSG_ALIGNMENT ((uint32_t)sizeof(uint64_t))

/**
 * Alignment required for both the start and the total size of a TLV list.
 *
 * All TLV lists' head_offset must conform to this alignment AND all TLV lists
 * must be padded with zeros after the end of the last item of the list to this
 * same alignment, to ensure that any following TLV items are also aligned
 * without having to think about it too much.
 *
 * The alignment value is chosen to be general (strict) enough for all TLV
 * requirements. This is potentially a bit wasteful, but prevents issues that
 * could arise. We assume here that we don't have that many lists in a single
 * message and/or that the items they contain result in a more or less aligned
 * list size anyway.
 */
#define TLV_LIST_ALIGNMENT TLV_ITEM_ALIGNMENT

#pragma pack(pop)

/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_msg_tlv_test_func(void)
{
	CHECK_ALIGN16(struct ia_gofo_tlv_header);
	CHECK_ALIGN32(struct ia_gofo_tlv_list);
}
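/*
 * Illustrative sketch only (not part of the ABI): stepping to the next item
 * in a TLV list, using the rule documented for tlv_len32 (length counted in
 * TLV_ITEM_ALIGNMENT units from the base of the current item). Bounds
 * checking against the containing message is omitted here and must be added
 * in real parsing code.
 */
static inline const struct ia_gofo_tlv_header *
example_tlv_next(const struct ia_gofo_tlv_header *item)
{
	uint32_t len_bytes = (uint32_t)item->tlv_len32 * TLV_ITEM_ALIGNMENT;

	return (const struct ia_gofo_tlv_header *)((const uint8_t *)item + len_bytes);
}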
#ifndef IA_GOFO_MODULO
/**
 * Simple modulo. Required due to the way some assert macros work, where the
 * expression is string-ified and the percent sign could then be erroneously
 * interpreted as part of a format string.
 */
#define IA_GOFO_MODULO(dividend, divisor) ((dividend) % (divisor))
#endif

#ifndef IA_GOFO_ASSERT
/** Empty definition to allow headers to compile if NO_IA_GOFO_ABI_PLATFORM_DEPS is defined */
#define IA_GOFO_ASSERT(expr)
#endif

/** Number of elements in ia_gofo_msg_err::err_detail */
#define IA_GOFO_MSG_ERR_MAX_DETAILS (4U)

/** All uses of ia_gofo_msg_err must define err_code such that value 0 is not an error */
#define IA_GOFO_MSG_ERR_OK (0U)

/**
 * This value is reserved, but should never be sent on the ABI.
 * Used as a placeholder for defensive programming techniques that set an
 * error "NOT_OK" state as default and only set "OK" when that can be verified.
 *
 * If received in an ABI message, it is likely a bug in the programming.
 */
#define IA_GOFO_MSG_ERR_UNSPECIFED (0xFFFFFFFFU)

/**
 * All uses of ia_gofo_msg_err must define err_code such that this value is reserved.
 * Companion value to IA_GOFO_MSG_ERR_UNSPECIFED
 */
#define IA_GOFO_MSG_ERR_GROUP_UNSPECIFIED (0U)

/** Quick test for error code */
#define IA_GOFO_MSG_ERR_IS_OK(err) (IA_GOFO_MSG_ERR_OK == (err).err_code)

#pragma pack(push, 1)

/**
 * IPU messaging error structure, used to return errors or generic results in
 * ACK/DONE responses.
 *
 * This structure is generic and can be used in different contexts where the
 * meaning of the rest of the fields is dependent on err_group.
 */
struct ia_gofo_msg_err {
	/** Grouping of the error code. @note Groups are domain specific. */
	uint32_t err_group;
	/**
	 * Main error code.
	 * Numbering depends on err_group, BUT zero must always mean OK (NO ERROR).
	 * See IA_GOFO_MSG_ERR_OK
	 */
	uint32_t err_code;
	/** Error parameters. Meaning depends on err_group and err_code */
	uint32_t err_detail[IA_GOFO_MSG_ERR_MAX_DETAILS];
};

#pragma pack(pop)

/**
 * Application extension starting group ID
 *
 * Applications may define their own group ID's starting at this value.
 */
#define IA_GOFO_MSG_ERR_GROUP_APP_EXT_START (16U)

/**
 * Maximum group ID that will be on the ABI. Defining this allows applications
 * the option to use higher values internally with a shared error
 * infrastructure.
 */
#define IA_GOFO_MSG_ERR_GROUP_MAX (31U)

/** See IA_GOFO_MSG_ERR_GROUP_MAX */
#define IA_GOFO_MSG_ERR_GROUP_INTERNAL_START (IA_GOFO_MSG_ERR_GROUP_MAX + 1U)

/** @} */

/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_msg_err_test_func(void)
{
	CHECK_ALIGN64(struct ia_gofo_msg_err);
}

/**
 * @file
 * @brief This file defines error groups and codes that are common across
 * multiple domains.
 *
 * Error codes related to a specific message may be found in that message's
 * structure header.
 *
 * @addtogroup ia_gofo_msg_abi
 * @{
 */

/** Reserved value. Used only with IA_GOFO_MSG_ERR_OK & IA_GOFO_MSG_ERR_UNSPECIFED */
#define IA_GOFO_MSG_ERR_GROUP_RESERVED IA_GOFO_MSG_ERR_GROUP_UNSPECIFIED

/** Generic message errors that aren't specific to another group or IP */
#define IA_GOFO_MSG_ERR_GROUP_GENERAL 1

/** Error detail enumeration for error group IA_GOFO_MSG_ERR_GROUP_GENERAL */
enum ia_gofo_msg_err_general {
	/** No error */
	IA_GOFO_MSG_ERR_GENERAL_OK = IA_GOFO_MSG_ERR_OK,

	/**
	 * Message was smaller than expected for its type. err_detail[0] is
	 * the size, in bytes, received. err_detail[1] is the minimum required.
	 */
	IA_GOFO_MSG_ERR_GENERAL_MSG_TOO_SMALL = 1,

	/**
	 * Message was larger than can be supported. err_detail[0] is the
	 * size, in bytes, received. err_detail[1] is the maximum supported.
	 */
	IA_GOFO_MSG_ERR_GENERAL_MSG_TOO_LARGE = 2,

	/**
	 * State machine error. Message received doesn't fit with device state.
	 * err_detail[0] will be the actual device state.
	 */
	IA_GOFO_MSG_ERR_GENERAL_DEVICE_STATE = 3,

	/**
	 * An item in the message is not aligned. err_detail[0] is the
	 * offending item's offset into the message. err_detail[1] is the
	 * required alignment.
	 */
	IA_GOFO_MSG_ERR_GENERAL_ALIGNMENT = 4,

	/**
	 * The referenced address in an indirect message is invalid.
	 * err_detail[0] is the offending address.
	 */
	IA_GOFO_MSG_ERR_GENERAL_INDIRECT_REF_PTR_INVALID = 5,

	/** The message type in the header is invalid. err_detail[0] is the received type. */
	IA_GOFO_MSG_ERR_GENERAL_INVALID_MSG_TYPE = 6,

	/**
	 * The header is NULL. May happen because of an error in SYSCOM,
	 * e.g. a NULL queue parameter or a read failure.
	 * No err_detail because there was no data to parse.
	 */
	IA_GOFO_MSG_ERR_GENERAL_SYSCOM_FAIL = 7,

	/** Size of enumeration */
	IA_GOFO_MSG_ERR_GENERAL_N
};

#pragma pack(push, 1)

/** Type 0 is padding, which is irrelevant here */
#define IA_GOFO_MSG_TYPE_RESERVED 0
/** See ia_gofo_msg_indirect */
#define IA_GOFO_MSG_TYPE_INDIRECT 1
/** See ia_gofo_msg_log */
#define IA_GOFO_MSG_TYPE_LOG 2
/** This type is for returning general errors, such as header errors and SYSCOM failures */
#define IA_GOFO_MSG_TYPE_GENERAL_ERR 3

/** Generic header for all message types */
struct ia_gofo_msg_header {
	/** All messages are top level TLV "items" */
	struct ia_gofo_tlv_header tlv_header;
	/** Additional information. Option type ID's are scoped to the message type. */
	struct ia_gofo_tlv_list msg_options;
	/** Set by SW for commands, and the same value will be returned by FW in that command's response(s) */
	uint64_t user_token;
};

/** Generic header for all the ack message types */
struct ia_gofo_msg_header_ack {
	/** Common ABI message header. Type depends on the kind of response message. */
	struct ia_gofo_msg_header header;
	/** Generic error structure. err_header.error_code == 0 means ACK, all others are an error. */
	struct ia_gofo_msg_err err;
};

/**
 * Generic error message with its own message type.
 * Dedicated to reporting general message errors, such as an invalid message
 * type in the header.
 */
struct ia_gofo_msg_general_err {
	/** Common ack header */
	struct ia_gofo_msg_header_ack header;
};

#pragma pack(pop)

/** @} */

/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_msg_header_test_func(void)
{
	CHECK_ALIGN64(struct ia_gofo_msg_header);
	CHECK_ALIGN64(struct ia_gofo_msg_header_ack);
	CHECK_ALIGN64(struct ia_gofo_msg_general_err);
}
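/*
 * Illustrative sketch only (not part of the ABI): a minimal host-side check
 * of a response popped from the ACK queue. The early return for
 * IA_GOFO_MSG_TYPE_GENERAL_ERR is an assumption based on the documentation
 * above (a general error report means the original message could not even be
 * parsed); otherwise the embedded error structure decides ACK vs. error.
 */
static inline bool example_resp_is_ok(const struct ia_gofo_msg_header_ack *ack)
{
	if (ack->header.tlv_header.tlv_type == (uint16_t)IA_GOFO_MSG_TYPE_GENERAL_ERR)
		return false;	/* header error or SYSCOM failure report */

	return IA_GOFO_MSG_ERR_IS_OK(ack->err);
}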
#pragma pack(push, 1)

/** Data movement mechanisms between producer and consumer */
enum ia_gofo_msg_link_streaming_mode {
	/**
	 * Static Offline.
	 * This value is required to be set when FW is responsible for
	 * managing the data flow across a link without streaming hardware
	 * support.
	 *
	 * If SW manages the timing of this data flow by holding back task
	 * requests, it should not be defined as a link on this interface.
	 */
	IA_GOFO_MSG_LINK_STREAMING_MODE_SOFF = 0,

	/** Dynamic Offline */
	IA_GOFO_MSG_LINK_STREAMING_MODE_DOFF = 1,

	/** Buffer chasing large memory */
	IA_GOFO_MSG_LINK_STREAMING_MODE_BCLM = 2,

	/** Buffer chasing small memory with fixed starting point */
	IA_GOFO_MSG_LINK_STREAMING_MODE_BCSM_FIX = 3,

	/** Number of items in this enumeration */
	IA_GOFO_MSG_LINK_STREAMING_MODE_N
};

/**
 * SOC PBKs.
 * Enumeration of all HW PBK instances which are external to specific IPs.
 * Those PBKs are used for cross-IP connections.
 */
enum ia_gofo_soc_pbk_instance_id {
	IA_GOFO_SOC_PBK_ID0 = 0,
	IA_GOFO_SOC_PBK_ID1 = 1,
	IA_GOFO_SOC_PBK_ID_N
};

/** Maximum number of PBK slots that may be allocated per link */
#define IA_GOFO_MSG_LINK_PBK_MAX_SLOTS (2U)

#pragma pack(pop)

#pragma pack(push, 1)

/**
 * Short generic message to redirect message parsing somewhere else where the
 * real message is located.
 */
struct ia_gofo_msg_indirect {
	/**
	 * Common ABI message header. Type will be IA_GOFO_MSG_TYPE_INDIRECT.
	 * Exceptionally for this message type, user_token is reserved and
	 * must be set to zero. The user_token of the referenced message may
	 * be used as usual.
	 */
	struct ia_gofo_msg_header header;
	/** Copy of the TLV part of the message header that this one refers to */
	struct ia_gofo_tlv_header ref_header;
	/**
	 * Pointer to the actual message header that this message refers to.
	 * The header must be of type TLV_HEADER_TYPE_32BIT,
	 * and alignment must be to TLV_MSG_ALIGNMENT.
	 */
	ia_gofo_addr_t ref_msg_ptr;
};

#pragma pack(pop)

/** @} */

/** Compile time checks only - this function is not to be called! */
static inline void ia_gofo_msg_indirect_test_func(void)
{
	CHECK_ALIGN64(struct ia_gofo_msg_indirect);
}
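/*
 * Illustrative sketch only (not part of the ABI): wrapping a large message
 * with an indirect header as described above. ref_msg is the real message
 * (already written to memory shared with the IPU) and ref_msg_addr is its
 * address in the IPU address space; both names are hypothetical.
 */
static inline void example_fill_indirect(struct ia_gofo_msg_indirect *ind,
					 const struct ia_gofo_msg_header *ref_msg,
					 ia_gofo_addr_t ref_msg_addr)
{
	ind->header.tlv_header.tlv_type = (uint16_t)IA_GOFO_MSG_TYPE_INDIRECT;
	ind->header.tlv_header.tlv_len32 = (uint16_t)(sizeof(*ind) / TLV_ITEM_ALIGNMENT);
	ind->header.msg_options.num_elems = 0U;
	ind->header.msg_options.head_offset = 0U;
	ind->header.user_token = 0U;	/* reserved for the indirect wrapper itself */
	ind->ref_header = ref_msg->tlv_header;
	ind->ref_msg_ptr = ref_msg_addr;	/* must be TLV_MSG_ALIGNMENT aligned */
}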
* * This structure contains the payload information for IA_GOFO log messages, but without * any message header. * * This structure is separate from the containing message (See ia_gofo_msg_log) to allow * its reuse with other log channels, for example a hardware logging unit, that have a * different message header (or none at all) than the ia_gofo_msg_header used in * ia_gofo_msg_log. */ struct ia_gofo_msg_log_info_ts { /** * Message Timestamp. Should be set as close as possible to whatever * triggered the log message. * Units are at the discretion of the sender, but the time source must be * monotonically increasing and no two messages in-flight at the same time * may have the same timestamp. * @todo Optimally, the time source will be synchronized * with other debug sources, such as the hardware trace unit. */ uint64_t msg_ts; /** Log message information */ struct ia_gofo_msg_log_info log_info; }; /** * Log reporting message, sent from IPU to host * * @note The log message severity and source IDs are part of the log meta-data referenced by the * fmt_id and not expressed explicitly in this log message. */ struct ia_gofo_msg_log { /** * Common ABI message header. * Type will be IA_GOFO_MSG_TYPE_DEV_LOG */ struct ia_gofo_msg_header header; /** Log message information including a timestamp */ struct ia_gofo_msg_log_info_ts log_info_ts; }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called! */ static inline void ia_gofo_msg_log_test_func(void) { CHECK_ALIGN64(struct ia_gofo_msg_log); CHECK_ALIGN64(struct ia_gofo_msg_log_info); CHECK_ALIGN64(struct ia_gofo_msg_log_info_ts); } /** * Output queue for acknowledge/done messages * This queue is opened by firmware at startup */ #define IA_GOFO_MSG_ABI_OUT_ACK_QUEUE_ID (0U) /** * Output queue for log messages, including error messages * This queue is opened by firmware at startup */ #define IA_GOFO_MSG_ABI_OUT_LOG_QUEUE_ID (1U) /** * Input queue for device level messages * This queue is opened by firmware at startup */ #define IA_GOFO_MSG_ABI_IN_DEV_QUEUE_ID (2U) #endifipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_config_abi.h000066400000000000000000000011441465530421300307000ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2021 - 2024 Intel Corporation */ #ifndef IPU7_FW_CONFIG_ABI_H #define IPU7_FW_CONFIG_ABI_H #include #define IPU_CONFIG_ABI_WDT_TIMER_DISABLED 0U #define IPU_CONFIG_ABI_CMD_TIMER_DISABLED 0U /** HW watchdog configuration */ struct ipu7_wdt_abi { /** * HW WDT timer#1 timeout value, provided in uSec. * If a value of zero is used, the WDT will be disabled. */ uint32_t wdt_timer1_us; /** * HW WDT timer#2 timeout value, provided in uSec. * If a value of zero is used, the WDT will be disabled. */ uint32_t wdt_timer2_us; }; #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_insys_config_abi.h000066400000000000000000000010641465530421300321260ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2021 - 2024 Intel Corporation */ #ifndef IPU7_FW_INSYS_CONFIG_ABI_H #define IPU7_FW_INSYS_CONFIG_ABI_H #include "ipu7_fw_config_abi.h" #include "ipu7_fw_boot_abi.h" #include "ipu7_fw_isys_abi.h" /** * INSYS specific config. */ struct ipu7_insys_config { /** * Timeout value in milliseconds.
*/ uint32_t timeout_val_ms; /** @see ia_gofo_boot_config.subsys_config */ struct ia_gofo_logger_config logger_config; /** HW watchdog configuration */ struct ipu7_wdt_abi wdt_config; }; #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h000066400000000000000000001126151465530421300304300ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU7_FW_ISYS_ABI_H #define IPU7_FW_ISYS_ABI_H #include "ipu7_fw_common_abi.h" #include "ipu7_fw_isys_abi.h" /** INSYS queue defs - general */ /** MSG, LOG, RESERVED output queues */ #define IPU_INSYS_MAX_OUTPUT_QUEUES (3U) /** Max number of supported virtual streams */ #define IPU_INSYS_STREAM_ID_MAX (16U) /** DEV + MSG (for each stream) */ #define IPU_INSYS_MAX_INPUT_QUEUES (IPU_INSYS_STREAM_ID_MAX + 1U) /** INSYS queue defs - output queues */ #define IPU_INSYS_OUTPUT_FIRST_QUEUE (0U) #define IPU_INSYS_OUTPUT_LAST_QUEUE (IPU_INSYS_MAX_OUTPUT_QUEUES - 1U) /** OUT queues */ #define IPU_INSYS_OUTPUT_MSG_QUEUE (IPU_INSYS_OUTPUT_FIRST_QUEUE) #define IPU_INSYS_OUTPUT_LOG_QUEUE (IPU_INSYS_OUTPUT_FIRST_QUEUE + 1U) #define IPU_INSYS_OUTPUT_RESERVED_QUEUE (IPU_INSYS_OUTPUT_LAST_QUEUE) /** INSYS queue defs - input queues */ #define IPU_INSYS_INPUT_FIRST_QUEUE (IPU_INSYS_MAX_OUTPUT_QUEUES) #define IPU_INSYS_INPUT_LAST_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE + IPU_INSYS_MAX_INPUT_QUEUES - 1U) /** IN queues */ #define IPU_INSYS_INPUT_DEV_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE) #define IPU_INSYS_INPUT_MSG_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE + 1U) #define IPU_INSYS_INPUT_MSG_MAX_QUEUE (IPU_INSYS_MAX_INPUT_QUEUES - 1U) /** Max number of Input/Output Pins */ #define MAX_IPINS (4U) #define MAX_OPINS (4U) /** HW limitation for max number of OPINS for a single IPIN */ #define MAX_OPINS_FOR_SINGLE_IPINS (3U) /* * Consider 1 slot per stream since the driver is not expected to pipeline * device commands for the same stream */ #define DEV_SEND_QUEUE_SIZE (IPU_INSYS_STREAM_ID_MAX) /** Max number of planes for frame formats supported by the FW */ #define PIN_PLANES_MAX (4U) /* * ipu7_insys_return_token: data item of exactly 8 bytes (64 bits) * which can be used to pass a return token back to the host */ typedef uint64_t ipu7_insys_return_token; /** Response type enumeration for response messages to commands (FW to SW) */ enum ipu7_insys_resp_type { IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE = 0, IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK = 1, IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK = 2, IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK = 3, IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK = 4, IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK = 5, IPU_INSYS_RESP_TYPE_PIN_DATA_READY = 6, IPU_INSYS_RESP_TYPE_FRAME_SOF = 7, IPU_INSYS_RESP_TYPE_FRAME_EOF = 8, IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE = 9, IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE = 10, IPU_INSYS_RESP_TYPE_PWM_IRQ = 11, N_IPU_INSYS_RESP_TYPE }; /** Command type enumeration for command messages (SW to FW) */ enum ipu7_insys_send_type { IPU_INSYS_SEND_TYPE_STREAM_OPEN = 0, IPU_INSYS_SEND_TYPE_STREAM_START_AND_CAPTURE = 1, IPU_INSYS_SEND_TYPE_STREAM_CAPTURE = 2, IPU_INSYS_SEND_TYPE_STREAM_ABORT = 3, IPU_INSYS_SEND_TYPE_STREAM_FLUSH = 4, IPU_INSYS_SEND_TYPE_STREAM_CLOSE = 5, N_IPU_INSYS_SEND_TYPE }; /** * @brief Number of VCs supported per port. Equal for all ports.
* MIPI csi2 spec 2.0 supports up to 16 virtual channels per physical channel */ enum ipu7_insys_mipi_vc { IPU_INSYS_MIPI_VC_0 = 0, IPU_INSYS_MIPI_VC_1 = 1, IPU_INSYS_MIPI_VC_2 = 2, IPU_INSYS_MIPI_VC_3 = 3, IPU_INSYS_MIPI_VC_4 = 4, IPU_INSYS_MIPI_VC_5 = 5, IPU_INSYS_MIPI_VC_6 = 6, IPU_INSYS_MIPI_VC_7 = 7, IPU_INSYS_MIPI_VC_8 = 8, IPU_INSYS_MIPI_VC_9 = 9, IPU_INSYS_MIPI_VC_10 = 10, IPU_INSYS_MIPI_VC_11 = 11, IPU_INSYS_MIPI_VC_12 = 12, IPU_INSYS_MIPI_VC_13 = 13, IPU_INSYS_MIPI_VC_14 = 14, IPU_INSYS_MIPI_VC_15 = 15, N_IPU_INSYS_MIPI_VC }; /** * @brief Number of supported ports. */ enum ipu7_insys_mipi_port { IPU_INSYS_MIPI_PORT_0 = 0, IPU_INSYS_MIPI_PORT_1 = 1, IPU_INSYS_MIPI_PORT_2 = 2, IPU_INSYS_MIPI_PORT_3 = 3, IPU_INSYS_MIPI_PORT_4 = 4, IPU_INSYS_MIPI_PORT_5 = 5, NA_IPU_INSYS_MIPI_PORT }; /** Supported Pixel Frame formats. Expandable if needed */ enum ipu7_insys_frame_format_type { IPU_INSYS_FRAME_FORMAT_NV11 = 0,/**< 12 bit YUV 411, Y, UV plane */ IPU_INSYS_FRAME_FORMAT_NV12 = 1,/**< 12 bit YUV 420, Y, UV plane */ IPU_INSYS_FRAME_FORMAT_NV12_16 = 2,/**< 16 bit YUV 420, Y, UV plane */ IPU_INSYS_FRAME_FORMAT_NV12_TILEY = 3,/**< 12 bit YUV 420, Intel * proprietary tiled format, * TileY */ IPU_INSYS_FRAME_FORMAT_NV16 = 4,/**< 16 bit YUV 422, Y, UV plane */ IPU_INSYS_FRAME_FORMAT_NV21 = 5,/**< 12 bit YUV 420, Y, VU plane */ IPU_INSYS_FRAME_FORMAT_NV61 = 6,/**< 16 bit YUV 422, Y, VU plane */ IPU_INSYS_FRAME_FORMAT_YV12 = 7,/**< 12 bit YUV 420, Y, V, U plane */ IPU_INSYS_FRAME_FORMAT_YV16 = 8,/**< 16 bit YUV 422, Y, V, U plane */ IPU_INSYS_FRAME_FORMAT_YUV420 = 9,/**< 12 bit YUV 420, Y, U, V plane */ IPU_INSYS_FRAME_FORMAT_YUV420_10 = 10,/**< yuv420, 10 bits per subpixel */ IPU_INSYS_FRAME_FORMAT_YUV420_12 = 11,/**< yuv420, 12 bits per subpixel */ IPU_INSYS_FRAME_FORMAT_YUV420_14 = 12,/**< yuv420, 14 bits per subpixel */ IPU_INSYS_FRAME_FORMAT_YUV420_16 = 13,/**< yuv420, 16 bits per subpixel */ IPU_INSYS_FRAME_FORMAT_YUV422 = 14,/**< 16 bit YUV 422, Y, U, V plane */ IPU_INSYS_FRAME_FORMAT_YUV422_16 = 15,/**< yuv422, 16 bits per subpixel */ IPU_INSYS_FRAME_FORMAT_UYVY = 16,/**< 16 bit YUV 422, UYVY interleaved */ IPU_INSYS_FRAME_FORMAT_YUYV = 17,/**< 16 bit YUV 422, YUYV interleaved */ IPU_INSYS_FRAME_FORMAT_YUV444 = 18,/**< 24 bit YUV 444, Y, U, V plane */ IPU_INSYS_FRAME_FORMAT_YUV_LINE = 19,/**< Internal format, 2 Y lines * followed by a UV interleaved line */ IPU_INSYS_FRAME_FORMAT_RAW8 = 20, /**< RAW8, 1 plane */ IPU_INSYS_FRAME_FORMAT_RAW10 = 21, /**< RAW10, 1 plane */ IPU_INSYS_FRAME_FORMAT_RAW12 = 22, /**< RAW12, 1 plane */ IPU_INSYS_FRAME_FORMAT_RAW14 = 23, /**< RAW14, 1 plane */ IPU_INSYS_FRAME_FORMAT_RAW16 = 24, /**< RAW16, 1 plane */ IPU_INSYS_FRAME_FORMAT_RGB565 = 25,/**< 16 bit RGB, 1 plane. Each 3 sub * pixels are packed into one 16 bit * value, 5 bits for R, 6 bits for G * and 5 bits for B. */ IPU_INSYS_FRAME_FORMAT_PLANAR_RGB888 = 26, /**< 24 bit RGB, 3 planes */ IPU_INSYS_FRAME_FORMAT_RGBA888 = 27,/**< 32 bit RGBA, 1 plane, * A=Alpha (alpha is unused) */ IPU_INSYS_FRAME_FORMAT_QPLANE6 = 28,/**< Internal, for advanced ISP */ IPU_INSYS_FRAME_FORMAT_BINARY_8 = 29,/**< byte stream, used for jpeg. */ N_IPU_INSYS_FRAME_FORMAT }; /** Temporary for driver compatibility */ #define IPU_INSYS_FRAME_FORMAT_RAW (IPU_INSYS_FRAME_FORMAT_RAW16) /** * Supported MIPI data type.
Keep in sync with the array in ia_css_isys_private.c */ enum ipu7_insys_mipi_data_type { /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ IPU_INSYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, IPU_INSYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, IPU_INSYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ IPU_INSYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ /** Reserved 0x04-0x07 */ IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, /** GENERIC SHORT PACKET DATA TYPES * They are used to keep the timing information for the * opening/closing of shutters, triggering of flashes, etc. */ /** Generic Short Packet Code 1 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, /** Generic Short Packet Code 2 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, /** Generic Short Packet Code 3 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A, /** Generic Short Packet Code 4 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B, /** Generic Short Packet Code 5 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C, /** Generic Short Packet Code 6 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D, /** Generic Short Packet Code 7 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E, /** Generic Short Packet Code 8 */ IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F, /** GENERIC LONG PACKET DATA TYPES */ IPU_INSYS_MIPI_DATA_TYPE_NULL = 0x10, IPU_INSYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, /** Embedded 8-bit non Image Data */ IPU_INSYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, /** Reserved 0x13-0x17 */ IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, /** YUV DATA TYPES */ /** 8 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, /** 10 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, /** 8 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A, /** Reserved 0x1B */ IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B, /** YUV420 8-bit (Chroma Shifted Pixel Sampling) */ IPU_INSYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C, /** YUV420 10-bit (Chroma Shifted Pixel Sampling) */ IPU_INSYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D, /** UYVY..UVYV, 8 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E, /** UYVY..UVYV, 10 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F, /** RGB DATA TYPES */ IPU_INSYS_MIPI_DATA_TYPE_RGB_444 = 0x20, /** BGR..BGR, 5 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_RGB_555 = 0x21, /** BGR..BGR, 5 bits B and R, 6 bits G */ IPU_INSYS_MIPI_DATA_TYPE_RGB_565 = 0x22, /** BGR..BGR, 6 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_RGB_666 = 0x23, /** BGR..BGR, 8 bits per subpixel */ IPU_INSYS_MIPI_DATA_TYPE_RGB_888 = 0x24, /** Reserved 0x25-0x27 */ IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, /** RAW DATA TYPES */ /** RAW data, 6 bits per pixel */ IPU_INSYS_MIPI_DATA_TYPE_RAW_6 = 0x28, /** RAW data, 7 bits per pixel */ IPU_INSYS_MIPI_DATA_TYPE_RAW_7 = 0x29, /** RAW data, 8 bits per pixel */ IPU_INSYS_MIPI_DATA_TYPE_RAW_8 = 0x2A, /** RAW data, 10 bits per pixel */ IPU_INSYS_MIPI_DATA_TYPE_RAW_10 = 0x2B, /** RAW data, 12 bits per pixel */ IPU_INSYS_MIPI_DATA_TYPE_RAW_12 = 0x2C, /** RAW data, 14 bits per pixel */ IPU_INSYS_MIPI_DATA_TYPE_RAW_14 = 0x2D, /** Reserved 0x2E-2F
are used with assigned meaning */ /** RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */ IPU_INSYS_MIPI_DATA_TYPE_RAW_16 = 0x2E, /** Binary byte stream, which is targeted at JPEG, not specified in * CSI-MIPI standard */ IPU_INSYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F, /** Should be RAW20 **/ /** USER DEFINED 8-BIT DATA TYPES */ /** For example, the data transmitter (e.g. the SoC sensor) can keep * the JPEG data as the User Defined Data Type 4 and the MPEG data as * the User Defined Data Type 7. */ /** User defined 8-bit data type 1 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, /** User defined 8-bit data type 2 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, /** User defined 8-bit data type 3 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, /** User defined 8-bit data type 4 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, /** User defined 8-bit data type 5 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, /** User defined 8-bit data type 6 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, /** User defined 8-bit data type 7 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, /** User defined 8-bit data type 8 */ IPU_INSYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, /** Reserved 0x38-0x3F */ IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F, /** Always keep as the last and max value */ N_IPU_INSYS_MIPI_DATA_TYPE = 0x40 }; /** * Describes whether long MIPI packets have their * DT renamed to some other DT format. */ enum ipu7_insys_mipi_dt_rename_mode { IPU_INSYS_MIPI_DT_NO_RENAME = 0, IPU_INSYS_MIPI_DT_RENAMED_MODE = 1, N_IPU_INSYS_MIPI_DT_MODE }; /** * @brief IS Send Message enable or disable values * @{ */ #define IPU_INSYS_SEND_MSG_ENABLED 1U #define IPU_INSYS_SEND_MSG_DISABLED 0U /** @} */ /** * @brief Per stream configuration of sync messages * * Sync messages are configured per stream: send response, and send response with trigger. * These messages are written to a one-byte map; raise a bit to enable. * Each frame in this stream will have the very same configuration, * as was configured for the stream. * * When enabling an IRQ sync message, one must enable send response as well, * meaning there is no case where an interrupt is sent without a response message. * * It is possible to set a bit to send a response only, without an interrupt.
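 *
 * For illustration only (a hypothetical sketch, not part of the original header): a stream
 * that wants SOF/EOF response messages, with an interrupt on SOF only, could build its
 * sync message map from the bits defined below as:
 *
 * @code
 * uint16_t sync_msg_map = IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF |
 *                         IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF |
 *                         IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF;
 * @endcode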
* @{ */ #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF (1U << 0U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF (1U << 1U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF (1U << 2U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF (1U << 3U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED (1U << 4U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF_DISCARDED (1U << 5U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED (1U << 6U) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF_DISCARDED (1U << 7U) /** @} */ /** Enable all Stream Sync Message Send Response */ #define IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_RESP ( \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF_DISCARDED) /** Enable all Stream Sync Message Send IRQ */ #define IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_IRQ ( \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF_DISCARDED) /** * @brief Per stream configuration of response messages and interrupts for these messages * * Each message that FW responds with can be configured either to be sent or not. * Each message can be sent as a response, while the interrupt for that message is separately configurable. * It is not possible to enable the interrupt for a message while disabling the message itself. * * Send response means sending a message. * Send interrupt means sending an interrupt request right after the message is sent. * * @{ */ #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_OPEN_DONE (1U << 0U) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE (1U << 1U) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_START_ACK (1U << 2U) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_START_ACK (1U << 3U) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_CLOSE_ACK (1U << 4U) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_CLOSE_ACK (1U << 5U) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_FLUSH_ACK (1U << 6U) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_FLUSH_ACK (1U << 7U) /* * Note: there is a uint32_t cast for (1U) as a WA for the KW checker * MISRA.SHIFT.RANGE.201 which interprets 1U's type as unsigned char rather * than unsigned int. */ #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_ABORT_ACK ((uint32_t)(1U) << 8U) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_ABORT_ACK ((uint32_t)(1U) << 9U) /** @} */ /** Enable all Stream Message Send Response */ #define IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP ( \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_OPEN_DONE | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_START_ACK | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_CLOSE_ACK | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_FLUSH_ACK | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_ABORT_ACK) /** Enable all Stream Message Send IRQ */ #define IPU_INSYS_STREAM_ENABLE_MSG_SEND_IRQ ( \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_START_ACK | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_CLOSE_ACK | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_FLUSH_ACK | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_ABORT_ACK) /** * Per-frame configuration of responses and interrupts for these messages. * Each message that FW responds with can be configured either to send an IRQ for this message or not. * * It is not possible to enable the interrupt for a message while disabling the message itself.
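 *
 * For illustration only (a hypothetical sketch, not part of the original header): a buffer
 * set that wants a capture-done response with an interrupt, plus a pin-data-ready response
 * without an interrupt, could set its capture message map (see ipu7_insys_buffset) as:
 *
 * @code
 * uint8_t capture_msg_map = IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE |
 *                           IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE |
 *                           IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY;
 * @endcode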
* @{ */ #define IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_ACK (1U << 0U) #define IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_ACK (1U << 1U) #define IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE (1U << 2U) #define IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE (1U << 3U) #define IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY (1U << 4U) #define IPU_INSYS_FRAME_MSG_SEND_IRQ_PIN_DATA_READY (1U << 5U) /** @} */ /** Enable all Frame Message Send response message */ #define IPU_INSYS_FRAME_ENABLE_MSG_SEND_RESP ( \ IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_ACK | \ IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE | \ IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY) /** Enable all Frame Message Send IRQ */ #define IPU_INSYS_FRAME_ENABLE_MSG_SEND_IRQ ( \ IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_ACK | \ IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE | \ IPU_INSYS_FRAME_MSG_SEND_IRQ_PIN_DATA_READY) /** Describes the destination options for output pins */ enum ipu7_insys_output_link_dest { /** * Destination is memory *for CPU SW processing* * * @note On platforms where bus snooping is supported but optional, snooping will be enabled for this * destination. */ IPU_INSYS_OUTPUT_LINK_DEST_MEM = 0, /** Destination is IPU PSYS for processing */ IPU_INSYS_OUTPUT_LINK_DEST_PSYS = 1, /** Destination is any other non-CPU or non-IPU entity such as GPU */ IPU_INSYS_OUTPUT_LINK_DEST_IPU_EXTERNAL = 2 }; /** * Describes type of decompression */ enum ipu7_insys_dpcm_type { IPU_INSYS_DPCM_TYPE_DISABLED = 0, IPU_INSYS_DPCM_TYPE_10_8_10 = 1, IPU_INSYS_DPCM_TYPE_12_8_12 = 2, IPU_INSYS_DPCM_TYPE_12_10_12 = 3, N_IPU_INSYS_DPCM_TYPE }; /** * Describes type of predictor */ enum ipu7_insys_dpcm_predictor { IPU_INSYS_DPCM_PREDICTOR_1 = 0, IPU_INSYS_DPCM_PREDICTOR_2 = 1, N_IPU_INSYS_DPCM_PREDICTOR }; /** Send queue token flag */ enum ipu7_insys_send_queue_token_flag { /** Default value for send queue token flag */ IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_NONE = 0, /** Instructs a forced flush command, used for HKR1 INSYS streaming to PSYS in BCSM mode */ IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_FLUSH_FORCE = 1 }; #pragma pack(push, 1) /** * @brief Generic resolution structure */ struct ipu7_insys_resolution { /** Width */ uint32_t width; /** Height */ uint32_t height; }; /** * @brief Output pin payload */ struct ipu7_insys_capture_output_pin_payload { /** Points to output pin buffer - buffer identifier */ uint64_t user_token; /** Points to output pin buffer - CSS Virtual Address */ ia_gofo_addr_t addr; /** Padding */ uint8_t pad[4]; }; /** * @brief Defines where the output pin is connected */ struct ipu7_insys_output_link { /** * Number of allocated lines in the output buffer * * Only used in BCSM; set to 0 if unused. * This is the number of lines available for cyclical use and MUST be smaller than * the output image height. */ uint32_t buffer_lines; /** Globally unique identifier for links that connect to a node outside of the local graph */ uint16_t foreign_key; /** * Number of lines to be written before sending a pointer update * - must be a multiple of height */ uint16_t granularity_pointer_update; /** Data movement mechanisms between producer and consumer, see ia_gofo_msg_link_streaming_mode */ uint8_t msg_link_streaming_mode; /** * PBK instance for cross-IP connection. * When using the ISYS internal PBK, use IPU_MSG_LINK_PBK_ID_DONT_CARE. * Links with SOFF streaming mode must use IPU_MSG_LINK_PBK_ID_DONT_CARE. * * @see enum ia_gofo_soc_pbk_instance_id */ uint8_t pbk_id; /** * Relevant entry id in the PBK that manages producer-consumer handshake.
* When using the ISYS internal PBK, use IPU_MSG_LINK_PBK_ID_DONT_CARE. * Links with SOFF streaming mode must use IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE. */ uint8_t pbk_slot_id; /** To which IP the link is connected (ddr/psys/...), see ipu7_insys_output_link_dest */ uint8_t dest; /** * Is the PBK managed by HW or SW? * * - HW managed - * HW will confirm that the link buffer has been fully consumed by the consumer before allowing the * producer to transmit the next frame. * * - SW managed - * SW verifies the shared buffer is not being used by anyone else, including consumers of the * previous frames. * * Usage: HW_MANAGED = 0, SW_MANAGED = 1 * BCSM single or private buffer: HW_MANAGED * BCSM double buffer: SW_MANAGED * Other: SW_MANAGED */ uint8_t use_sw_managed; uint8_t pad[3]; }; /** * @brief Defines the output cropping. * If both line_top and line_bottom are 0, cropping is disabled. */ struct ipu7_insys_output_cropping { /** When cropping is enabled, all lines before this line will be cropped. */ uint16_t line_top; /** When cropping is enabled, all lines after this line will be cropped. */ uint16_t line_bottom; }; /** * @brief Output decompression */ struct ipu7_insys_output_dpcm { /** 0 - DPCM disabled, 1 - enabled */ uint8_t enable; /** type of compression, see ipu7_insys_dpcm_type */ uint8_t type; /** predictor of compression, see ipu7_insys_dpcm_predictor */ uint8_t predictor; /** not used */ uint8_t pad; }; /** * @brief Output Pin information and details */ struct ipu7_insys_output_pin { /** output pin link */ struct ipu7_insys_output_link link; /** output pin crop */ struct ipu7_insys_output_cropping crop; /** output decompression */ struct ipu7_insys_output_dpcm dpcm; /** output stride in Bytes (not valid for statistics) */ uint32_t stride; /** frame format type, see ipu7_insys_frame_format_type */ uint16_t ft; /** Assert if the pin event should trigger an IRQ */ uint8_t send_irq; /** input pin id/index which is the source of the data for this output pin */ uint8_t input_pin_id; /** * Indicates an early acknowledge. * 1 - will configure an early ack * 0 - will use the default behavior of an end-of-frame ack. */ uint8_t early_ack_en; /** Padding */ uint8_t pad[3]; }; /** * @brief Stream-level configuration of a single input pin affecting all frames in the stream */ struct ipu7_insys_input_pin { /** input resolution */ struct ipu7_insys_resolution input_res; /** * Sync Event messages configuration map * * Consists of different event message types. * Each type can be configured either to send an event message response or to send an interrupt with the event message. * When only the interrupt option is configured by setting the relevant bit, * the event message will be sent together with an interrupt.
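 *
 * For illustration only (a hypothetical sketch; in_pin is an assumed pointer to this
 * struct ipu7_insys_input_pin):
 *
 * @code
 * // enable all sync response messages, with an interrupt on EOF only
 * in_pin->sync_msg_map = IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_RESP |
 *                        IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF;
 * @endcode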
*/ uint16_t sync_msg_map; /** mipi data type, see ipu7_insys_mipi_data_type */ uint8_t dt; /** * Disable unpacking of the MIPI packet; * store the incoming stream as-is to DDR without the header * (the header will be removed) */ uint8_t disable_mipi_unpacking; /** Defines whether MIPI data is encapsulated in some other data type, see ipu7_insys_mipi_dt_rename_mode */ uint8_t dt_rename_mode; /** mapped_dt */ uint8_t mapped_dt; uint8_t pad[2]; }; /** * @brief ISYS stream configuration top level data structure */ struct ipu7_insys_stream_cfg { /** Input pin descriptors */ struct ipu7_insys_input_pin input_pins[MAX_IPINS]; /** Output pin descriptors */ struct ipu7_insys_output_pin output_pins[MAX_OPINS]; /** * Map describing which stream messages are enabled for Response Send and Interrupt */ uint16_t stream_msg_map; /** Specifies port_id, see enum ipu7_insys_mipi_port */ uint8_t port_id; /** MIPI Virtual Channel (up to 16 virtual channels per physical channel), see ipu7_insys_mipi_vc */ uint8_t vc; /** Number of input pins */ uint8_t nof_input_pins; /** Number of output pins */ uint8_t nof_output_pins; uint8_t pad[2]; }; /** * @brief Frame buffer set */ struct ipu7_insys_buffset { /** Output pin addresses */ struct ipu7_insys_capture_output_pin_payload output_pins[MAX_OPINS]; /** Capture message map, see IPU_INSYS_FRAME_MSG */ uint8_t capture_msg_map; /** Frame number associated with this buffer set */ uint8_t frame_id; /* * Intentional frame drop boolean flag. * 0 for no drop, else drop frame. */ uint8_t skip_frame; /** Padding */ uint8_t pad[5]; }; /** * @brief Response information */ struct ipu7_insys_resp { /** */ uint64_t buf_id; /** This field is only valid for pin-event-related responses; contains pin addresses */ struct ipu7_insys_capture_output_pin_payload pin; /** * Generic error structure. err_header.error_code == 0 means ACK, * all others are an error */ struct ia_gofo_msg_err error_info; /** Time information for an event when available */ uint32_t timestamp[2]; /** Response type, see ipu7_insys_resp_type */ uint8_t type; /** Data movement mechanisms between producer and consumer, see ia_gofo_msg_link_streaming_mode */ uint8_t msg_link_streaming_mode; /** Stream ID this response corresponds to */ uint8_t stream_id; /** Pin ID that the pin payload corresponds to */ uint8_t pin_id; /** Valid for STREAM_START_AND_CAPTURE_DONE, STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED */ uint8_t frame_id; /** Confirmation of intentional frame drop. See ipu7_insys_buffset::skip_frame */ uint8_t skip_frame; /** Padding */ uint8_t pad[2]; }; /** * @brief Response queue token */ struct ipu7_insys_resp_queue_token { /** Response information */ struct ipu7_insys_resp resp_info; }; /** * @brief Send queue token */ struct ipu7_insys_send_queue_token { /** Buffer handle */ uint64_t buf_handle; /** addr */ ia_gofo_addr_t addr; /** Stream ID */ uint16_t stream_id; /** Send type, see ipu7_insys_send_type */ uint8_t send_type; /** Flags provided with the message, see ipu7_insys_send_queue_token_flag */ uint8_t flag; }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called!
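 * (CHECK_ALIGN32/CHECK_ALIGN64 come from the common ABI headers; presumably they expand to
 * compile-time assertions that the structure size is a multiple of 4 or 8 bytes, so placing
 * them in a never-called inline function forces the checks at build time.)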
*/ static inline void ipu7_insys_types_test_func(void) { CHECK_ALIGN32(struct ipu7_insys_resolution); CHECK_ALIGN64(struct ipu7_insys_capture_output_pin_payload); CHECK_ALIGN32(struct ipu7_insys_output_pin); CHECK_ALIGN32(struct ipu7_insys_input_pin); CHECK_ALIGN32(struct ipu7_insys_output_cropping); CHECK_ALIGN32(struct ipu7_insys_stream_cfg); CHECK_ALIGN64(struct ipu7_insys_buffset); CHECK_ALIGN64(struct ipu7_insys_resp); CHECK_ALIGN64(struct ipu7_insys_resp_queue_token); CHECK_ALIGN64(struct ipu7_insys_send_queue_token); CHECK_ALIGN32(struct ipu7_insys_output_link); } /* Note that the below enums representing IPU message errors are located here temporarily */ /** * Error detail enumeration for error group INSYS_MSG_ERR_GROUP_STREAM used exclusively * in the stream ack messages */ enum insys_msg_err_stream { /** No error */ INSYS_MSG_ERR_STREAM_OK = IA_GOFO_MSG_ERR_OK, /** * Invalid stream ID. * err_detail[0] is the offending stream_id * err_detail[1] is the max stream_id */ INSYS_MSG_ERR_STREAM_STREAM_ID = 1, /** Stream Output Pins count violation, err_detail[0] is the offending number of output pins */ INSYS_MSG_ERR_STREAM_MAX_OPINS = 2, /** Stream Input Pins count violation, err_detail[0] is the offending number of input pins */ INSYS_MSG_ERR_STREAM_MAX_IPINS = 3, /** * Stream message map violation, err_detail[0] is the offending messages map * Enabling IRQ for a message but disabling the response for the same message is not allowed */ INSYS_MSG_ERR_STREAM_STREAM_MESSAGES_MAP = 4, /** * Sync messages map violation * Enabling IRQ for a message but disabling the response for the same message is not allowed * err_detail[0] is the sync message map * err_detail[1] is pin_id */ INSYS_MSG_ERR_STREAM_SYNC_MESSAGES_MAP = 5, /** * Output pin's sensor type is invalid. * err_detail[0] is pin_id * err_detail[1] is the sensor type */ INSYS_MSG_ERR_STREAM_SENSOR_TYPE = 6, /** * Foreign key is invalid. * err_detail[0] is the output pin id * err_detail[1] is the offending foreign_key */ INSYS_MSG_ERR_STREAM_FOREIGN_KEY = 7, /** * Streaming mode is invalid. * err_detail[0] is the output pin id * err_detail[1] is the streaming mode */ INSYS_MSG_ERR_STREAM_STREAMING_MODE = 8, /** * DPCM enable value is invalid. * err_detail[0] is the output pin id * err_detail[1] is the DPCM enable value */ INSYS_MSG_ERR_STREAM_DPCM_EN = 9, /** * DPCM type value is invalid. * err_detail[0] is the output pin id * err_detail[1] is the DPCM type value */ INSYS_MSG_ERR_STREAM_DPCM_TYPE = 10, /** * DPCM predictor value is invalid. * err_detail[0] is the output pin id * err_detail[1] is the DPCM predictor value */ INSYS_MSG_ERR_STREAM_DPCM_PREDICTOR = 11, /** * Granularity pointer update is invalid.
* err_detail[0] is the output pin id * err_detail[1] is the granularity pointer update */ INSYS_MSG_ERR_STREAM_GRANULARITY_POINTER_UPDATE = 12, /** MPF Entry LUT entry resources are all busy, err_detail[0] is the MPF device id */ INSYS_MSG_ERR_STREAM_MPF_LUT_ENTRY_RESOURCES_BUSY = 13, /** MPF Device ID being used is invalid, err_detail[0] is the offending mpf device id */ INSYS_MSG_ERR_STREAM_MPF_DEV_ID = 14, /** * Buffer lines is invalid * err_detail[0] is the output pin id * err_detail[1] is the offending buffer lines value */ INSYS_MSG_ERR_STREAM_BUFFER_LINES = 15, /** Failure matching an output pin to an input pin, err_detail[0] is the offending input pin id */ INSYS_MSG_ERR_STREAM_IPIN_ID = 16, /** * Data type is invalid * err_detail[0] is the output pin id * err_detail[1] is the offending data type */ INSYS_MSG_ERR_STREAM_DATA_TYPE = 17, /** * Invalid stream streaming protocol state, can be an internal fw error related to the streaming protocol * err_detail[0] is the foreign key * err_detail[1] is the streaming mode */ INSYS_MSG_ERR_STREAM_STREAMING_PROTOCOL_STATE = 18, /** * Failure flushing Syscom IN queue * err_detail[0] is the stream id * err_detail[1] is the queue id */ INSYS_MSG_ERR_STREAM_SYSCOM_FLUSH = 19, /** Stream MIPI VC violation, err_detail[0] is the offending value for MIPI VC */ INSYS_MSG_ERR_STREAM_MIPI_VC = 20, /** Stream source violation, err_detail[0] is the offending value for src-stream source */ INSYS_MSG_ERR_STREAM_STREAM_SRC = 21, /** * pbk_id is invalid * err_detail[0] is the output pin id * err_detail[1] is the violating pbk_id */ INSYS_MSG_ERR_STREAM_PBK_ID = 22, /** Command queue deallocation failure, err_detail[0] is the stream id */ INSYS_MSG_ERR_STREAM_CMD_QUEUE_DEALLOCATE = 23, /** * There are no free resources to acquire from. * err_detail[0] is the stream id */ INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES = 24, /** Failure acquiring resources based on the provided input pin configuration */ INSYS_MSG_ERR_STREAM_IPIN_CONFIGURATION = 25, /** * The state of the stream is not valid * err_detail[0] is the stream id * err_detail[1] is the state */ INSYS_MSG_ERR_STREAM_INVALID_STATE = 26, /** * SW managed is invalid * err_detail[0] is the output pin id * err_detail[1] is the offending SW managed value */ INSYS_MSG_ERR_STREAM_SW_MANAGED = 27, /** * pbk_slot_id is invalid * err_detail[0] is the output pin id * err_detail[1] is the violating pbk_slot_id */ INSYS_MSG_ERR_STREAM_PBK_SLOT_ID = 28, /** * HW flush command timed out * err_detail[0] is the stream id */ INSYS_MSG_ERR_STREAM_FLUSH_TIMEOUT = 29, /** * input_pin width is invalid * err_detail[0] is the violating width * err_detail[1] is the input pin id */ INSYS_MSG_ERR_STREAM_IPIN_WIDTH = 30, /** * input_pin height is invalid * err_detail[0] is the violating height * err_detail[1] is the input pin id */ INSYS_MSG_ERR_STREAM_IPIN_HEIGHT = 31, /** * Output pin early ack indication configuration error * Configuring all output pins to an early ack results in a violation. * Configuring an early ack for a stream with a single output pin results in a violation. * err_detail[0] is the output pin number * err_detail[1] is the violating early ack configuration */ INSYS_MSG_ERR_STREAM_OUTPUT_PIN_EARLY_ACK_EN = 32, /** * Inconsistency detected. ((width * bits_per_pixel_by_packing) / 8) must be <= stride * err_detail[0] is ((width * bits_per_pixel_by_packing) / 8) * err_detail[1] is the stride */ INSYS_MSG_ERR_STREAM_INCONSISTENT_PARAMS = 33, /** * Unsupported plane count.
Currently, only a single plane is supported * err_detail[0] is the invalid plane count */ INSYS_MSG_ERR_STREAM_PLANE_COUNT = 34, /** * Frame format type is invalid * err_detail[0] is the output pin id * err_detail[1] is the offending frame format type */ INSYS_MSG_ERR_STREAM_FRAME_FORMAT_TYPE = 35, /** number of IPU message errors for stream type */ INSYS_MSG_ERR_STREAM_N }; /** * Error detail enumeration for error group INSYS_MSG_ERR_GROUP_CAPTURE used exclusively * in the capture ack messages */ enum insys_msg_err_capture { /** No error */ INSYS_MSG_ERR_CAPTURE_OK = IA_GOFO_MSG_ERR_OK, /** * Invalid stream ID. * err_detail[0] is the offending stream_id * err_detail[1] is the max stream_id */ INSYS_MSG_ERR_CAPTURE_STREAM_ID = 1, /** * Invalid frame payload ptr. * Received NULL where a non-NULL value is required or other illegal value. * err_detail[0] is the offending pointer value * err_detail[1] is the stream id */ INSYS_MSG_ERR_CAPTURE_PAYLOAD_PTR = 2, /** No memory slot left for another capture command, err_detail[0] is the stream id */ INSYS_MSG_ERR_CAPTURE_MEM_SLOT = 3, /** * Invalid streaming mode * err_detail[0] is the offending streaming mode * err_detail[1] is the output pin */ INSYS_MSG_ERR_CAPTURE_STREAMING_MODE = 4, /** * There was no free slot available for a capture command. * err_detail[0] is the stream id */ INSYS_MSG_ERR_CAPTURE_AVAILABLE_CMD_SLOT = 5, /** * An error consuming a command * err_detail[0] is the stream id * err_detail[1] is the output pin id */ INSYS_MSG_ERR_CAPTURE_CONSUMED_CMD_SLOT = 6, /** * Command slot payload pointer is null * err_detail[0] is the stream id */ INSYS_MSG_ERR_CAPTURE_CMD_SLOT_PAYLOAD_PTR = 7, /** * Error occurred during preparation of a command before submitting it to HW * err_detail[0] is the stream id */ INSYS_MSG_ERR_CAPTURE_CMD_PREPARE = 8, /** * Invalid output pin id * err_detail[0] is the offending output pin id * err_detail[1] is the stream id */ INSYS_MSG_ERR_CAPTURE_OUTPUT_PIN = 9, /** * Received a small packet without a command; dropping the frame. * err_detail[0] is the stream id * err_detail[1] is the frame id to be dropped */ INSYS_MSG_ERR_CAPTURE_SYNC_FRAME_DROP = 10, /** * Frame messages map violation, err_detail[0] is the offending messages map. * Enabling IRQ for a message while disabling the response for the same message is not allowed. */ INSYS_MSG_ERR_CAPTURE_FRAME_MESSAGES_MAP = 11, /** * Timeout on this capture request * err_detail[0] is the stream id * err_detail[1] is the output pin */ INSYS_MSG_ERR_CAPTURE_TIMEOUT = 12, /** * The state of the stream is not valid * err_detail[0] is the stream id * err_detail[1] is the state */ INSYS_MSG_ERR_CAPTURE_INVALID_STREAM_STATE = 13, /** * Multiple packet header errors * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_MULTIBIT_PH_ERROR_DETECTED = 14, /** * Payload checksum (CRC) error * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_PAYLOAD_CRC_ERROR = 15, /** * Input data loss happening before CSI-FE due to elastic FIFO overflow in CSI2PPI * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_INPUT_DATA_LOSS_ELASTIC_FIFO_OVFL = 16, /** * Pixel buffer overflowing and GDA dropping writes, resulting in loss of frame data * due to system backpressure or programming errors. * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_PIXEL_BUFFER_OVERFLOW = 17, /** * The incoming frame dimensions, received from the sensor, do not match the expected frame dimension values.
* err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_BAD_FRAME_DIM = 18, /** * Missing short synchronization packets (FS/FE). FS after FS/FE after FE. * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_PHY_SYNC_ERR = 19, /** * Frame was partially masked with secure touch enabled. * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_SECURE_TOUCH = 20, /** * A master-slave IID capture error occurred. * Happens when a slave IID doesn't receive FS during the capture window. * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_HW_ERR_MASTER_SLAVE_SYNC_ERR = 21, /** * Frame skip requested but at least one of the output pin addresses is not null * err_detail[0] is the offending output address * err_detail[1] is the problem output pin */ INSYS_MSG_ERR_CAPTURE_FRAME_SKIP_ERR = 22, /** * FE input FIFO overflow * HW can't keep up with the incoming rate * err_detail[0] is the output pin */ INSYS_MSG_ERR_CAPTURE_FE_INPUT_FIFO_OVERFLOW_ERR = 23, /** * FW failed submitting the buffer-set to HW * err_detail[0] is the stream id * err_detail[1] is the output pin */ INSYS_MSG_ERR_CAPTURE_CMD_SUBMIT_TO_HW = 24, /** number of IPU message errors for capture type */ INSYS_MSG_ERR_CAPTURE_N }; /** * Error groups define broad categories of errors in ack messages. * This enumeration defines the error groups for INSYS. */ enum insys_msg_err_groups { /** Reserved value. Used only with IA_GOFO_MSG_ERR_OK & IA_GOFO_MSG_ERR_UNSPECIFED */ INSYS_MSG_ERR_GROUP_RESERVED = IA_GOFO_MSG_ERR_GROUP_RESERVED, /** * Generic message general errors that aren't specific to another group. See ia_gofo_msg_err_general for * individual errors. */ INSYS_MSG_ERR_GROUP_GENERAL = IA_GOFO_MSG_ERR_GROUP_GENERAL, /** Stream open/start/flush/abort/stop message errors. See insys_msg_err_stream for individual errors. */ INSYS_MSG_ERR_GROUP_STREAM = 2, /** Capture message errors. See insys_msg_err_capture for individual errors. */ INSYS_MSG_ERR_GROUP_CAPTURE = 3, /** * Number of items in this enumeration. */ INSYS_MSG_ERR_GROUP_N, }; #endifipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_msg_abi.h000066400000000000000000001455161465530421300302300ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU7_FW_MSG_ABI_H #define IPU7_FW_MSG_ABI_H #include "ipu7_fw_common_abi.h" #pragma pack(push, 1) /** Message type enumeration for the message headers */ enum ipu7_msg_type { /** Type 0 is padding, which is irrelevant here. */ IPU_MSG_TYPE_RESERVED = IA_GOFO_MSG_TYPE_RESERVED, /** See ia_gofo_msg_indirect */ IPU_MSG_TYPE_INDIRECT = IA_GOFO_MSG_TYPE_INDIRECT, /** See ia_gofo_msg_log */ IPU_MSG_TYPE_DEV_LOG = IA_GOFO_MSG_TYPE_LOG, /** See ia_gofo_msg_general_err */ IPU_MSG_TYPE_GENERAL_ERR = IA_GOFO_MSG_TYPE_GENERAL_ERR, IPU_MSG_TYPE_DEV_OPEN = 4, IPU_MSG_TYPE_DEV_OPEN_ACK = 5, IPU_MSG_TYPE_GRAPH_OPEN = 6, IPU_MSG_TYPE_GRAPH_OPEN_ACK = 7, IPU_MSG_TYPE_TASK_REQ = 8, IPU_MSG_TYPE_TASK_DONE = 9, IPU_MSG_TYPE_GRAPH_CLOSE = 10, IPU_MSG_TYPE_GRAPH_CLOSE_ACK = 11, IPU_MSG_TYPE_DEV_CLOSE = 12, IPU_MSG_TYPE_DEV_CLOSE_ACK = 13, IPU_MSG_TYPE_TERM_EVENT = 14, IPU_MSG_TYPE_N, }; #pragma pack(pop) #pragma pack(push, 1) /** Maximum terminals per node */ #define IPU_MSG_MAX_NODE_TERMS (64U) /** Maximum image fragments */ #define IPU_MSG_MAX_FRAGS (7U) /** Enumeration of different kinds of nodes. */ enum ipu7_msg_node_type { /** Type zero is always padding */ IPU_MSG_NODE_TYPE_PAD = 0, /** Basic generic node. May be extended with options.
*/ IPU_MSG_NODE_TYPE_BASE, IPU_MSG_NODE_TYPE_N }; /** * Maximum number of devices possible in a CB * @todo This number is copied from CB HAS "MAX_NUM_OF_DEVICE". Should get from generated headers. * @todo The actual number can be much smaller, so INTERNAL data structures might benefit from * lower memory use. Consider differentiating ABI definition from internal usage. */ #define IPU_MSG_NODE_MAX_DEVICES (128U) /** Number of 32 bit integers in a DEB */ #define DEB_NUM_UINT32 (IPU_MSG_NODE_MAX_DEVICES/(sizeof(uint32_t) * 8U)) /** Type for Terminal Enable Bitmaps (TEB) */ typedef uint32_t ipu7_msg_teb_t[2]; /** Type for Device Enable Bitmap (DEB) bitmaps */ typedef uint32_t ipu7_msg_deb_t[DEB_NUM_UINT32]; /** * Maximum number of routing bits within a CB * @todo This number is copied from CB HAS "MAX_RBM_SIZE". Should get from generated headers. */ #define IPU_MSG_NODE_MAX_ROUTE_ENABLES (128U) /** Number of 32 bit integers in a REB */ #define RBM_NUM_UINT32 (IPU_MSG_NODE_MAX_ROUTE_ENABLES/(sizeof(uint32_t) * 8U)) /** Type for Routing BitMap (RBM) */ typedef uint32_t ipu7_msg_rbm_t[RBM_NUM_UINT32]; /** Type for Route Enable Bitmap (REB). Tied to RBM definition. */ typedef ipu7_msg_rbm_t ipu7_msg_reb_t; /** TLV types for node profiles. All types must extend IPU_MSG_NODE_PROFILE_TYPE_BASE */ enum ipu7_msg_node_profile_type { /** Type zero is always padding */ IPU_MSG_NODE_PROFILE_TYPE_PAD = 0, /** Profile for a basic generic node - see ipu7_msg_node_profile */ IPU_MSG_NODE_PROFILE_TYPE_BASE, /** Profile for a CB node - see ipu7_msg_cb_profile */ IPU_MSG_NODE_PROFILE_TYPE_CB, IPU_MSG_NODE_PROFILE_TYPE_N }; /** * Node profile for generic nodes. Represents one of possibly several ways to configure the * terminals of a node within the same node context. */ struct ipu7_msg_node_profile { /** Type here is one of ipu7_msg_node_profile_type */ struct ia_gofo_tlv_header tlv_header; /** Terminal Enable Bitmap - must be a subset of that declared during graph open. */ ipu7_msg_teb_t teb; }; /** Node profile for CB nodes. Includes additional bitmaps that CB nodes require */ struct ipu7_msg_cb_profile { /** Generic node profile. CB extensions follow. */ struct ipu7_msg_node_profile profile_base; /** Internal CB Device Enable Bitmask */ ipu7_msg_deb_t deb; /** Internal CB Routing BitMask */ ipu7_msg_rbm_t rbm; /** Internal CB Routing element Enable Bitmask */ ipu7_msg_reb_t reb; }; /** Maximum number of profiles allowed in ipu7_msg_node::profile_list */ #define IPU_MSG_NODE_MAX_PROFILES (2U) /** Default profile index. Must always exist as there must always be at least one profile. */ #define IPU_MSG_NODE_DEF_PROFILE_IDX (0U) /** * Special node resource ID to identify a generic external node. Required when there is a link * to/from IPU and that node. * * Usage of this ID assumes that all external IPs have the same abstracted streaming link * interface with IPU, such that IPU need not know or care who they really are. Thus we need not * define specific IDs for each external node and maintain them separately per project. * * If an external node has an interface requiring specific behavior from IPU, then a specific * resource ID must be defined higher in the stack, just like IPU nodes. * * Generic EXT_IP node instances (see ipu7_msg_node) should use a single node profile (see * ipu7_msg_node::profiles_list) with TLV type IPU_MSG_NODE_PROFILE_TYPE_BASE and structure * ipu7_msg_node_profile. ipu7_msg_node_profile::teb should be set to all-ones, as if all terminals * are enabled.
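 *
 * For illustration only (a hypothetical host-side sketch; node and profile are assumed
 * objects, and the low/high word order of teb is an assumption):
 *
 * @code
 * node->node_rsrc_id = IPU_MSG_NODE_RSRC_ID_EXT_IP;
 * profile->teb[0] = IPU_MSG_NODE_DONT_CARE_TEB_LO;  // all terminals "enabled"
 * profile->teb[1] = IPU_MSG_NODE_DONT_CARE_TEB_HI;
 * @endcode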
* * Links to/from EXT_IP nodes should specify the node_ctx_id of the EXT_IP instance, as for all * links, in their endpoints. The endpoint terminal ID should be set to * IPU_MSG_LINK_TERM_ID_DONT_CARE. */ #define IPU_MSG_NODE_RSRC_ID_EXT_IP (0xFFU) /** * External IP Nodes have all their TEB bits set. * @{ */ #define IPU_MSG_NODE_DONT_CARE_TEB_HI (0xFFFFFFFFU) #define IPU_MSG_NODE_DONT_CARE_TEB_LO (0xFFFFFFFFU) /** @} */ /** * Node resource ID of INSYS, required when there is a link from INSYS to PSYS. */ #define IPU_MSG_NODE_RSRC_ID_IS (0xFEU) /** Base structure for all node objects */ struct ipu7_msg_node { /** Type here is one of ipu7_msg_node_type */ struct ia_gofo_tlv_header tlv_header; /** * Identifier of node resource ID in the manifest graph model. See also * IPU_MSG_NODE_RSRC_ID_EXT_IP, IPU_MSG_NODE_RSRC_ID_IS */ uint8_t node_rsrc_id; /** * Zero-based index of this node in the fgraph. Must be numbered in the * same order as the node appears in the fgraph's node-list. */ uint8_t node_ctx_id; /** Number of fragments for all future frames in this node context */ uint8_t num_frags; /** Alignment. Set to zero. */ uint8_t reserved[1]; /** * List of configuration alternatives for this node. At least one is required. * All TLV profile types must derive from ipu7_msg_node_profile. Profile order is * significant as task generation will be defined by the order of the profiles * in the list. See struct ipu7_msg_node_profile. */ struct ia_gofo_tlv_list profiles_list; /** * List of terminal objects of type @see ipu7_msg_term or derivations. * Only enabled terminals are allowed - and even then they are optional. Generally, * terminals need not be in the list if they have no special options as the superteb * defines which are enabled. */ struct ia_gofo_tlv_list terms_list; /** Extension options per node */ struct ia_gofo_tlv_list node_options; }; /** Enumeration of node option types */ enum ipu7_msg_node_option_types { /** All TLV headers must define value zero as padding */ IPU_MSG_NODE_OPTION_TYPES_PADDING = 0, /** Number of items in this enumeration */ IPU_MSG_NODE_OPTION_TYPES_N }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called! */ static inline void ipu7_msg_node_test_func(void) { CHECK_ALIGN32(struct ipu7_msg_node); CHECK_ALIGN32(struct ipu7_msg_node_profile); CHECK_ALIGN32(struct ipu7_msg_cb_profile); } #pragma pack(push, 1) /** Type of link for PARSING. Currently only a generic type is supported. */ enum ipu7_msg_link_type { /** Type zero is always padding */ IPU_MSG_LINK_TYPE_PAD = 0, /** Basic generic link */ IPU_MSG_LINK_TYPE_GENERIC = 1, IPU_MSG_LINK_TYPE_N }; /** * Enumeration of known link options * Link extensions can be attached to the terminal through these options. */ enum ipu7_msg_link_option_types { /** Type zero is always padding */ IPU_MSG_LINK_OPTION_TYPES_PADDING = 0, /** Compression parameters - see ipu7_msg_link_cmprs_option */ IPU_MSG_LINK_OPTION_TYPES_CMPRS = 1, /** Number of link options */ IPU_MSG_LINK_OPTION_TYPES_N }; /** * Possible values for struct ipu7_msg_link_cmprs_plane_desc.bit_depth. * Added here as an enum for reference only.
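 *
 * For illustration only (a hypothetical sketch; plane_desc is an assumed pointer to a
 * struct ipu7_msg_link_cmprs_plane_desc):
 *
 * @code
 * plane_desc->bit_depth = (uint8_t)IPU_MSG_LINK_CMPRS_OPTION_10BPP;  // 10 bits per pixel
 * @endcode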
*/ enum ipu7_msg_link_cmprs_option_bit_depth { IPU_MSG_LINK_CMPRS_OPTION_8BPP = 0, IPU_MSG_LINK_CMPRS_OPTION_10BPP = 1, IPU_MSG_LINK_CMPRS_OPTION_12BPP = 2, }; /** * Denominator value for usage of the space saving ratio field * * See ipu7_msg_link_cmprs_option::space_saving_ratio_numerator */ #define IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM (128U) /** Number of lossy compression algorithm parameters. */ #define IPU_MSG_LINK_CMPRS_LOSSY_CFG_PAYLOAD_SIZE (5U) /** * Maximum configurable space saving ratio numerator for footprint compression * * See ipu7_msg_link_cmprs_option::space_saving_ratio_numerator */ #define IPU_MSG_LINK_CMPRS_SPACE_SAVING_NUM_MAX (IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM - 1U) /** * Per-plane information required for compression */ struct ipu7_msg_link_cmprs_plane_desc { /** 1 if plane exists, otherwise 0. If 0, the rest of the structure is to be ignored. */ uint8_t plane_enable; /** * 1 if compression is enabled, else 0. * Note that one plane may be compressed while the other(s), if any, are not. */ uint8_t cmprs_enable; /** Plane id for the encoder, must match the arch spec */ uint8_t encoder_plane_id; /** Plane id for the decoder, must match the arch spec */ uint8_t decoder_plane_id; /** * 1 if lossy compression is enabled (with or without footprint compression), * else 0 for lossless compression. */ uint8_t cmprs_is_lossy; /** * 1 if footprint compression is enabled, else 0. * Enabling footprint compression is valid only when cmprs_is_lossy is enabled. */ uint8_t cmprs_is_footprint; /** * Planar bits per pixel. This field contains a HW register mapped value. * See enum ipu7_msg_link_cmprs_option_bit_depth. */ uint8_t bit_depth; /** * Footprint space savings scaled to IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM, * such that: * space_saving_ratio = 1 - * (space_saving_numerator / IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM) * * For example, a value of (IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM / 4) here results in a * 75% memory footprint reduction. * * Largest value is IPU_MSG_LINK_CMPRS_SPACE_SAVING_NUM_MAX * * Not used for lossless compression mode. * * @note Corresponds to HW register FP_CRATIO, but the term "compression ratio" there seems * to be a misnomer, with an inverse relationship to the more accepted cratio definition, * which is why "space saving" is used here instead. * */ uint8_t space_saving_numerator; /** * Offset in bytes of the plane buffer from the beginning of the buffer. * The first plane should start at offset zero. * * Buffer base pointer + pixels_offset must be 4KB page aligned. */ uint32_t pixels_offset; /** * Offset in bytes of the compression Tile Status meta-data from the beginning of the * link buffer. * * Buffer base pointer + ts_offset must be at least cache-line (64B) aligned. On some platforms, * may need to be 4KB page aligned for best performance. */ uint32_t ts_offset; /** * Stride of a row of *compression tiles* in bytes * * Calculated from the first byte in a compression tile to the first byte in the next row of * compression tiles. Compression tile dimensions depend on the link and plane. * * Note the implied requirement that the plane's line stride be a multiple of the compression tile * width *at least*. Some planes may have more strict requirements. * */ uint32_t tile_row_to_tile_row_stride; /** * Height in compression tiles * * The plane's height must be a multiple of the compression tile height *at least*, where compression * tile dimensions depend on the link and plane. Some planes may have more strict requirements.
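 *
 * For illustration only (a hypothetical sketch; plane_height and tile_height are assumed
 * values, with plane_height an exact multiple of the tile height as required above):
 *
 * @code
 * uint32_t rows_of_tiles = plane_height / tile_height;
 * @endcode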
*/ uint32_t rows_of_tiles; /** * Lossy compression configuration data written as-is to the hardware. * Will be ignored when compression is lossless (i.e. cmprs_is_lossy is 0) * * LOSSY_CFG0 – Relevant to both BW and FP compression * [0:7] BW_CRATIO_PLUS * [8:15] BW_CRATIO_MINS * [16:23] BW_INST_UPPER * [24:31] BW_INST_LOWER * * LOSSY_CFG1 – Relevant to both BW and FP compression * [0:7] INIT_HISTORY * * LOSSY_CFG2 – Relevant to both BW and FP compression * [0:4] INIT_QP * [8:12] MAX_QP * [16:20] MIN_QP * [24:25] MA_WIN_SIZE * * LOSSY_CFG3 – Relevant only to FP compression * [8:11] MAX_QP_INC * [16:19] MAX_QP_DEC * [24:26] QP_INC_RST_VALUE * * LOSSY_CFG4 – Relevant only to FP compression * [0:3] LOG_FP_GUARD_BAND */ uint32_t lossy_cfg[IPU_MSG_LINK_CMPRS_LOSSY_CFG_PAYLOAD_SIZE]; }; /** Maximum number of planes supported by the compression feature */ #define IPU_MSG_LINK_CMPRS_MAX_PLANES (2U) /** Value for ipu7_msg_link_cmprs_option::align_interval when streaming is disabled */ #define IPU_MSG_LINK_CMPRS_NO_ALIGN_INTERVAL (0U) /** Minimum value allowed in ipu7_msg_link_cmprs_option::align_interval when streaming is enabled */ #define IPU_MSG_LINK_CMPRS_MIN_ALIGN_INTERVAL (16U) /** Maximum value allowed in ipu7_msg_link_cmprs_option::align_interval */ #define IPU_MSG_LINK_CMPRS_MAX_ALIGN_INTERVAL (1024U) /** * Additional node-task request data specific to the compression on * links between nodes. Only relevant for link endpoints where compression support * is available on both sides of the link. * * If compression is not enabled for the link, this option must not be instantiated. * */ struct ipu7_msg_link_cmprs_option { /** Header common to all link options. TLV type ID here is IPU_MSG_LINK_OPTION_TYPES_CMPRS. */ struct ia_gofo_tlv_header header; /** * Size of the buffer allocated for the compressed frame, including all pixel planes * and any tile status planes. * * May be larger than the uncompressed buffer size (see ipu7_msg_term::payload_size) in * many compression configurations as the compression meta-data takes up additional space. */ uint32_t cmprs_buf_size; /** * Interval of TS alignment in buffer chasing streaming modes (i.e. BCSM or BCLM). Must be the * same as the streaming pointer update granularity set in the hardware streaming configuration, * which is opaque to this SW-FW interface. Set to IPU_MSG_LINK_CMPRS_NO_ALIGN_INTERVAL for SOFF * or DOFF links. * * For streaming links, valid values are power-of-2 only, from * IPU_MSG_LINK_CMPRS_MIN_ALIGN_INTERVAL to IPU_MSG_LINK_CMPRS_MAX_ALIGN_INTERVAL, inclusive. When * compression is enabled on streaming links, this is a constraint on the pointer update granularity * as well. * * @note (Theoretically, there are special cases where IPU_MSG_LINK_CMPRS_NO_ALIGN_INTERVAL here * could be set for a buffer chasing streaming link, but this would be very fragile and is * not supported in this interface.) * * @todo OPEN: Should this be defined in pow2 exponents like HW? Tend no. */ uint16_t align_interval; /** Reserved for alignment. Set to zero. */ uint8_t reserved[2]; /** Compressed configuration and dimensions description, per plane.
*/ struct ipu7_msg_link_cmprs_plane_desc plane_descs[IPU_MSG_LINK_CMPRS_MAX_PLANES]; }; /** EndPoint of a link comprised of node and terminal IDs */ struct ipu7_msg_link_ep { /** Node ctx ID as described in the list of nodes in the fgraph */ uint8_t node_ctx_id; /** Node terminal ID from manifest */ uint8_t term_id; }; /** EndPoint pair uniquely identifying the link */ struct ipu7_msg_link_ep_pair { /** Source side of the link */ struct ipu7_msg_link_ep ep_src; /** Destination (sink) side of the link */ struct ipu7_msg_link_ep ep_dst; }; /** * All local links (links between nodes within a subsystem) require this value to be set. * There is no need for a true foreign ID in this case, since the same firmware manages both sides of the link. */ #define IPU_MSG_LINK_FOREIGN_KEY_NONE UINT16_MAX /** Maximum number of foreign key IDs that can be allocated by the host */ #define IPU_MSG_LINK_FOREIGN_KEY_MAX (64U) /** * Links with SOFF streaming mode require this value to be set. * All local links (links between nodes within a subsystem) require this value to be set. * There is no need for a real pbk and slot id in this case, * since the same firmware manages both sides of the link. */ #define IPU_MSG_LINK_PBK_ID_DONT_CARE UINT8_MAX /** * Same as @see 'IPU_MSG_LINK_PBK_ID_DONT_CARE' */ #define IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE UINT8_MAX /** * Placeholder terminal ID for link endpoints in EXT_IP nodes * * On parsing, firmware will replace this value with a concrete terminal ID of its choice. * * See IPU_MSG_NODE_RSRC_ID_EXT_IP */ #define IPU_MSG_LINK_TERM_ID_DONT_CARE (0xFFU) /** * Link ABI object. The link describes a directional data flow * where "data" here is in the most general sense (configuration, image, stats, etc.) * * Note that the data flow implies a dependency relationship between producer and consumer */ struct ipu7_msg_link { /** Type here is one of ipu7_msg_link_type */ struct ia_gofo_tlv_header tlv_header; /** Identifier of the link's terminal endpoints */ struct ipu7_msg_link_ep_pair endpoints; /** * Unique identifier for links that connect to a node outside of the local graph. * A unique foreign key is allocated by the host and the value is arbitrary (except local links). * Local links must use IPU_MSG_LINK_FOREIGN_KEY_NONE. * The maximum value is IPU_MSG_LINK_FOREIGN_KEY_MAX. */ uint16_t foreign_key; /** * Data movement mechanisms between producer and consumer * @see enum ia_gofo_msg_link_streaming_mode */ uint8_t streaming_mode; /** * PBK instance for cross-IP connection. * Local links must use IPU_MSG_LINK_PBK_ID_DONT_CARE. * Links with SOFF streaming mode must use IPU_MSG_LINK_PBK_ID_DONT_CARE. * * @see enum ia_gofo_soc_pbk_instance_id */ uint8_t pbk_id; /** * Relevant entry id in the PBK that manages producer-consumer handshake. * Local links must use IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE. * Links with SOFF streaming mode must use IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE. */ uint8_t pbk_slot_id; /** * Delayed processing link flag. * A delayed link is a link between the producer of a previous frame and a consumer of that data on the next frame. * Set to 1 to mark as delayed, 0 if not. */ uint8_t delayed_link; /** Align to 32 bits. Set to zero. */ uint8_t reserved[2]; /** Extension fields for links */ struct ia_gofo_tlv_list link_options; }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called!
*/ static inline void ipu7_msg_abi_link_test_func(void) { CHECK_ALIGN32(struct ipu7_msg_link); CHECK_ALIGN32(struct ipu7_msg_link_cmprs_option); CHECK_ALIGN32(struct ipu7_msg_link_cmprs_plane_desc); } /** * @file * State definitions for various context objects * While these are not explicitly in the messages, they are * defined in the messaging state machine. * * For example, sending a graph_open message will change * the state of a graph context in the host to "OPEN_WAIT". * Successful handling of the graph_open message in firmware * results in changing the firmware graph context state to * "OPEN" and receipt of the graph_open_ack response will change * the graph state to "OPEN" in the host. */ /** * States of a runtime context * @todo Need to add power states */ enum ipu7_msg_dev_state { /** Starting state - device open message not yet sent */ IPU_MSG_DEV_STATE_CLOSED = 0, /** Device open message sent, waiting for ack */ IPU_MSG_DEV_STATE_OPEN_WAIT = 1, /** Only in this state can graphs be opened */ IPU_MSG_DEV_STATE_OPEN = 2, /** Close message sent, waiting for ack */ IPU_MSG_DEV_STATE_CLOSE_WAIT = 3, IPU_MSG_DEV_STATE_N }; /** * States of a graph context * @note The states of the component node contexts are tied * to the graph context state - so the node contexts don't have a state of * their own. */ enum ipu7_msg_graph_state { /** Starting state - device open message not yet sent */ IPU_MSG_GRAPH_STATE_CLOSED = 0, /** Graph open message sent, waiting for ack */ IPU_MSG_GRAPH_STATE_OPEN_WAIT = 1, /** Only in this state can node messages be sent */ IPU_MSG_GRAPH_STATE_OPEN = 2, /** Close message sent, waiting for ack */ IPU_MSG_GRAPH_STATE_CLOSE_WAIT = 3, IPU_MSG_GRAPH_STATE_N }; /** States of a task */ enum ipu7_msg_task_state { /** Task is complete */ IPU_MSG_TASK_STATE_DONE = 0, /** Task has been sent, but is not yet complete. */ IPU_MSG_TASK_STATE_WAIT_DONE = 1, IPU_MSG_TASK_STATE_N }; /** * Error groups define broad categories of errors in ack messages * @todo log messages too? * These are the basic groups common to all applications based on * this messaging protocol. * Some group IDs are reserved for application-specific extensions. * See IPU_MSG_ERR_GROUP_APP_EXT_START */ enum ipu7_msg_err_groups { /** Reserved value. Used only with IPU_MSG_ERR_OK & IPU_MSG_ERR_UNSPECIFED */ IPU_MSG_ERR_GROUP_RESERVED = IA_GOFO_MSG_ERR_GROUP_RESERVED, /** Generic message general errors that aren't specific to another group */ IPU_MSG_ERR_GROUP_GENERAL = IA_GOFO_MSG_ERR_GROUP_GENERAL, /** Device open/close errors */ IPU_MSG_ERR_GROUP_DEVICE = 2, /** Graph open/close errors */ IPU_MSG_ERR_GROUP_GRAPH = 3, /** Task request errors */ IPU_MSG_ERR_GROUP_TASK = 4, /** * Number of items in this enumeration. * Cannot be larger than IPU_MSG_ERR_GROUP_APP_EXT_START */ IPU_MSG_ERR_GROUP_N, }; #pragma pack(push, 1) /** * Request a processing task. * * May only be sent once the graph has been opened successfully, which * also creates the node contexts and their associated task queues. */ struct ipu7_msg_task { /** Message header */ struct ia_gofo_msg_header header; /** Graph instance identifier as specified in the graph open message */ uint8_t graph_id; /** * Index of node context configuration profile (variant) as * set in the graph_open message per node */ uint8_t profile_idx; /** Node context identifier as specified in the graph open message */ uint8_t node_ctx_id; /** Frame identifier - cyclical */ uint8_t frame_id; /** * Fragment identifier (zero-based index) within a frame.
* Must be set to zero for unfragmented frames. */ uint8_t frag_id; /** * Request a "done" message when this task is complete. * 1 --> send done, 0 --> don't send. @see ipu7_msg_task_done */ uint8_t req_done_msg; /** * Request an interrupt when a "done" message is enqueued. Ignored if req_done_msg is 0. */ uint8_t req_done_irq; /** Reserved for alignment. Set to zero. */ uint8_t reserved[1]; /** * Bitmap of *load* terminals (by terminal ID) whose payload has NOT changed since * the last task message *for the same node context* AND * there is thus potential to skip the loading of those payloads in hardware. * Any terminal marked here must be enabled in the profile * indicated by profile_idx both in this task and the previous task on this node context. * * The payload buffer pointer for terminals marked here MUST still be supplied in * term_buffers and the payload content MUST be valid as firmware cannot guarantee * that the load will be skipped. Examples: * - The hardware state was lost in a power saving flow between tasks. * - A task from a different node context was scheduled since the last task on this * task's node context. */ ipu7_msg_teb_t payload_reuse_bm; /** * Pointers to buffers that will be used by the enabled terminals, * in the IPU address space. * Each entry corresponds to a terminal where the array index is the terminal ID. * Terminals not enabled in the profile TEB are not required to be set, * but for diagnostics purposes zeroing is preferred. */ ia_gofo_addr_t term_buffers[IPU_MSG_MAX_NODE_TERMS]; }; /** * Signals completion of a processing node * * Roughly parallel to an acknowledge message, except that it: * - Is deferred in time * - Is optional */ struct ipu7_msg_task_done { /** * ABI ACK message header. Includes the common message header too. * Type will be IPU_MSG_TYPE_GRAPH_TASK_DONE */ struct ia_gofo_msg_header_ack header; /** Identifier of the graph containing the node context */ uint8_t graph_id; /** Identifier of the frame that has completed */ uint8_t frame_id; /** Node context identifier as specified in the graph open message */ uint8_t node_ctx_id; /** * Index of node context configuration profile (variant) for * the node task that has completed */ uint8_t profile_idx; /** Identifier of the fragment in the frame that has completed */ uint8_t frag_id; /** Reserved for alignment. Set to zero. */ uint8_t reserved[3]; }; /** * Error detail enumeration for error group IPU_MSG_ERR_GROUP_TASK used exclusively * in the task done messages */ enum ipu7_msg_err_task { /** No error */ IPU_MSG_ERR_TASK_OK = IA_GOFO_MSG_ERR_OK, /** * Invalid graph ID. * err_detail[0] is the offending graph_id */ IPU_MSG_ERR_TASK_GRAPH_ID = 1, /** * Invalid node ctx ID. * err_detail[0] is the offending node_ctx_id */ IPU_MSG_ERR_TASK_NODE_CTX_ID = 2, /** * Invalid profile index. * err_detail[0] is the offending profile_idx. * err_detail[1] is the node_ctx_id */ IPU_MSG_ERR_TASK_PROFILE_IDX = 3, /** Task object memory allocation failure */ IPU_MSG_ERR_TASK_CTX_MEMORY_TASK = 4, /** * Invalid terminal payload ptr. * Received NULL where a non-NULL value is required, or another illegal value. * err_detail[0] is the offending pointer value. * err_detail[1] is the terminal id. */ IPU_MSG_ERR_TASK_TERM_PAYLOAD_PTR = 5, /** * Unexpected frame ID in a task message. * err_detail[0] is the offending frame_id and * err_detail[1] is the expected frame_id. */ IPU_MSG_ERR_TASK_FRAME_ID = 6, /** * Unexpected fragment ID in a task message.
* err_detail[0] is the offending frag_id and * err_detail[1] is the expected frag_id. */ IPU_MSG_ERR_TASK_FRAG_ID = 7, /** * Error on task execution - external error. * err_detail[0] terminal id * err_detail[1] device id */ IPU_MSG_ERR_TASK_EXEC_EXT = 8, /** * Error on task execution - SBX error. * err_detail[0] terminal id * err_detail[1] device id */ IPU_MSG_ERR_TASK_EXEC_SBX = 9, /** * Error on task execution - internal error. * err_detail[0] terminal id * err_detail[1] device id */ IPU_MSG_ERR_TASK_EXEC_INT = 10, /** * Error on task execution - unknown error. * err_detail[0] terminal id * err_detail[1] device id */ IPU_MSG_ERR_TASK_EXEC_UNKNOWN = 11, /** Size of enumeration */ IPU_MSG_ERR_TASK_N }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called! */ static inline void ipu7_msg_task_test_func(void) { CHECK_ALIGN64(struct ipu7_msg_task); CHECK_ALIGN64(struct ipu7_msg_task_done); CHECK_ALIGN32(struct ipu7_msg_link_cmprs_option); } #pragma pack(push, 1) /** * Enumeration of terminal types * @todo As terminals are described by their attributes in the master * graph, it is unclear if this is at all necessary */ enum ipu7_msg_term_type { /** Type zero is always padding */ IPU_MSG_TERM_TYPE_PAD = 0, /** Generic terminal - and the base type for all others */ IPU_MSG_TERM_TYPE_BASE, /** Number of terminal types */ IPU_MSG_TERM_TYPE_N, }; /** * Terminal events that FW will reflect to client SW. * If an event is not directly available in hardware, firmware may * internally and silently work around this at its discretion. */ /** No event notification request */ #define IPU_MSG_TERM_EVENT_TYPE_NONE 0U /** Progress event. */ #define IPU_MSG_TERM_EVENT_TYPE_PROGRESS 1U /** Number of terminal event types */ #define IPU_MSG_TERM_EVENT_TYPE_N (IPU_MSG_TERM_EVENT_TYPE_PROGRESS + 1U) /** * Base terminal structure * * Note the lack of an options field. * This was omitted to save space as most terminals probably won't need this. * This may be added as a subclass extension or alternatively, * node options could be used instead at the price * of specifying the terminal ID in the option. */ struct ipu7_msg_term { /** Type here is one of ipu7_msg_term_type. */ struct ia_gofo_tlv_header tlv_header; /** * ID of terminal in the static node description. * @note Only connect terminals support events. */ uint8_t term_id; /** * Request terminal event. See IPU_MSG_TERM_EVENT_TYPE_BIT macros. * * FW will send an event notification message to client SW when the event occurs. * When multiple event types are supported, they may be requested together by * bitwise OR'ing their bitmapped values here. */ uint8_t event_req_bm; /** Reserved for alignment padding. Set to zero. */ uint8_t reserved[2]; /** * Size of payload associated with this terminal without any compression that may * optionally be applied, if supported. */ uint32_t payload_size; /** Terminal options */ struct ia_gofo_tlv_list term_options; }; /** * Enumeration of known terminal options * Terminal meta-data can be attached to the terminal through these options. */ enum ipu7_msg_term_option_types { IPU_MSG_TERM_OPTION_TYPES_PADDING = 0, /** Number of terminal options */ IPU_MSG_TERM_OPTION_TYPES_N }; /** * Terminal event notification message. Sent when a requested event occurs. * * See ipu7_msg_term::event_req_bm */ struct ipu7_msg_term_event { /** * ABI message header. Type will be IPU_MSG_TYPE_TERM_EVENT. * user_token will be taken from the task wherein the event occurred.
*/ struct ia_gofo_msg_header header; /** Identifier of the graph containing the node context */ uint8_t graph_id; /** Identifier of the frame where the event occurred */ uint8_t frame_id; /** Node context identifier as specified in the graph open message */ uint8_t node_ctx_id; /** * Index of node context configuration profile (variant) for * the node task where the event occurred */ uint8_t profile_idx; /** Identifier of the fragment where the event occurred */ uint8_t frag_id; /** ID of terminal in the static node description. */ uint8_t term_id; /** Terminal event whose occurrence is now being notified. See ipu7_msg_term_event_type. */ uint8_t event_type; /** Reserved for alignment padding. Set to zero. */ uint8_t reserved[1]; /** * Event Timestamp. * * Should be set as close as possible to the event that triggered this notification message. Units * are at the discretion of the sender, and there is no guarantee of time source sync or a * proportional relationship with the client SW clock or even the stability of the time source. * * With all these caveats, this timestamp is only a debug aid at best and should not be relied on for * operational use. The client SW can use the reception time of the message as the next best thing, * if necessary. */ uint64_t event_ts; }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called! */ static inline void ipu7_msg_term_test_func(void) { CHECK_ALIGN32(struct ipu7_msg_term); CHECK_ALIGN64(struct ipu7_msg_term_event); } #pragma pack(push, 1) /** * Device messages map for messages Device Open and Close * @{ */ #define IPU_MSG_DEVICE_SEND_MSG_ENABLED 1U #define IPU_MSG_DEVICE_SEND_MSG_DISABLED 0U #define IPU_MSG_DEVICE_OPEN_SEND_RESP (1U << 0U) #define IPU_MSG_DEVICE_OPEN_SEND_IRQ (1U << 1U) #define IPU_MSG_DEVICE_CLOSE_SEND_RESP (1U << 0U) #define IPU_MSG_DEVICE_CLOSE_SEND_IRQ (1U << 1U) /** @} */ /** * Device open message. Must be sent before any other message. * Negotiates protocol version with firmware and sets global parameters. * In products with both secured and non-secure drivers, * this message must be sent by both drivers separately, each on * its own device queue. This implies that each driver will have its * own logical instantiation of the same IPU hardware; we call this a * "device context". * * No other message types may be sent until this one is positively ACK'd */ struct ipu7_msg_dev_open { /** Common ABI message header. Type will be IPU_MSG_TYPE_DEV_OPEN */ struct ia_gofo_msg_header header; /** Maximum number of graphs that may be opened for this device */ uint32_t max_graphs; /** * Device messages map: configurable send response message and interrupt trigger for that message * * See IPU_MSG_DEVICE_OPEN_SEND for more information about supported device messages. */ uint8_t dev_msg_map; /** * Enables power gating for PSYS power domain. * Write 1 to enable power save mode, 0 to disable it. */ uint8_t enable_power_gating; /** Reserved for alignment. Set to zero. */ uint8_t reserved[2]; }; /** Acknowledges a device open message */ struct ipu7_msg_dev_open_ack { /** * ABI ACK message header. Includes the common message header too. * Type will be IPU_MSG_TYPE_DEV_OPEN_ACK */ struct ia_gofo_msg_header_ack header; }; /** * Device close message. No other messages except device open * may be sent after this message is sent. * In products with both secured and non-secure drivers, this message must * be sent by the secure driver. */ struct ipu7_msg_dev_close { /** Common ABI message header.
Type will be IPU_MSG_TYPE_DEV_CLOSE */ struct ia_gofo_msg_header header; /** * Device messages map: configurable send response message and interrupt trigger for that message * * See IPU_MSG_DEVICE_CLOSE_SEND for more information about supported device messages. */ uint8_t dev_msg_map; /** Reserved for alignment. Set to zero. */ uint8_t reserved[7]; }; /** Acknowledges a device close message. */ struct ipu7_msg_dev_close_ack { /** * ABI ACK message header. Includes the common message header too. * Type will be IPU_MSG_TYPE_DEV_CLOSE_ACK */ struct ia_gofo_msg_header_ack header; }; /** * Error detail enumeration for error group IPU_MSG_ERR_GROUP_DEVICE used exclusively * in the device open ack and device close ack messages */ enum ipu7_msg_err_device { /** No error */ IPU_MSG_ERR_DEVICE_OK = IA_GOFO_MSG_ERR_OK, /** * Attempt to reserve too many graphs in a device context. err_detail[0] is the * offending max_graphs. err_detail[1] is the firmware maximum. */ IPU_MSG_ERR_DEVICE_MAX_GRAPHS = 1, /** Message map configuration is wrong: send msg response is disabled but the IRQ for that msg is enabled */ IPU_MSG_ERR_DEVICE_MSG_MAP = 2, /** Size of enumeration */ IPU_MSG_ERR_DEVICE_N }; #pragma pack(pop) /** @} */ /** Compile time checks only - this function is not to be called! */ static inline void ipu7_msg_device_test_func(void) { CHECK_ALIGN32(struct ia_gofo_version_s); CHECK_ALIGN64(struct ipu7_msg_dev_open); CHECK_ALIGN64(struct ipu7_msg_dev_open_ack); CHECK_ALIGN64(struct ipu7_msg_dev_close); CHECK_ALIGN64(struct ipu7_msg_dev_close_ack); } #pragma pack(push, 1) /** * Graph ID placeholder until the allocated ID becomes known * in a graph_open ACK response */ #define IPU_MSG_GRAPH_ID_UNKNOWN (0xFFU) /** * Graph messages map for messages Graph Open and Close * @{ */ #define IPU_MSG_GRAPH_SEND_MSG_ENABLED 1U #define IPU_MSG_GRAPH_SEND_MSG_DISABLED 0U #define IPU_MSG_GRAPH_OPEN_SEND_RESP (1U << 0U) #define IPU_MSG_GRAPH_OPEN_SEND_IRQ (1U << 1U) #define IPU_MSG_GRAPH_CLOSE_SEND_RESP (1U << 0U) #define IPU_MSG_GRAPH_CLOSE_SEND_IRQ (1U << 1U) /** @} */ /** * Open a graph instance. This is roughly parallel to a frame stream, but it * defines the node topology (actually, the topology superset) of the processing * graph. * No node tasks may be queued until this message is positively acknowledged. */ struct ipu7_msg_graph_open { /** Common ABI message header. Type will be IPU_MSG_TYPE_GRAPH_OPEN */ struct ia_gofo_msg_header header; /** * List of *enabled* node objects that are active in the graph. * Must be of type ipu7_msg_node_t or a derivative. */ struct ia_gofo_tlv_list nodes; /** * List of *enabled* link objects. Must be of type ipu7_msg_link or a derivative. * Links not specified here are assumed to be to/from * the host and with full buffer atomicity. */ struct ia_gofo_tlv_list links; /** * Identifier of graph to be opened. Must be 0 <= graph_id < max_graphs, * where max_graphs was declared in the device_open message. See ipu7_msg_dev_open. * Can be set to IPU_MSG_GRAPH_ID_UNKNOWN, in which case firmware will decide. */ uint8_t graph_id; /** * Graph messages map: configurable send response message and interrupt trigger for that message * * See IPU_MSG_GRAPH_OPEN_SEND for more information about graph messages.
*/ uint8_t graph_msg_map; /** Reserved for alignment and possible future instructions. Set to zero. */ uint8_t reserved[6]; }; /** Enumeration of fgraph open ACK option types for use in ipu7_msg_graph_open_ack */ enum ipu7_msg_graph_ack_option_types { /** All TLV headers must define value zero as padding */ IPU_MSG_GRAPH_ACK_OPTION_TYPES_PADDING = 0, /** * Queue info set by FW for each managed node. * See ipu7_msg_graph_open_ack_node_info_t * @note Slated for decommissioning once SW-FW task interface is retired. */ IPU_MSG_GRAPH_ACK_TASK_Q_INFO, /** Number of items in this enumeration */ IPU_MSG_GRAPH_ACK_OPTION_TYPES_N }; /** * Structure for graph_ack option IPU_MSG_GRAPH_ACK_TASK_Q_INFO, * which contains information set by FW but required by SW. * Only managed nodes have task queues and thus this option. */ struct ipu7_msg_graph_open_ack_task_q_info { /** Option header */ struct ia_gofo_tlv_header header; /** * Zero-based index of this node in the fgraph. * Must be numbered in the same order as the node appears * in the fgraph's node-list. */ uint8_t node_ctx_id; /** Identifier of the queue that will order task requests for this node */ uint8_t q_id; /** Reserved for padding - set to zero */ uint8_t reserved[2]; }; /** * Acknowledges a graph open message. Sent only after the graph has actually been opened * in firmware or when the request has been rejected. * * Extension option types are defined in ipu7_msg_graph_ack_option_types */ struct ipu7_msg_graph_open_ack { /** * ABI ACK message header. Includes the common message header too. * Type will be IPU_MSG_TYPE_GRAPH_OPEN_ACK */ struct ia_gofo_msg_header_ack header; /** Graph ID as specified in the graph open message */ uint8_t graph_id; /** Reserved for alignment. Set to zero. */ uint8_t reserved[7]; }; /** * Close a graph instance. * No node tasks may be queued after this message is sent. * * Close must fail if incomplete tasks are still pending inside IPU. * @todo Add wait_pending_tasks and force_close capabilities */ struct ipu7_msg_graph_close { /** Common ABI message header. Type will be IPU_MSG_TYPE_GRAPH_CLOSE */ struct ia_gofo_msg_header header; /** Graph to be closed as specified in the graph open message */ uint8_t graph_id; /** * Graph messages map: configurable send response message and interrupt trigger for that message * * See IPU_MSG_GRAPH_CLOSE_SEND for more information about graph messages. */ uint8_t graph_msg_map; /** * Reserved for alignment AND future close instructions like * wait_pending_tasks and force_close. Set to zero. */ uint8_t reserved[6]; }; /** * Acknowledges a graph close message. Sent only after the graph has actually been closed * in firmware or when the request has been rejected. */ struct ipu7_msg_graph_close_ack { /** * ABI ACK message header. Includes the common message header too. * Type will be IPU_MSG_TYPE_GRAPH_CLOSE_ACK */ struct ia_gofo_msg_header_ack header; /** Graph ID as specified in the graph close message */ uint8_t graph_id; /** Reserved for alignment. Set to zero. */ uint8_t reserved[7]; }; /** * Error detail enumeration for error group IPU_MSG_ERR_GROUP_GRAPH used exclusively * in the graph open ack and graph close ack messages */ enum ipu7_msg_err_graph { /** No error */ IPU_MSG_ERR_GRAPH_OK = IA_GOFO_MSG_ERR_OK, /** * State machine error. Message received doesn't fit with graph state. * err_detail[0] will be the actual graph state. */ IPU_MSG_ERR_GRAPH_GRAPH_STATE = 1, /** * Couldn't allocate graph ID (none left) - only when * graph_id==IPU_MSG_GRAPH_ID_UNKNOWN * in the graph_open message.
* err_detail[0] is the max_graphs of the device context. */ IPU_MSG_ERR_GRAPH_MAX_GRAPHS = 2, /** * Invalid graph ID. * err_detail[0] is the offending graph_id */ IPU_MSG_ERR_GRAPH_GRAPH_ID = 3, /** * Invalid node ctx ID. * err_detail[0] is the offending node_ctx_id */ IPU_MSG_ERR_GRAPH_NODE_CTX_ID = 4, /** * Invalid node rsrc ID. * err_detail[0] is the offending node_rsrc_id */ IPU_MSG_ERR_GRAPH_NODE_RSRC_ID = 5, /** * Invalid profile index. * err_detail[0] is the offending profile_idx. * err_detail[1] is the node_ctx_id * * NOTE: Profile idx is not currently passed in graph open. This error is therefore reserved. */ IPU_MSG_ERR_GRAPH_PROFILE_IDX = 6, /** * Invalid terminal ID. * err_detail[0] is the offending terminal id. * err_detail[1] is the node_ctx_id */ IPU_MSG_ERR_GRAPH_TERM_ID = 7, /** * Invalid terminal payload size. * err_detail[0] is the offending payload size. * * Note: FW doesn't know the required size of the payload since it's not aware of the * frame resolution. Therefore the payload size is checked only for a non-zero value. */ IPU_MSG_ERR_GRAPH_TERM_PAYLOAD_SIZE = 8, /** * Invalid node ctx ID in a link endpoint. * err_detail[0] is the offending node_ctx_id. * err_detail[1] is the term_id of the node. */ IPU_MSG_ERR_GRAPH_LINK_NODE_CTX_ID = 9, /** * Invalid terminal ID in a link endpoint. * err_detail[0] is the offending term_id. * err_detail[1] is the node_ctx_id of the terminal. */ IPU_MSG_ERR_GRAPH_LINK_TERM_ID = 10, /** * Profile supplied is unsupported AND silently ignoring it would be a problem. * err_detail[0] is the offending tlv type */ IPU_MSG_ERR_GRAPH_PROFILE_TYPE = 11, /** * Illegal number of fragments. * err_detail[0] is the offending number of fragments * err_detail[1] is the node ctx ID */ IPU_MSG_ERR_GRAPH_NUM_FRAGS = 12, /** * Message type doesn't belong on the received queue. * err_detail[0] is the q_id used. * err_detail[1] is the q_id that should have been used or IPU_MSG_Q_ID_UNKNOWN * if a single proper q_id cannot be determined. */ IPU_MSG_ERR_GRAPH_QUEUE_ID_USAGE = 13, /** * Error on opening queue for node context tasks. * err_detail[0] is the q_id that could not be opened. * err_detail[1] is the node_ctx_id it was supposed to serve. */ IPU_MSG_ERR_GRAPH_QUEUE_OPEN = 14, /** * Error on closing node context task queue. * err_detail[0] is the q_id that could not be closed. * err_detail[1] is the node_ctx_id it serves. */ IPU_MSG_ERR_GRAPH_QUEUE_CLOSE = 15, /** * Sent message has an incorrect queue id associated with it. * err_detail[0] is the node_ctx_id the message is for, * err_detail[1] is the q_id used. */ IPU_MSG_ERR_GRAPH_QUEUE_ID_TASK_REQ_MISMATCH = 16, /** * FGraph context object memory allocation failure. * err_detail[0] is graph_id */ IPU_MSG_ERR_GRAPH_CTX_MEMORY_FGRAPH = 17, /** * Node context object memory allocation failure. * err_detail[0] is node_ctx_id */ IPU_MSG_ERR_GRAPH_CTX_MEMORY_NODE = 18, /** * Node context profile memory allocation failure. * err_detail[0] is node_ctx_id */ IPU_MSG_ERR_GRAPH_CTX_MEMORY_NODE_PROFILE = 19, /** * Terminal context object memory allocation failure. * err_detail[0] is node_ctx_id. * err_detail[1] is term_id */ IPU_MSG_ERR_GRAPH_CTX_MEMORY_TERM = 20, /** * Link context object memory allocation failure. * err_detail[0] is dest node_ctx_id. * err_detail[1] is dest term_id. * @todo can we add source node and term? */ IPU_MSG_ERR_GRAPH_CTX_MEMORY_LINK = 21, /** * Error configuring graph messages map. It is an error to configure a message type to send an IRQ but * not to send a response message.
* err_detail[0] is the value of the msg_map. */ IPU_MSG_ERR_GRAPH_CTX_MSG_MAP = 22, /** * Foreign key is invalid. * err_detail[0] is the foreign key value. * err_detail[1] is the maximum foreign key value. */ IPU_MSG_ERR_GRAPH_CTX_FOREIGN_KEY = 23, /** * Streaming mode is invalid. * err_detail[0] is link_ctx_id. * err_detail[1] is the streaming mode value. */ IPU_MSG_ERR_GRAPH_CTX_STREAMING_MODE = 24, /** * PBK acquire failure. * err_detail[0] is soc_pbk_id. * err_detail[1] is slot_id. */ IPU_MSG_ERR_GRAPH_CTX_PBK_RSRC = 25, /** * Invalid event type. * err_detail[0] is the term_id. * err_detail[1] is the offending event type. */ IPU_MSG_ERR_GRAPH_UNSUPPORTED_EVENT_TYPE = 26, /** * Invalid number of events. * err_detail[0] is the term_id. * err_detail[1] is the maximum number of events. */ IPU_MSG_ERR_GRAPH_TOO_MANY_EVENTS = 27, /** * Compression option memory allocation failure. * err_detail[0] is link_ctx_id of the link that contains the compression option. */ IPU_MSG_ERR_GRAPH_CTX_MEMORY_CMPRS = 28, /** * Alignment interval is not a valid value. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the offending value. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_ALIGN_INTERVAL = 29, /** * Unrecognized plane type. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the offending value. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_PLANE_ID = 30, /** * Invalid compression mode for the link source endpoint. The type of compression (or any * compression at all) is not supported by the data source endpoints of the link. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the plane index. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_UNSUPPORTED_MODE = 31, /** * Unrecognized bit depth or a bit depth unsupported by compression on this link. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the offending value. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_BIT_DEPTH = 32, /** * Stride is not aligned to a valid multiple. Stride must be a multiple of tile size. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the offending value. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_STRIDE_ALIGNMENT = 33, /** * A sub-buffer base address (== frame base address + subbuffer offset) is not properly aligned. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the offending value. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_SUB_BUFFER_ALIGNMENT = 34, /** * One of the pixel sub-buffers or tile status sub-buffers is mis-ordered. * Assuming the following order: y_pixel_buffer, uv_pixel_buffer, y_ts_buffer, uv_ts_buffer. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the first detected out-of-order sub-buffer with 0 for pixel, 1 for tile status. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_LAYOUT_ORDER = 35, /** * One of the pixel sub-buffers or tile status sub-buffers overlaps another. * err_detail[0] is link_ctx_id of the link that contains the compression option. * err_detail[1] is the plane index. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_LAYOUT_OVERLAP = 36, /** * Compressed buffer size is too small. * err_detail[0] is the required compression buffer size as calculated by FW. * err_detail[1] is link_ctx_id of the link that contains the compression option. * * @note FW calculates the compression buffer size based on the values * supplied in the compression option, not based on the frame resolution.
* If the values supplied in the compression option are incorrect, the calculated * buffer size returned here would be incorrect as well. * * @note This buffer size error is specific to the compressed buffer size and says nothing about the * nominal size of the uncompressed buffer declared in the terminal. */ IPU_MSG_ERR_GRAPH_CTX_CMPRS_BUFFER_TOO_SMALL = 37, /** * Delayed processing link creation is invalid. * Can be caused by one of the following violations: * - a delayed processing link must be a self link * - a delayed processing link cannot be SOFF * err_detail[0] is link_ctx_id of the delayed processing link. */ IPU_MSG_ERR_GRAPH_CTX_DELAYED_LINK = 38, /** Size of enumeration */ IPU_MSG_ERR_GRAPH_N }; #pragma pack(pop) /** Compile time checks only - this function is not to be called! */ static inline void ipu7_msg_graph_test_func(void) { CHECK_ALIGN64(struct ipu7_msg_graph_open); CHECK_ALIGN64(struct ipu7_msg_graph_open_ack); CHECK_ALIGN64(struct ipu7_msg_graph_close); CHECK_ALIGN64(struct ipu7_msg_graph_close_ack); CHECK_ALIGN32(struct ipu7_msg_graph_open_ack_task_q_info); } /** Maximum PSYS syscom INPUT queues */ #define FWPS_MSG_ABI_MAX_INPUT_QUEUES (60U) /** Maximum PSYS syscom OUTPUT queues */ #define FWPS_MSG_ABI_MAX_OUTPUT_QUEUES (2U) /** Maximum number of all queues, input and output together */ #define FWPS_MSG_ABI_MAX_QUEUES (FWPS_MSG_ABI_MAX_OUTPUT_QUEUES + FWPS_MSG_ABI_MAX_INPUT_QUEUES) /** * Output queue for acknowledge/done messages * This queue is opened by firmware at startup */ #define FWPS_MSG_ABI_OUT_ACK_QUEUE_ID (IA_GOFO_MSG_ABI_OUT_ACK_QUEUE_ID) /** * Output queue for log messages, including error messages * This queue is opened by firmware at startup */ #define FWPS_MSG_ABI_OUT_LOG_QUEUE_ID (IA_GOFO_MSG_ABI_OUT_LOG_QUEUE_ID) #if (FWPS_MSG_ABI_OUT_LOG_QUEUE_ID >= FWPS_MSG_ABI_MAX_OUTPUT_QUEUES) #error "Maximum output queues configuration is too small to fit ACK and LOG queues" #endif /** * Input queue for device level messages * This queue is opened by firmware at startup */ #define FWPS_MSG_ABI_IN_DEV_QUEUE_ID (IA_GOFO_MSG_ABI_IN_DEV_QUEUE_ID) /** Reserved input queue for possible future use */ #define FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID (3U)
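/*
 * Informative queue ID layout summary. This assumes the IA_GOFO_* base
 * queue IDs (defined in a header not shown here) are 0, 1 and 2 - an
 * assumption, stated here only to make the layout concrete:
 *
 *   queue 0           ACK/done messages (FW -> SW)
 *   queue 1           log/error messages (FW -> SW)
 *   queue 2           device-level messages (SW -> FW)
 *   queue 3           reserved
 *   queue 4 and up    per-node-context task queues (SW -> FW)
 */

/**
 * First task input queue. Other input task queues may follow
 * up to the maximum number of queues.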
*/ #define FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID (FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID + 1U) #if (FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID >= FWPS_MSG_ABI_MAX_INPUT_QUEUES) #error "Maximum queues configuration is too small to fit minimum number of useful queues" #endif /** Last task input queue */ #define FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID (FWPS_MSG_ABI_MAX_QUEUES - 1U) /** Maximum number of task queues for sending ordered tasks to a node context */ #define FWPS_MSG_ABI_IN_MAX_TASK_QUEUES \ (FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID - FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID + 1U) /** First output queue ID */ #define FWPS_MSG_ABI_OUT_FIRST_QUEUE_ID (FWPS_MSG_ABI_OUT_ACK_QUEUE_ID) /** Last output queue ID */ #define FWPS_MSG_ABI_OUT_LAST_QUEUE_ID (FWPS_MSG_ABI_MAX_OUTPUT_QUEUES - 1U) /** First input queue ID */ #define FWPS_MSG_ABI_IN_FIRST_QUEUE_ID (FWPS_MSG_ABI_IN_DEV_QUEUE_ID) /** Last input queue ID */ #define FWPS_MSG_ABI_IN_LAST_QUEUE_ID (FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID) /** Evaluates to true if the queue_id identifies an output (FW-->SW) queue */ #define FWPS_MSG_ABI_IS_OUTPUT_QUEUE(queue_id) (\ (queue_id >= FWPS_MSG_ABI_OUT_FIRST_QUEUE_ID) && (\ queue_id <= FWPS_MSG_ABI_OUT_LAST_QUEUE_ID)) /** Evaluates to true if the queue_id identifies an input (SW-->FW) queue */ #define FWPS_MSG_ABI_IS_INPUT_QUEUE(queue_id) (!FWPS_MSG_ABI_IS_OUTPUT_QUEUE(queue_id)) /** * Maximum size for all host-to-FW messages * A guess as of now */ #define FWPS_MSG_HOST2FW_MAX_SIZE (2U * 1024U) /** * Maximum size for all FW-to-host messages * A guess as of now */ #define FWPS_MSG_FW2HOST_MAX_SIZE (256U) #endifipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_psys_config_abi.h000066400000000000000000000042671465530421300317660ustar00rootroot00000000000000/* INTEL CONFIDENTIAL * * Copyright (C) 2020 - 2023 Intel Corporation. * All Rights Reserved. * * The source code contained or described herein and all documents * related to the source code ("Material") are owned by Intel Corporation * or licensors. Title to the Material remains with Intel * Corporation or its licensors. The Material contains trade * secrets and proprietary and confidential information of Intel or its * licensors. The Material is protected by worldwide copyright * and trade secret laws and treaty provisions. No part of the Material may * be used, copied, reproduced, modified, published, uploaded, posted, * transmitted, distributed, or disclosed in any way without Intel's prior * express written permission. * * No License under any patent, copyright, trade secret or other intellectual * property right is granted to or conferred upon you by disclosure or * delivery of the Materials, either expressly, by implication, inducement, * estoppel or otherwise. Any license under such intellectual property rights * must be express and approved by Intel in writing. */ #ifndef IPU7_PSYS_CONFIG_ABI_H_INCLUDED__ #define IPU7_PSYS_CONFIG_ABI_H_INCLUDED__ #include #include "ipu7_fw_boot_abi.h" #include "ipu7_fw_config_abi.h" /** * PSYS specific config. * @see ia_gofo_boot_config.subsys_config */ struct ipu7_psys_config { /** * Enable HW agnostic debug manifest. * (Formerly ddr_dmem_ddr) */ uint32_t use_debug_manifest; /** * Timeout value in milliseconds. */ uint32_t timeout_val_ms; /** * Enables compression support in FW. * For HKR-1, enabling compression support means: * 1. Pipelining is disabled for all CBs. * 2. BCSM is not supported for any graphs.
* Note that enabling this bit does not mean that compression is active, just that it can be activated * (using the compression TLV message in graph open). * By default compression is not enabled in FW and therefore TLV compression messages would trigger an error. */ uint32_t compression_support_enabled; /** @see ia_gofo_boot_config.subsys_config */ struct ia_gofo_logger_config logger_config; /** HW watchdog configuration */ struct ipu7_wdt_abi wdt_config; }; #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/abi/ipu7_fw_syscom_abi.h000066400000000000000000000112041465530421300307560ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU7_FW_SYSCOM_ABI_H #define IPU7_FW_SYSCOM_ABI_H #include #include "ipu7_fw_common_abi.h" /** * @file * This file describes binary data structures and registers * that are shared between host and firmware and are required * for syscom queue setup and operation. */ /* * Tool not yet available * @mscfile simplified_syscom.signalling "SW-FW Syscom & Boot Interface Sequence * (Place holder - MSCGEN tool integration not yet available" */ #pragma pack(push, 1) /** * Syscom queue size (max_capacity) cannot be smaller than this value. */ #define SYSCOM_QUEUE_MIN_CAPACITY 2U /** * Basic queue parameters defining the backing shared memory * that will hold the queue array, and that array's geometry */ struct syscom_queue_params_config { /** Actual message memory. MUST be cache line (64 byte) aligned. */ ia_gofo_addr_t token_array_mem; /** * Size of buffer at token_array_mem. Must be a multiple of 4 bytes. * If token size is greater than 64 bytes and thus a candidate for * DMA transfers, it must be a multiple of 64 bytes to conform to * uC iDMA constraints on the starting address. * @todo TBD on threshold for using DMA. */ uint16_t token_size_in_bytes; /** * Max capacity of the queue. This is the number of elements in * the underlying allocated array. * Note: max_capacity must be at least SYSCOM_QUEUE_MIN_CAPACITY! */ uint16_t max_capacity; }; /** * The syscom config structure that is passed to FW at boot. * This is the initial syscom config structure that is initialized by * the host and passed to FW as part of its boot process. This defines the * initial number of queues. */ struct syscom_config_s { /** * Maximum number of output queues that host has allocated. * Currently must be exactly 2. One for acks and one for logs. */ uint16_t max_output_queues; /** * Maximum number of input queues that host has allocated, * including the two reserved queues for non-task related messaging * @todo My preference is to move this to the device_open messages */ uint16_t max_input_queues; /* * Queue config for all queues required at bootstrapping ONLY, with * output queues first, followed by input queues. * This array MUST be of size (max_output_queues + max_input_queues) * Were we to use C99 flexible arrays, it would look like this: * syscom_queue_params_config queue_configs[]; * See QueueParamsConfigGetQueueConfigs */ };
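/*
 * Informative sketch (not part of the ABI): total allocation size for a
 * syscom config with num_queues trailing queue entries, outputs first.
 * This mirrors the SYSCOM_CONFIG_ALLOC_SIZE() macro defined just below;
 * the function name is illustrative only.
 */
static inline unsigned int syscom_config_example_alloc_size(unsigned int num_queues)
{
	/* Fixed header immediately followed by num_queues parameter entries */
	return (unsigned int)sizeof(struct syscom_config_s) +
	       (unsigned int)sizeof(struct syscom_queue_params_config) * num_queues;
}

/**
 * Calculate allocation size of Syscom configuration structure as
 * a parameter of the number of queues.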
*/ #define SYSCOM_CONFIG_ALLOC_SIZE(num_queues) \ ((sizeof(struct syscom_config_s) + \ (sizeof(struct syscom_queue_params_config) * num_queues))) #pragma pack(pop) static inline struct syscom_queue_params_config *syscom_config_get_queue_configs (struct syscom_config_s *config) { return (struct syscom_queue_params_config *)(&config[1]); } static inline const struct syscom_queue_params_config *syscom_config_get_queue_configs_const(const struct syscom_config_s *config) { return (const struct syscom_queue_params_config *)(&config[1]); } /** Compile time checks only - this function is not to be called! */ static inline void syscom_abi_test_func(void) { CHECK_ALIGN32(struct syscom_queue_params_config); CHECK_ALIGN32(struct syscom_config_s); } #pragma pack(push, 1) /** * Queue indices structure to reside in DMEM. * This is the shared * part of syscom that both host and firmware read/write. */ struct syscom_queue_indices_s { /** * Index to token in the front of queue * This will be the next token to dequeue. Only consumer can * write to this variable. * * Note: For now, only lowest byte is in use --> max value is 127. * Defining at 32 bit because * that is the smallest atomic read/write that * both host and FW can support together */ uint32_t read_index; /** * Index to the token in the rear of queue * This was the last token queued. The next token will go after this. * Only producer can write to this variable. * * Note: For now, only lowest byte is in use --> max value is 127. * Defining at 32 bit because * that is the smallest atomic read/write that * both host and FW can support together */ uint32_t write_index; }; #pragma pack(pop) /** Compile time checks only - this function is not to be called! */ static inline void syscom_queue_abi_test_func(void) { CHECK_ALIGN64(struct syscom_queue_indices_s); } #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-boot.c000066400000000000000000000325741465530421300262600ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2022 - 2024 Intel Corporation */ #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_boot_abi.h" #include "ipu7.h" #include "ipu7-boot.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-platform-regs.h" #include "ipu7-syscom.h" #define IPU_FW_START_STOP_TIMEOUT 2000 #define IPU_BOOT_CELL_RESET_TIMEOUT (2 * USEC_PER_SEC) #define BOOT_STATE_IS_CRITICAL(s) IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(s) #define BOOT_STATE_IS_READY(s) ((s) == IA_GOFO_FW_BOOT_STATE_READY) #define BOOT_STATE_IS_INACTIVE(s) ((s) == IA_GOFO_FW_BOOT_STATE_INACTIVE) struct ipu7_boot_context { u32 base; u32 dmem_address; u32 status_ctrl_reg; u32 fw_start_address_reg; u32 fw_code_base_reg; }; static const struct ipu7_boot_context contexts[IPU_SUBSYS_NUM] = { { /* ISYS */ .dmem_address = IPU_ISYS_DMEM_OFFSET, .status_ctrl_reg = BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS, .fw_start_address_reg = BUTTRESS_REG_DRV_IS_UCX_START_ADDR, .fw_code_base_reg = IS_UC_CTRL_BASE }, { /* PSYS */ .dmem_address = IPU_PSYS_DMEM_OFFSET, .status_ctrl_reg = BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS, .fw_start_address_reg = BUTTRESS_REG_DRV_PS_UCX_START_ADDR, .fw_code_base_reg = PS_UC_CTRL_BASE } }; static u32 get_fw_boot_reg_addr(const struct ipu7_bus_device *adev, enum ia_gofo_buttress_reg_id reg) { u32 base = (adev->subsys == IPU_IS) ? 
0 : IA_GOFO_FW_BOOT_ID_MAX; return BUTTRESS_FW_BOOT_PARAMS_ENTRY(base + reg); } static void write_fw_boot_param(const struct ipu7_bus_device *adev, enum ia_gofo_buttress_reg_id reg, u32 val) { void __iomem *base = adev->isp->base; dev_dbg(&adev->auxdev.dev, "write boot param reg: %d addr: %x val: 0x%x\n", reg, get_fw_boot_reg_addr(adev, reg), val); writel(val, base + get_fw_boot_reg_addr(adev, reg)); } static u32 read_fw_boot_param(const struct ipu7_bus_device *adev, enum ia_gofo_buttress_reg_id reg) { void __iomem *base = adev->isp->base; return readl(base + get_fw_boot_reg_addr(adev, reg)); } static int ipu7_boot_cell_reset(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; u32 ucx_ctrl_status = ctx->status_ctrl_reg; u32 timeout = IPU_BOOT_CELL_RESET_TIMEOUT; const struct device *dev = &adev->auxdev.dev; u32 val, val2; int ret; dev_dbg(dev, "cell enter reset...\n"); val = readl(base + ucx_ctrl_status); dev_dbg(dev, "cell_ctrl_reg addr = 0x%x, val = 0x%x\n", ucx_ctrl_status, val); dev_dbg(dev, "force cell reset...\n"); val |= UCX_CTL_RESET; val &= ~UCX_CTL_RUN; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ucx_ctrl_status, val); writel(val, base + ucx_ctrl_status); ret = readl_poll_timeout(base + ucx_ctrl_status, val2, (val2 & 0x3) == (val & 0x3), 100, timeout); if (ret) { dev_err(dev, "cell enter reset timeout. status: 0x%x\n", val2); return -ETIMEDOUT; } dev_dbg(dev, "cell exit reset...\n"); val = readl(base + ucx_ctrl_status); WARN((!(val & UCX_CTL_RESET) || val & UCX_CTL_RUN), "cell status 0x%x", val); val &= ~(UCX_CTL_RESET | UCX_CTL_RUN); dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ucx_ctrl_status, val); writel(val, base + ucx_ctrl_status); ret = readl_poll_timeout(base + ucx_ctrl_status, val2, (val2 & 0x3) == (val & 0x3), 100, timeout); if (ret) { dev_err(dev, "cell exit reset timeout.
status: 0x%x\n", val2); return -ETIMEDOUT; } return 0; } static void ipu7_boot_cell_start(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; const struct device *dev = &adev->auxdev.dev; u32 val; dev_dbg(dev, "starting cell...\n"); val = readl(base + ctx->status_ctrl_reg); WARN_ON(val & (UCX_CTL_RESET | UCX_CTL_RUN)); val &= ~UCX_CTL_RESET; val |= UCX_CTL_RUN; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ctx->status_ctrl_reg, val); writel(val, base + ctx->status_ctrl_reg); } static void ipu7_boot_cell_stop(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; const struct device *dev = &adev->auxdev.dev; u32 val; dev_dbg(dev, "stopping cell...\n"); val = readl(base + ctx->status_ctrl_reg); val &= ~UCX_CTL_RUN; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ctx->status_ctrl_reg, val); writel(val, base + ctx->status_ctrl_reg); /* Wait for uC transactions complete */ usleep_range(10, 20); val = readl(base + ctx->status_ctrl_reg); val |= UCX_CTL_RESET; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ctx->status_ctrl_reg, val); writel(val, base + ctx->status_ctrl_reg); } static int ipu7_boot_cell_init(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; dev_dbg(&adev->auxdev.dev, "write fw_start_address_reg(0x%x) to 0x%x\n", ctx->fw_start_address_reg, adev->fw_entry); writel(adev->fw_entry, base + ctx->fw_start_address_reg); return ipu7_boot_cell_reset(adev); } static void init_boot_config(struct ia_gofo_boot_config *boot_config, u32 length) { /* syscom version, new syscom2 version */ boot_config->length = length; boot_config->config_version.major = 1U; boot_config->config_version.minor = 0U; boot_config->config_version.subminor = 0U; boot_config->config_version.patch = 0U; /* msg version for task interface */ boot_config->client_version_support.num_versions = 1U; boot_config->client_version_support.versions[0].major = 1U; boot_config->client_version_support.versions[0].minor = 0U; boot_config->client_version_support.versions[0].subminor = 0U; boot_config->client_version_support.versions[0].patch = 0U; } int ipu7_boot_init_boot_config(struct ipu7_bus_device *adev, struct syscom_queue_config *qconfigs, int num_queues, u32 uc_freq, dma_addr_t subsys_config) { struct ipu7_syscom_context *syscom = adev->syscom; struct syscom_config_s *syscfg; struct ia_gofo_boot_config *boot_config; struct syscom_queue_params_config *cfgs; dma_addr_t queue_mem_dma_ptr; void *queue_mem_ptr; u32 total_queue_size = 0, total_queue_size_aligned = 0; struct device *dev = &adev->auxdev.dev; unsigned int i; dev_dbg(dev, "init boot config. queues_nr: %d freq: %u sys_conf: 0x%llx\n", num_queues, uc_freq, subsys_config); /* Allocate boot config. 
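	 * FW_BOOT_CONFIG_ALLOC_SIZE() must cover the fixed boot config
	 * header plus the per-queue syscom config entries appended to it,
	 * which is why num_queues is passed here.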
*/ adev->boot_config_size = FW_BOOT_CONFIG_ALLOC_SIZE(num_queues); adev->boot_config = dma_alloc_attrs(dev, adev->boot_config_size, &adev->boot_config_dma_addr, GFP_KERNEL, 0); if (!adev->boot_config) { dev_err(dev, "Failed to allocate boot config.\n"); return -ENOMEM; } dev_dbg(dev, "boot config dma addr: 0x%llx\n", adev->boot_config_dma_addr); boot_config = adev->boot_config; memset(boot_config, 0, sizeof(struct ia_gofo_boot_config)); init_boot_config(boot_config, adev->boot_config_size); boot_config->subsys_config = subsys_config; boot_config->uc_tile_frequency_mhz = uc_freq; boot_config->syscom_context_config[0].max_output_queues = syscom->num_output_queues; boot_config->syscom_context_config[0].max_input_queues = syscom->num_input_queues; dma_sync_single_for_device(dev, adev->boot_config_dma_addr, adev->boot_config_size, DMA_TO_DEVICE); for (i = 0; i < num_queues; i++) { u32 queue_size = qconfigs[i].max_capacity * qconfigs[i].token_size_in_bytes; total_queue_size += queue_size; queue_size = ALIGN(queue_size, IA_GOFO_CL_SIZE); total_queue_size_aligned += queue_size; qconfigs[i].queue_size = queue_size; dev_dbg(dev, "queue_configs[%d]: capacity = %d, token_size = %d\n", i, qconfigs[i].max_capacity, qconfigs[i].token_size_in_bytes); } dev_dbg(dev, "isys syscom queue size %d, aligned size %d\n", total_queue_size, total_queue_size_aligned); /* Allocate queue memory */ syscom->queue_mem = dma_alloc_attrs(dev, total_queue_size_aligned, &syscom->queue_mem_dma_addr, GFP_KERNEL, 0); if (!syscom->queue_mem) { dev_err(dev, "Failed to allocate queue memory.\n"); return -ENOMEM; } dev_dbg(dev, "syscom queue memory. dma_addr: 0x%llx size: %d\n", syscom->queue_mem_dma_addr, total_queue_size_aligned); syscom->queue_mem_size = total_queue_size_aligned; syscfg = &boot_config->syscom_context_config[0]; cfgs = ipu7_syscom_get_queue_config(syscfg); queue_mem_ptr = syscom->queue_mem; queue_mem_dma_ptr = syscom->queue_mem_dma_addr; for (i = 0; i < num_queues; i++) { cfgs[i].token_array_mem = queue_mem_dma_ptr; cfgs[i].max_capacity = qconfigs[i].max_capacity; cfgs[i].token_size_in_bytes = qconfigs[i].token_size_in_bytes; qconfigs[i].token_array_mem = queue_mem_ptr; queue_mem_dma_ptr += qconfigs[i].queue_size; queue_mem_ptr += qconfigs[i].queue_size; } dma_sync_single_for_device(dev, syscom->queue_mem_dma_addr, total_queue_size_aligned, DMA_TO_DEVICE); return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_boot_init_boot_config, INTEL_IPU7); void ipu7_boot_release_boot_config(struct ipu7_bus_device *adev) { struct ipu7_syscom_context *syscom = adev->syscom; struct device *dev = &adev->auxdev.dev; if (syscom->queue_mem) { dma_free_attrs(dev, syscom->queue_mem_size, syscom->queue_mem, syscom->queue_mem_dma_addr, 0); syscom->queue_mem = NULL; syscom->queue_mem_dma_addr = 0; } if (adev->boot_config) { dma_free_attrs(dev, adev->boot_config_size, adev->boot_config, adev->boot_config_dma_addr, 0); adev->boot_config = NULL; adev->boot_config_dma_addr = 0; } } EXPORT_SYMBOL_NS_GPL(ipu7_boot_release_boot_config, INTEL_IPU7); int ipu7_boot_start_fw(const struct ipu7_bus_device *adev) { const struct device *dev = &adev->auxdev.dev; u32 boot_state, last_boot_state; u32 timeout = IPU_FW_START_STOP_TIMEOUT; void __iomem *base = adev->isp->base; u32 indices_addr, msg_ver, id; struct ia_gofo_version_s ver; int ret; ret = ipu7_boot_cell_init(adev); if (ret) return ret; dev_dbg(dev, "start booting fw...\n"); /* store "uninit" state to syscom/boot state reg */ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID, IA_GOFO_FW_BOOT_STATE_UNINIT); /* * 
Set registers to zero * (not strictly required, but recommended for diagnostics) */ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID, 0); write_fw_boot_param(adev, IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID, 0); /* store firmware configuration address */ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_CONFIG_ID, adev->boot_config_dma_addr); /* Kick uC, then wait for boot complete */ ipu7_boot_cell_start(adev); last_boot_state = IA_GOFO_FW_BOOT_STATE_UNINIT; while (timeout--) { boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); if (boot_state != last_boot_state) { dev_dbg(dev, "boot state changed from 0x%x to 0x%x\n", last_boot_state, boot_state); last_boot_state = boot_state; } if (BOOT_STATE_IS_CRITICAL(boot_state) || BOOT_STATE_IS_READY(boot_state)) break; usleep_range(1000, 1200); } if (BOOT_STATE_IS_CRITICAL(boot_state)) { ipu7_dump_fw_error_log(adev); dev_err(dev, "critical boot state error 0x%x\n", boot_state); return -EINVAL; } else if (!BOOT_STATE_IS_READY(boot_state)) { dev_err(dev, "fw boot timeout. state: 0x%x\n", boot_state); return -ETIMEDOUT; } dev_dbg(dev, "fw boot done.\n"); /* Get FW syscom queue indices addr */ id = IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID; indices_addr = read_fw_boot_param(adev, id); adev->syscom->queue_indices = base + indices_addr; dev_dbg(dev, "fw queue indices offset is 0x%x\n", indices_addr); /* Get message version. */ msg_ver = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID); memcpy(&ver, &msg_ver, sizeof(msg_ver)); dev_dbg(dev, "ipu message version is %d.%d.%d.%d\n", ver.major, ver.minor, ver.subminor, ver.patch); return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_boot_start_fw, INTEL_IPU7); int ipu7_boot_stop_fw(const struct ipu7_bus_device *adev) { const struct device *dev = &adev->auxdev.dev; u32 boot_state; u32 timeout = IPU_FW_START_STOP_TIMEOUT; boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); if (BOOT_STATE_IS_CRITICAL(boot_state) || !BOOT_STATE_IS_READY(boot_state)) { dev_err(dev, "fw not ready for shutdown, state 0x%x\n", boot_state); return -EBUSY; } /* Issue shutdown to start shutdown process */ dev_dbg(dev, "stopping fw...\n"); write_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID, IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD); while (timeout--) { boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); if (BOOT_STATE_IS_CRITICAL(boot_state) || BOOT_STATE_IS_INACTIVE(boot_state)) break; usleep_range(1000, 1200); } if (BOOT_STATE_IS_CRITICAL(boot_state)) { ipu7_dump_fw_error_log(adev); dev_err(dev, "critical boot state error 0x%x\n", boot_state); return -EINVAL; } else if (!BOOT_STATE_IS_INACTIVE(boot_state)) { dev_err(dev, "stop fw timeout. 
state: 0x%x\n", boot_state); return -ETIMEDOUT; } ipu7_boot_cell_stop(adev); dev_dbg(dev, "stop fw done.\n"); return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_boot_stop_fw, INTEL_IPU7); u32 ipu7_boot_get_boot_state(const struct ipu7_bus_device *adev) { return read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); } EXPORT_SYMBOL_NS_GPL(ipu7_boot_get_boot_state, INTEL_IPU7); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-boot.h000066400000000000000000000013111465530421300262460ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2022 - 2024 Intel Corporation */ #include struct ipu7_bus_device; struct syscom_queue_config; #define FW_QUEUE_CONFIG_SIZE(num_queues) \ (sizeof(struct syscom_queue_config) * (num_queues)) int ipu7_boot_init_boot_config(struct ipu7_bus_device *adev, struct syscom_queue_config *qconfigs, int num_queues, u32 uc_freq, dma_addr_t subsys_config); void ipu7_boot_release_boot_config(struct ipu7_bus_device *adev); int ipu7_boot_start_fw(const struct ipu7_bus_device *adev); int ipu7_boot_stop_fw(const struct ipu7_bus_device *adev); u32 ipu7_boot_get_boot_state(const struct ipu7_bus_device *adev); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-bus.c000066400000000000000000000072771465530421300261100ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-boot.h" #include "ipu7-dma.h" static int bus_pm_runtime_suspend(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); int ret; ret = pm_generic_runtime_suspend(dev); if (ret) return ret; ret = ipu7_buttress_power(dev, adev->ctrl, false); dev_dbg(dev, "buttress power down %d\n", ret); if (!ret) return 0; dev_err(dev, "power down failed!\n"); /* Powering down failed, attempt to resume device now */ ret = pm_generic_runtime_resume(dev); if (!ret) return -EBUSY; return -EIO; } static int bus_pm_runtime_resume(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); int ret; ret = ipu7_buttress_power(dev, adev->ctrl, true); dev_dbg(dev, "buttress power up %d\n", ret); if (ret) return ret; ret = pm_generic_runtime_resume(dev); if (ret) goto out_err; return 0; out_err: ipu7_buttress_power(dev, adev->ctrl, false); return -EBUSY; } static struct dev_pm_domain ipu7_bus_pm_domain = { .ops = { .runtime_suspend = bus_pm_runtime_suspend, .runtime_resume = bus_pm_runtime_resume, }, }; static DEFINE_MUTEX(ipu7_bus_mutex); static void ipu7_bus_release(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); kfree(adev->pdata); kfree(adev); } struct ipu7_bus_device * ipu7_bus_initialize_device(struct pci_dev *pdev, struct device *parent, void *pdata, struct ipu7_buttress_ctrl *ctrl, char *name) { struct auxiliary_device *auxdev; struct ipu7_bus_device *adev; struct ipu7_device *isp = pci_get_drvdata(pdev); int ret; adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) return ERR_PTR(-ENOMEM); if (isp->secure_mode) adev->dma_mask = DMA_BIT_MASK(IPU_MMU_ADDR_BITS); else adev->dma_mask = DMA_BIT_MASK(IPU_MMU_ADDR_BITS_NON_SECURE); adev->isp = isp; adev->ctrl = ctrl; adev->pdata = pdata; auxdev = &adev->auxdev; auxdev->name = name; auxdev->id = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn); auxdev->dev.parent = parent; auxdev->dev.release = ipu7_bus_release; 
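	/* Route all child-device DMA through the IPU-specific DMA ops,
	 * constrained by the MMU address width selected above for
	 * secure/non-secure mode. */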
auxdev->dev.dma_ops = &ipu7_dma_ops; auxdev->dev.dma_mask = &adev->dma_mask; auxdev->dev.dma_parms = pdev->dev.dma_parms; auxdev->dev.coherent_dma_mask = adev->dma_mask; ret = auxiliary_device_init(auxdev); if (ret < 0) { dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n", ret); kfree(adev); return ERR_PTR(ret); } dev_pm_domain_set(&auxdev->dev, &ipu7_bus_pm_domain); pm_runtime_forbid(&adev->auxdev.dev); pm_runtime_enable(&adev->auxdev.dev); return adev; } int ipu7_bus_add_device(struct ipu7_bus_device *adev) { struct auxiliary_device *auxdev = &adev->auxdev; int ret; ret = auxiliary_device_add(auxdev); if (ret) { auxiliary_device_uninit(auxdev); return ret; } mutex_lock(&ipu7_bus_mutex); list_add(&adev->list, &adev->isp->devices); mutex_unlock(&ipu7_bus_mutex); pm_runtime_allow(&auxdev->dev); return 0; } void ipu7_bus_del_devices(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); struct ipu7_bus_device *adev, *save; mutex_lock(&ipu7_bus_mutex); list_for_each_entry_safe(adev, save, &isp->devices, list) { pm_runtime_disable(&adev->auxdev.dev); list_del(&adev->list); auxiliary_device_delete(&adev->auxdev); auxiliary_device_uninit(&adev->auxdev); } mutex_unlock(&ipu7_bus_mutex); } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-bus.h000066400000000000000000000033241465530421300261020ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_BUS_H #define IPU7_BUS_H #include #include #include #include #include #include #include #include "abi/ipu7_fw_boot_abi.h" #include "ipu7-syscom.h" struct pci_dev; struct ipu7_buttress_ctrl; struct ipu7_mmu; struct ipu7_device; enum ipu7_subsys { IPU_IS = 0, IPU_PS = 1, IPU_SUBSYS_NUM = 2, }; struct ipu7_bus_device { struct auxiliary_device auxdev; struct auxiliary_driver *auxdrv; const struct ipu7_auxdrv_data *auxdrv_data; struct list_head list; enum ipu7_subsys subsys; void *pdata; struct ipu7_mmu *mmu; struct ipu7_device *isp; struct ipu7_buttress_ctrl *ctrl; u64 dma_mask; struct sg_table fw_sgt; u32 fw_entry; struct ipu7_syscom_context *syscom; struct ia_gofo_boot_config *boot_config; dma_addr_t boot_config_dma_addr; u32 boot_config_size; }; struct ipu7_auxdrv_data { irqreturn_t (*isr)(struct ipu7_bus_device *adev); irqreturn_t (*isr_threaded)(struct ipu7_bus_device *adev); bool wake_isr_thread; }; #define to_ipu7_bus_device(_dev) \ container_of(to_auxiliary_dev(_dev), struct ipu7_bus_device, auxdev) #define auxdev_to_adev(_auxdev) \ container_of(_auxdev, struct ipu7_bus_device, auxdev) #define ipu7_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) struct ipu7_bus_device * ipu7_bus_initialize_device(struct pci_dev *pdev, struct device *parent, void *pdata, struct ipu7_buttress_ctrl *ctrl, char *name); int ipu7_bus_add_device(struct ipu7_bus_device *adev); void ipu7_bus_del_devices(struct pci_dev *pdev); #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-buttress-regs.h000066400000000000000000000447411465530421300301320ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU7_BUTTRESS_REGS_H #define IPU7_BUTTRESS_REGS_H #define BUTTRESS_REG_IRQ_STATUS 0x2000 #define BUTTRESS_REG_IRQ_STATUS_UNMASKED 0x2004 #define BUTTRESS_REG_IRQ_ENABLE 0x2008 #define BUTTRESS_REG_IRQ_CLEAR 0x200c #define BUTTRESS_REG_IRQ_MASK 0x2010 #define BUTTRESS_REG_TSC_CMD 0x2014 #define BUTTRESS_REG_TSC_CTL 0x2018 #define 
BUTTRESS_REG_TSC_LO 0x201c #define BUTTRESS_REG_TSC_HI 0x2020 /* valid for PTL */ #define BUTTRESS_REG_PB_TIMESTAMP_LO 0x2030 #define BUTTRESS_REG_PB_TIMESTAMP_HI 0x2034 #define BUTTRESS_REG_PB_TIMESTAMP_VALID 0x2038 #define BUTTRESS_REG_PS_WORKPOINT_REQ 0x2100 #define BUTTRESS_REG_IS_WORKPOINT_REQ 0x2104 #define BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ 0x2108 #define BUTTRESS_REG_PS_DOMAINS_STATUS 0x2110 #define BUTTRESS_REG_PWR_STATUS 0x2114 #define BUTTRESS_REG_PS_WORKPOINT_REQ_SHADOW 0x2120 #define BUTTRESS_REG_IS_WORKPOINT_REQ_SHADOW 0x2124 #define BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ_SHADOW 0x2128 #define BUTTRESS_REG_ISPS_WORKPOINT_DOWNLOAD 0x212c #define BUTTRESS_REG_PG_FLOW_OVERRIDE 0x2180 #define BUTTRESS_REG_GLOBAL_OVERRIDE_UNGATE_CTL 0x2184 #define BUTTRESS_REG_PWR_FSM_CTL 0x2188 #define BUTTRESS_REG_IDLE_WDT 0x218c #define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_EN 0x2190 #define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_ADDR 0x2194 #define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_DATA 0x2198 #define BUTTRESS_REG_POWER_EN_DELAY 0x219c #define BUTTRESS_REG_LTR_CONTROL 0x21a0 #define BUTTRESS_REG_NDE_CONTROL 0x21a4 #define BUTTRESS_REG_INT_FRM_PUNIT 0x21a8 #define BUTTRESS_REG_SLEEP_LEVEL_CFG 0x21b0 #define BUTTRESS_REG_SLEEP_LEVEL_STS 0x21b4 #define BUTTRESS_REG_DVFS_FSM_STATUS 0x21b8 #define BUTTRESS_REG_D2D_CTL 0x21d4 #define BUTTRESS_REG_IB_CLK_CTL 0x21d8 #define BUTTRESS_REG_IB_CRO_CLK_CTL 0x21dc #define BUTTRESS_REG_FUNC_FUSES 0x21e0 #define BUTTRESS_REG_ISOCH_CTL 0x21e4 #define BUTTRESS_REG_WORKPOINT_CTL 0x21f0 #define BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS 0x2200 #define BUTTRESS_REG_DRV_IS_UCX_START_ADDR 0x2204 #define BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS 0x2208 #define BUTTRESS_REG_DRV_PS_UCX_START_ADDR 0x220c #define BUTTRESS_REG_DRV_UCX_RESET_CFG 0x2210 /* configured by CSE */ #define BUTTRESS_REG_CSE_IS_UCX_CONTROL_STATUS 0x2300 #define BUTTRESS_REG_CSE_IS_UCX_START_ADDR 0x2304 #define BUTTRESS_REG_CSE_PS_UCX_CONTROL_STATUS 0x2308 #define BUTTRESS_REG_CSE_PS_UCX_START_ADDR 0x230c #define BUTTRESS_REG_CAMERA_MASK 0x2310 #define BUTTRESS_REG_FW_CTL 0x2314 #define BUTTRESS_REG_SECURITY_CTL 0x2318 #define BUTTRESS_REG_FUNCTIONAL_FW_SETUP 0x231c #define BUTTRESS_REG_FW_BASE 0x2320 #define BUTTRESS_REG_FW_BASE_LIMIT 0x2324 #define BUTTRESS_REG_FW_SCRATCH_BASE 0x2328 #define BUTTRESS_REG_FW_SCRATCH_LIMIT 0x232c #define BUTTRESS_REG_CSE_ACTION 0x2330 /* configured by SW */ #define BUTTRESS_REG_FW_RESET_CTL 0x2334 #define BUTTRESS_REG_FW_SOURCE_SIZE 0x2338 #define BUTTRESS_REG_FW_SOURCE_BASE 0x233c #define BUTTRESS_REG_IPU_SEC_CP_LSB 0x2400 #define BUTTRESS_REG_IPU_SEC_CP_MSB 0x2404 #define BUTTRESS_REG_IPU_SEC_WAC_LSB 0x2408 #define BUTTRESS_REG_IPU_SEC_WAC_MSB 0x240c #define BUTTRESS_REG_IPU_SEC_RAC_LSB 0x2410 #define BUTTRESS_REG_IPU_SEC_RAC_MSB 0x2414 #define BUTTRESS_REG_IPU_DRV_CP_LSB 0x2418 #define BUTTRESS_REG_IPU_DRV_CP_MSB 0x241c #define BUTTRESS_REG_IPU_DRV_WAC_LSB 0x2420 #define BUTTRESS_REG_IPU_DRV_WAC_MSB 0x2424 #define BUTTRESS_REG_IPU_DRV_RAC_LSB 0x2428 #define BUTTRESS_REG_IPU_DRV_RAC_MSB 0x242c #define BUTTRESS_REG_IPU_FW_CP_LSB 0x2430 #define BUTTRESS_REG_IPU_FW_CP_MSB 0x2434 #define BUTTRESS_REG_IPU_FW_WAC_LSB 0x2438 #define BUTTRESS_REG_IPU_FW_WAC_MSB 0x243c #define BUTTRESS_REG_IPU_FW_RAC_LSB 0x2440 #define BUTTRESS_REG_IPU_FW_RAC_MSB 0x2444 #define BUTTRESS_REG_IPU_BIOS_SEC_CP_LSB 0x2448 #define BUTTRESS_REG_IPU_BIOS_SEC_CP_MSB 0x244c #define BUTTRESS_REG_IPU_BIOS_SEC_WAC_LSB 0x2450 #define BUTTRESS_REG_IPU_BIOS_SEC_WAC_MSB 0x2454 #define BUTTRESS_REG_IPU_BIOS_SEC_RAC_LSB 
0x2458 #define BUTTRESS_REG_IPU_BIOS_SEC_RAC_MSB 0x245c #define BUTTRESS_REG_IPU_DFD_CP_LSB 0x2460 #define BUTTRESS_REG_IPU_DFD_CP_MSB 0x2464 #define BUTTRESS_REG_IPU_DFD_WAC_LSB 0x2468 #define BUTTRESS_REG_IPU_DFD_WAC_MSB 0x246c #define BUTTRESS_REG_IPU_DFD_RAC_LSB 0x2470 #define BUTTRESS_REG_IPU_DFD_RAC_MSB 0x2474 #define BUTTRESS_REG_CSE2IUDB0 0x2500 #define BUTTRESS_REG_CSE2IUDATA0 0x2504 #define BUTTRESS_REG_CSE2IUCSR 0x2508 #define BUTTRESS_REG_IU2CSEDB0 0x250c #define BUTTRESS_REG_IU2CSEDATA0 0x2510 #define BUTTRESS_REG_IU2CSECSR 0x2514 #define BUTTRESS_REG_CSE2IUDB0_CR_SHADOW 0x2520 #define BUTTRESS_REG_CSE2IUDATA0_CR_SHADOW 0x2524 #define BUTTRESS_REG_CSE2IUCSR_CR_SHADOW 0x2528 #define BUTTRESS_REG_IU2CSEDB0_CR_SHADOW 0x252c #define BUTTRESS_REG_DVFS_FSM_SURVIVABILITY 0x2900 #define BUTTRESS_REG_FLOWS_FSM_SURVIVABILITY 0x2904 #define BUTTRESS_REG_FABRICS_FSM_SURVIVABILITY 0x2908 #define BUTTRESS_REG_PS_SUB1_PM_FSM_SURVIVABILITY 0x290c #define BUTTRESS_REG_PS_SUB0_PM_FSM_SURVIVABILITY 0x2910 #define BUTTRESS_REG_PS_PM_FSM_SURVIVABILITY 0x2914 #define BUTTRESS_REG_IS_PM_FSM_SURVIVABILITY 0x2918 #define BUTTRESS_REG_FLR_RST_FSM_SURVIVABILITY 0x291c #define BUTTRESS_REG_FW_RST_FSM_SURVIVABILITY 0x2920 #define BUTTRESS_REG_RESETPREP_FSM_SURVIVABILITY 0x2924 #define BUTTRESS_REG_POWER_FSM_DOMAIN_STATUS 0x3000 #define BUTTRESS_REG_IDLEREQ_STATUS1 0x3004 #define BUTTRESS_REG_POWER_FSM_STATUS_IS_PS 0x3008 #define BUTTRESS_REG_POWER_ACK_B_STATUS 0x300c #define BUTTRESS_REG_DOMAIN_RETENTION_CTL 0x3010 #define BUTTRESS_REG_CG_CTRL_BITS 0x3014 #define BUTTRESS_REG_IS_IFC_STATUS0 0x3018 #define BUTTRESS_REG_IS_IFC_STATUS1 0x301c #define BUTTRESS_REG_PS_IFC_STATUS0 0x3020 #define BUTTRESS_REG_PS_IFC_STATUS1 0x3024 #define BUTTRESS_REG_BTRS_IFC_STATUS0 0x3028 #define BUTTRESS_REG_BTRS_IFC_STATUS1 0x302c #define BUTTRESS_REG_IPU_SKU 0x3030 #define BUTTRESS_REG_PS_IDLEACK 0x3034 #define BUTTRESS_REG_IS_IDLEACK 0x3038 #define BUTTRESS_REG_SPARE_REGS_0 0x303c #define BUTTRESS_REG_SPARE_REGS_1 0x3040 #define BUTTRESS_REG_SPARE_REGS_2 0x3044 #define BUTTRESS_REG_SPARE_REGS_3 0x3048 #define BUTTRESS_REG_IUNIT_ACV 0x304c #define BUTTRESS_REG_CHICKEN_BITS 0x3050 #define BUTTRESS_REG_SBENDPOINT_CFG 0x3054 #define BUTTRESS_REG_ECC_ERR_LOG 0x3058 #define BUTTRESS_REG_POWER_FSM_STATUS 0x3070 #define BUTTRESS_REG_RESET_FSM_STATUS 0x3074 #define BUTTRESS_REG_IDLE_STATUS 0x3078 #define BUTTRESS_REG_IDLEACK_STATUS 0x307c #define BUTTRESS_REG_IPU_DEBUG 0x3080 #define BUTTRESS_REG_FW_BOOT_PARAMS0 0x4000 #define BUTTRESS_REG_FW_BOOT_PARAMS1 0x4004 #define BUTTRESS_REG_FW_BOOT_PARAMS2 0x4008 #define BUTTRESS_REG_FW_BOOT_PARAMS3 0x400c #define BUTTRESS_REG_FW_BOOT_PARAMS4 0x4010 #define BUTTRESS_REG_FW_BOOT_PARAMS5 0x4014 #define BUTTRESS_REG_FW_BOOT_PARAMS6 0x4018 #define BUTTRESS_REG_FW_BOOT_PARAMS7 0x401c #define BUTTRESS_REG_FW_BOOT_PARAMS8 0x4020 #define BUTTRESS_REG_FW_BOOT_PARAMS9 0x4024 #define BUTTRESS_REG_FW_BOOT_PARAMS10 0x4028 #define BUTTRESS_REG_FW_BOOT_PARAMS11 0x402c #define BUTTRESS_REG_FW_BOOT_PARAMS12 0x4030 #define BUTTRESS_REG_FW_BOOT_PARAMS13 0x4034 #define BUTTRESS_REG_FW_BOOT_PARAMS14 0x4038 #define BUTTRESS_REG_FW_BOOT_PARAMS15 0x403c #define BUTTRESS_FW_BOOT_PARAMS_ENTRY(i) \ (BUTTRESS_REG_FW_BOOT_PARAMS0 + ((i) * 4U)) #define BUTTRESS_REG_FW_GP(i) (0x4040 + 0x4 * (i)) #define BUTTRESS_REG_FPGA_SUPPORT(i) (0x40c0 + 0x4 * (i)) #define BUTTRESS_REG_FW_GP8 0x4060 #define BUTTRESS_REG_FW_GP24 0x40a0 #define BUTTRESS_REG_GPIO_0_PADCFG_ADDR_CR 0x4100 #define BUTTRESS_REG_GPIO_1_PADCFG_ADDR_CR 
0x4104 #define BUTTRESS_REG_GPIO_2_PADCFG_ADDR_CR 0x4108 #define BUTTRESS_REG_GPIO_3_PADCFG_ADDR_CR 0x410c #define BUTTRESS_REG_GPIO_4_PADCFG_ADDR_CR 0x4110 #define BUTTRESS_REG_GPIO_5_PADCFG_ADDR_CR 0x4114 #define BUTTRESS_REG_GPIO_6_PADCFG_ADDR_CR 0x4118 #define BUTTRESS_REG_GPIO_7_PADCFG_ADDR_CR 0x411c #define BUTTRESS_REG_GPIO_ENABLE 0x4140 #define BUTTRESS_REG_GPIO_VALUE_CR 0x4144 #define BUTTRESS_REG_IS_MEM_CORRECTABLE_ERROR_STATUS 0x5000 #define BUTTRESS_REG_IS_MEM_FATAL_ERROR_STATUS 0x5004 #define BUTTRESS_REG_IS_MEM_NON_FATAL_ERROR_STATUS 0x5008 #define BUTTRESS_REG_IS_MEM_CHECK_PASSED 0x500c #define BUTTRESS_REG_IS_MEM_ERROR_INJECT 0x5010 #define BUTTRESS_REG_IS_MEM_ERROR_CLEAR 0x5014 #define BUTTRESS_REG_PS_MEM_CORRECTABLE_ERROR_STATUS 0x5040 #define BUTTRESS_REG_PS_MEM_FATAL_ERROR_STATUS 0x5044 #define BUTTRESS_REG_PS_MEM_NON_FATAL_ERROR_STATUS 0x5048 #define BUTTRESS_REG_PS_MEM_CHECK_PASSED 0x504c #define BUTTRESS_REG_PS_MEM_ERROR_INJECT 0x5050 #define BUTTRESS_REG_PS_MEM_ERROR_CLEAR 0x5054 #define BUTTRESS_REG_IS_AB_REGION_MIN_ADDRESS(i) (0x6000 + 0x8 * (i)) #define BUTTRESS_REG_IS_AB_REGION_MAX_ADDRESS(i) (0x6004 + 0x8 * (i)) #define BUTTRESS_REG_IS_AB_VIOLATION_LOG0 0x6080 #define BUTTRESS_REG_IS_AB_VIOLATION_LOG1 0x6084 #define BUTTRESS_REG_PS_AB_REGION_MIN_ADDRESS(i) (0x6100 + 0x8 * (i)) #define BUTTRESS_REG_PS_AB_REGION_MAX_ADDRESS(i) (0x6104 + 0x8 * (i)) #define BUTTRESS_REG_PS_AB_VIOLATION_LOG0 0x6180 #define BUTTRESS_REG_PS_AB_VIOLATION_LOG1 0x6184 #define BUTTRESS_REG_PS_DEBUG_AB_VIOLATION_LOG0 0x6200 #define BUTTRESS_REG_PS_DEBUG_AB_VIOLATION_LOG1 0x6204 #define BUTTRESS_REG_IS_DEBUG_AB_VIOLATION_LOG0 0x6208 #define BUTTRESS_REG_IS_DEBUG_AB_VIOLATION_LOG1 0x620c #define BUTTRESS_REG_IB_DVP_AB_VIOLATION_LOG0 0x6210 #define BUTTRESS_REG_IB_DVP_AB_VIOLATION_LOG1 0x6214 #define BUTTRESS_REG_IB_ATB2DTF_AB_VIOLATION_LOG0 0x6218 #define BUTTRESS_REG_IB_ATB2DTF_AB_VIOLATION_LOG1 0x621c #define BUTTRESS_REG_AB_ENABLE 0x6220 #define BUTTRESS_REG_AB_DEFAULT_ACCESS 0x6230 /* Indicates CSE has received an IPU driver IPC transaction */ #define BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE BIT(0) /* Indicates an IPC transaction from CSE has arrived */ #define BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING BIT(1) /* Indicates a CSR update from CSE has arrived */ #define BUTTRESS_IRQ_CSE_CSR_SET BIT(2) /* Indicates an interrupt set by Punit (not in use at this time) */ #define BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ BIT(3) /* Indicates an SAI violation was detected on access to IB registers */ #define BUTTRESS_IRQ_SAI_VIOLATION BIT(4) /* Indicates a transaction to IS was not able to pass the access blocker */ #define BUTTRESS_IRQ_IS_AB_VIOLATION BIT(5) /* Indicates a transaction to PS was not able to pass the access blocker */ #define BUTTRESS_IRQ_PS_AB_VIOLATION BIT(6) /* Indicates an error response was detected by the IB config NoC */ #define BUTTRESS_IRQ_IB_CFG_NOC_ERR_IRQ BIT(7) /* Indicates an error response was detected by the IB data NoC */ #define BUTTRESS_IRQ_IB_DATA_NOC_ERR_IRQ BIT(8) /* Transaction to DVP regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_IB_DVP_AB_VIOLATION BIT(9) /* Transaction to ATB2DTF regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_ATB2DTF_AB_VIOLATION BIT(10) /* Transaction to IS debug regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_IS_DEBUG_AB_VIOLATION BIT(11) /* Transaction to PS debug regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_PS_DEBUG_AB_VIOLATION BIT(12) /* Indicates timeout occurred
waiting for a response from a target */ #define BUTTRESS_IRQ_IB_CFG_NOC_TIMEOUT_IRQ BIT(13) /* Set when any correctable ECC error input wire to buttress is set */ #define BUTTRESS_IRQ_ECC_CORRECTABLE BIT(14) /* Any noncorrectable-nonfatal ECC error input wire to buttress is set */ #define BUTTRESS_IRQ_ECC_NONCORRECTABLE_NONFATAL BIT(15) /* Set when any noncorrectable-fatal ECC error input wire to buttress is set */ #define BUTTRESS_IRQ_ECC_NONCORRECTABLE_FATAL BIT(16) /* IS FW double exception event */ #define BUTTRESS_IRQ_IS_UC_PFATAL_ERROR BIT(26) /* PS FW double exception event */ #define BUTTRESS_IRQ_PS_UC_PFATAL_ERROR BIT(27) /* IS FW watchdog event */ #define BUTTRESS_IRQ_IS_WATCHDOG BIT(28) /* PS FW watchdog event */ #define BUTTRESS_IRQ_PS_WATCHDOG BIT(29) /* IS IRC irq out */ #define BUTTRESS_IRQ_IS_IRQ BIT(30) /* PS IRC irq out */ #define BUTTRESS_IRQ_PS_IRQ BIT(31) /* buttress irq */ enum { BUTTRESS_PWR_STATUS_HH_STATE_IDLE, BUTTRESS_PWR_STATUS_HH_STATE_IN_PRGS, BUTTRESS_PWR_STATUS_HH_STATE_DONE, BUTTRESS_PWR_STATUS_HH_STATE_ERR, }; #define BUTTRESS_TSC_CMD_START_TSC_SYNC BIT(0) #define BUTTRESS_PWR_STATUS_HH_STATUS_SHIFT 11 #define BUTTRESS_PWR_STATUS_HH_STATUS_MASK (0x3 << 11) #define BUTTRESS_TSW_WA_SOFT_RESET BIT(8) /* new for PTL */ #define BUTTRESS_SEL_PB_TIMESTAMP BIT(9) #define BUTTRESS_IRQS (BUTTRESS_IRQ_IS_IRQ | \ BUTTRESS_IRQ_PS_IRQ | \ BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING | \ BUTTRESS_IRQ_CSE_CSR_SET | \ BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE | \ BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ) /* Iunit to CSE regs */ #define BUTTRESS_IU2CSEDB0_BUSY BIT(31) #define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27 #define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10 #define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 #define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 #define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 #define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 #define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 #define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) #define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) #define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) #define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) #define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) #define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) #define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) #define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) #define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) #define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) /* 0x20 == NACK, 0xf == unknown command */ #define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 #define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff /* IS/PS freq control */ #define BUTTRESS_IS_FREQ_CTL_RATIO_MASK 0xff #define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xff #define BUTTRESS_IS_FREQ_CTL_CDYN_MASK 0xff #define BUTTRESS_PS_FREQ_CTL_CDYN_MASK 0xff #define IPU7_IS_FREQ_MAX 450 #define IPU7_IS_FREQ_MIN 50 #define IPU7_PS_FREQ_MAX 750 #define BUTTRESS_PS_FREQ_RATIO_STEP 25U /* From PTL, the minimal freq is updated to 325mhz */ #define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_RATIO_STEP * 13) #define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_RATIO_STEP * 30) #define BUTTRESS_MIN_FORCE_IS_RATIO 5 #define BUTTRESS_MAX_FORCE_IS_RATIO 27 /* IS: 400mhz, PS: 500mhz */ /* Need update CRO_CLK divisor if using CRO_CLK in blocked state */ #define IPU7_IS_FREQ_CTL_DEFAULT_RATIO 0x1b #define IPU7_PS_FREQ_CTL_DEFAULT_RATIO 0x14 #define IPU_FREQ_CTL_CDYN 0x80 #define IPU_FREQ_CTL_RATIO_SHIFT 0x0 #define 
IPU_FREQ_CTL_CDYN_SHIFT 0x8 /* buttress power status */ #define IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 0 #define IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK \ (0x3 << IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT) #define IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 4 #define IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK \ (0x3 << IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT) #define IPU_BUTTRESS_PWR_STATE_DN_DONE 0x0 #define IPU_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 #define IPU_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 #define IPU_BUTTRESS_PWR_STATE_UP_DONE 0x3 #define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 #define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x3 << 3) #define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 #define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x3 << 6) #define PS_FSM_CG BIT(3) #define BUTTRESS_OVERRIDE_IS_CLK BIT(1) #define BUTTRESS_OVERRIDE_PS_CLK BIT(2) #define BUTTRESS_OWN_ACK_IS_CLK BIT(9) #define BUTTRESS_OWN_ACK_PS_CLK BIT(10) /* FW reset ctrl */ #define BUTTRESS_FW_RESET_CTL_START BIT(0) #define BUTTRESS_FW_RESET_CTL_DONE BIT(1) /* security */ #define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) #define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) #define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) #define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) #define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) /* D2D */ #define BUTTRESS_D2D_PWR_EN BIT(0) #define BUTTRESS_D2D_PWR_ACK BIT(4) /* NDE */ #define NDE_VAL_MASK GENMASK(9, 0) #define NDE_SCALE_MASK GENMASK(12, 10) #define NDE_VALID_MASK BIT(13) #define NDE_RESVEC_MASK GENMASK(19, 16) #define NDE_IN_VBLANK_DIS_MASK BIT(31) #define BUTTRESS_NDE_RESVEC 0xe #define BUTTRESS_NDE_VAL_ACTIVE 480 #define BUTTRESS_NDE_SCALE_ACTIVE 2 #define BUTTRESS_NDE_VALID_ACTIVE 1 #define BUTTRESS_NDE_VAL_DEFAULT 1023 #define BUTTRESS_NDE_SCALE_DEFAULT 2 #define BUTTRESS_NDE_VALID_DEFAULT 0 /* IS and PS UCX control */ #define UCX_CTL_RESET BIT(0) #define UCX_CTL_RUN BIT(1) #define UCX_CTL_WAKEUP BIT(2) #define UCX_CTL_SPARE GENMASK(7, 3) #define UCX_STS_PWR GENMASK(17, 16) #define UCX_STS_SLEEPING BIT(18) /* offset from PHY base */ #define PHY_CSI_CFG 0xc0 #define PHY_CSI_RCOMP_CONTROL 0xc8 #define PHY_CSI_BSCAN_EXCLUDE 0xd8 #define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) #define PHY_DPHY_DLL_OVRD(x) (0x14c + 0x100 * (x)) #define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) #define PHY_CPHY_RX_CONTROL2(x) (0x114 + 0x100 * (x)) #define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) #define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) /* PB registers */ #define INTERRUPT_STATUS 0x0 #define BTRS_LOCAL_INTERRUPT_MASK 0x4 #define GLOBAL_INTERRUPT_MASK 0x8 #define HM_ATS 0xc #define ATS_ERROR_LOG1 0x10 #define ATS_ERROR_LOG2 0x14 #define ATS_ERROR_CLEAR 0x18 #define CFI_0_ERROR_LOG 0x1c #define CFI_0_ERROR_CLEAR 0x20 #define HASH_CONFIG 0x2c #define TLBID_HASH_ENABLE_31_0 0x30 #define TLBID_HASH_ENABLE_63_32 0x34 #define TLBID_HASH_ENABLE_95_64 0x38 #define TLBID_HASH_ENABLE_127_96 0x3c #define CFI_1_ERROR_LOGGING 0x40 #define CFI_1_ERROR_CLEAR 0x44 #define IMR_ERROR_LOGGING_LOW 0x48 #define IMR_ERROR_LOGGING_HIGH 0x4c #define IMR_ERROR_CLEAR 0x50 #define PORT_ARBITRATION_WEIGHTS 0x54 #define IMR_ERROR_LOGGING_CFI_1_LOW 0x58 #define IMR_ERROR_LOGGING_CFI_1_HIGH 0x5c #define IMR_ERROR_CLEAR_CFI_1 0x60 #define BAR2_MISC_CONFIG 0x64 #define RSP_ID_CONFIG_AXI2CFI_0 0x68 #define RSP_ID_CONFIG_AXI2CFI_1 0x6c #define PB_DRIVER_PCODE_MAILBOX_STATUS 0x70 #define PB_DRIVER_PCODE_MAILBOX_INTERFACE 0x74 #define PORT_ARBITRATION_WEIGHTS_ATS 0x78 #endif /* IPU7_BUTTRESS_REGS_H */
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-buttress.c000066400000000000000000001002751465530421300271620ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #ifdef CONFIG_DEBUG_FS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-buttress-regs.h" #define BOOTLOADER_STATUS_OFFSET BUTTRESS_REG_FW_BOOT_PARAMS7 #define BOOTLOADER_MAGIC_KEY 0xb00710ad #define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 #define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 #define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE #define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 #define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC) #define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * USEC_PER_SEC) #define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC) #define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC) #define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC #define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC #define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC) #define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC) #define BUTTRESS_IPC_RESET_RETRY 2000 #define BUTTRESS_CSE_IPC_RESET_RETRY 4 #define BUTTRESS_IPC_CMD_SEND_RETRY 1 static const u32 ipu7_adev_irq_mask[2] = { BUTTRESS_IRQ_IS_IRQ, BUTTRESS_IRQ_PS_IRQ }; int ipu7_buttress_ipc_reset(struct ipu7_device *isp, struct ipu7_buttress_ipc *ipc) { unsigned int retries = BUTTRESS_IPC_RESET_RETRY; struct ipu7_buttress *b = &isp->buttress; u32 val = 0, csr_in_clr; if (!isp->secure_mode) { dev_dbg(&isp->pdev->dev, "Skip IPC reset for non-secure mode"); return 0; } mutex_lock(&b->ipc_mutex); /* Clear-by-1 CSR (all bits), corresponding internal states. */ val = readl(isp->base + ipc->csr_in); writel(val, isp->base + ipc->csr_in); /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ writel(ENTRY, isp->base + ipc->csr_out); /* * Clear-by-1 all CSR bits EXCEPT following * bits: * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. * C. Possibly custom bits, depending on * their role. */ csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; do { usleep_range(400, 500); val = readl(isp->base + ipc->csr_in); switch (val) { case ENTRY | EXIT: case ENTRY | EXIT | QUERY: /* * 1) Clear-by-1 CSR bits * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, * IPC_PEER_COMP_ACTIONS_RST_PHASE2). * 2) Set peer CSR bit * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. */ writel(ENTRY | EXIT, isp->base + ipc->csr_in); writel(QUERY, isp->base + ipc->csr_out); break; case ENTRY: case ENTRY | QUERY: /* * 1) Clear-by-1 CSR bits * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). * 2) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE1. */ writel(ENTRY | QUERY, isp->base + ipc->csr_in); writel(ENTRY, isp->base + ipc->csr_out); break; case EXIT: case EXIT | QUERY: /* * Clear-by-1 CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE2. * 1) Clear incoming doorbell. * 2) Clear-by-1 all CSR bits EXCEPT following * bits: * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. * C. Possibly custom bits, depending on * their role. * 3) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE2. 
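* Note (annotation): if the peer raises QUERY again right after phase 2, it has restarted the handshake; the code below therefore re-reads csr_in and loops once more in that case.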
*/ writel(EXIT, isp->base + ipc->csr_in); writel(0, isp->base + ipc->db0_in); writel(csr_in_clr, isp->base + ipc->csr_in); writel(EXIT, isp->base + ipc->csr_out); /* * Read csr_in again to make sure if RST_PHASE2 is done. * If csr_in is QUERY, it should be handled again. */ usleep_range(200, 300); val = readl(isp->base + ipc->csr_in); if (val & QUERY) { dev_dbg(&isp->pdev->dev, "RST_PHASE2 retry csr_in = %x\n", val); break; } mutex_unlock(&b->ipc_mutex); return 0; case QUERY: /* * 1) Clear-by-1 CSR bit * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. * 2) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ writel(QUERY, isp->base + ipc->csr_in); writel(ENTRY, isp->base + ipc->csr_out); break; default: dev_warn_ratelimited(&isp->pdev->dev, "Unexpected CSR 0x%x\n", val); break; } } while (retries--); mutex_unlock(&b->ipc_mutex); dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n"); return -ETIMEDOUT; } static void ipu7_buttress_ipc_validity_close(struct ipu7_device *isp, struct ipu7_buttress_ipc *ipc) { writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, isp->base + ipc->csr_out); } static int ipu7_buttress_ipc_validity_open(struct ipu7_device *isp, struct ipu7_buttress_ipc *ipc) { unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; void __iomem *addr; int ret; u32 val; writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, isp->base + ipc->csr_out); addr = isp->base + ipc->csr_in; ret = readl_poll_timeout(addr, val, val & mask, 200, BUTTRESS_IPC_VALIDITY_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val); ipu7_buttress_ipc_validity_close(isp, ipc); } return ret; } static void ipu7_buttress_ipc_recv(struct ipu7_device *isp, struct ipu7_buttress_ipc *ipc, u32 *ipc_msg) { if (ipc_msg) *ipc_msg = readl(isp->base + ipc->data0_in); writel(0, isp->base + ipc->db0_in); } static int ipu7_buttress_ipc_send_bulk(struct ipu7_device *isp, enum ipu7_buttress_ipc_domain ipc_domain, struct ipu7_ipc_buttress_bulk_msg *msgs, u32 size) { unsigned long tx_timeout_jiffies, rx_timeout_jiffies; unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; struct ipu7_buttress *b = &isp->buttress; struct ipu7_buttress_ipc *ipc; u32 val; int ret; int tout; ipc = ipc_domain == IPU_BUTTRESS_IPC_CSE ? 
&b->cse : &b->ish; mutex_lock(&b->ipc_mutex); ret = ipu7_buttress_ipc_validity_open(isp, ipc); if (ret) { dev_err(&isp->pdev->dev, "IPC validity open failed\n"); goto out; } tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS); rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS); for (i = 0; i < size; i++) { reinit_completion(&ipc->send_complete); if (msgs[i].require_resp) reinit_completion(&ipc->recv_complete); dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", msgs[i].cmd); writel(msgs[i].cmd, isp->base + ipc->data0_out); val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size; writel(val, isp->base + ipc->db0_out); tout = wait_for_completion_timeout(&ipc->send_complete, tx_timeout_jiffies); if (!tout) { dev_err(&isp->pdev->dev, "send IPC response timeout\n"); if (!retry--) { ret = -ETIMEDOUT; goto out; } /* Try again if CSE is not responding on first try */ writel(0, isp->base + ipc->db0_out); i--; continue; } retry = BUTTRESS_IPC_CMD_SEND_RETRY; if (!msgs[i].require_resp) continue; tout = wait_for_completion_timeout(&ipc->recv_complete, rx_timeout_jiffies); if (!tout) { dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); ret = -ETIMEDOUT; goto out; } if (ipc->nack_mask && (ipc->recv_data & ipc->nack_mask) == ipc->nack) { dev_err(&isp->pdev->dev, "IPC NACK for cmd 0x%x\n", msgs[i].cmd); ret = -EIO; goto out; } if (ipc->recv_data != msgs[i].expected_resp) { dev_err(&isp->pdev->dev, "expected resp: 0x%x, IPC response: 0x%x ", msgs[i].expected_resp, ipc->recv_data); ret = -EIO; goto out; } } dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n"); out: ipu7_buttress_ipc_validity_close(isp, ipc); mutex_unlock(&b->ipc_mutex); return ret; } static int ipu7_buttress_ipc_send(struct ipu7_device *isp, enum ipu7_buttress_ipc_domain ipc_domain, u32 ipc_msg, u32 size, bool require_resp, u32 expected_resp) { struct ipu7_ipc_buttress_bulk_msg msg = { .cmd = ipc_msg, .cmd_size = size, .require_resp = require_resp, .expected_resp = expected_resp, }; return ipu7_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); } static irqreturn_t ipu7_buttress_call_isr(struct ipu7_bus_device *adev) { irqreturn_t ret = IRQ_WAKE_THREAD; if (!adev || !adev->auxdrv || !adev->auxdrv_data) return IRQ_NONE; if (adev->auxdrv_data->isr) ret = adev->auxdrv_data->isr(adev); if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded) ret = IRQ_NONE; return ret; } irqreturn_t ipu7_buttress_isr(int irq, void *isp_ptr) { struct ipu7_device *isp = isp_ptr; struct ipu7_bus_device *adev[] = { isp->isys, isp->psys }; struct ipu7_buttress *b = &isp->buttress; struct device *dev = &isp->pdev->dev; irqreturn_t ret = IRQ_NONE; u32 disable_irqs = 0; u32 irq_status; u32 pb_irq, pb_local_irq; unsigned int i; pm_runtime_get_noresume(dev); pb_irq = readl(isp->pb_base + INTERRUPT_STATUS); writel(pb_irq, isp->pb_base + INTERRUPT_STATUS); pb_local_irq = readl(isp->pb_base + BTRS_LOCAL_INTERRUPT_MASK); if (pb_local_irq) { dev_warn(dev, "PB interrupt status 0x%x local 0x%x\n", pb_irq, pb_local_irq); dev_warn(dev, "Details: %x %x %x %x %x %x %x %x\n", readl(isp->pb_base + ATS_ERROR_LOG1), readl(isp->pb_base + ATS_ERROR_LOG2), readl(isp->pb_base + CFI_0_ERROR_LOG), readl(isp->pb_base + CFI_1_ERROR_LOGGING), readl(isp->pb_base + IMR_ERROR_LOGGING_LOW), readl(isp->pb_base + IMR_ERROR_LOGGING_HIGH), readl(isp->pb_base + IMR_ERROR_LOGGING_CFI_1_LOW), readl(isp->pb_base + IMR_ERROR_LOGGING_CFI_1_HIGH)); } irq_status = readl(isp->base + BUTTRESS_REG_IRQ_STATUS); if (!irq_status) { pm_runtime_put_noidle(dev); return IRQ_NONE; } do 
{ writel(irq_status, isp->base + BUTTRESS_REG_IRQ_CLEAR); for (i = 0; i < ARRAY_SIZE(ipu7_adev_irq_mask); i++) { irqreturn_t r = ipu7_buttress_call_isr(adev[i]); if (!(irq_status & ipu7_adev_irq_mask[i])) continue; if (r == IRQ_WAKE_THREAD) { ret = IRQ_WAKE_THREAD; disable_irqs |= ipu7_adev_irq_mask[i]; } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { ret = IRQ_HANDLED; } } if (irq_status & (BUTTRESS_IRQS | BUTTRESS_IRQ_SAI_VIOLATION) && ret == IRQ_NONE) ret = IRQ_HANDLED; if (irq_status & BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING) { dev_dbg(dev, "BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING\n"); ipu7_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); complete(&b->cse.recv_complete); } if (irq_status & BUTTRESS_IRQ_CSE_CSR_SET) dev_dbg(dev, "BUTTRESS_IRQ_CSE_CSR_SET\n"); if (irq_status & BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE) { dev_dbg(dev, "BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE\n"); complete(&b->cse.send_complete); } if (irq_status & BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ) dev_dbg(dev, "BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ\n"); if (irq_status & BUTTRESS_IRQ_SAI_VIOLATION && ipu7_buttress_get_secure_mode(isp)) dev_err(dev, "BUTTRESS_IRQ_SAI_VIOLATION\n"); irq_status = readl(isp->base + BUTTRESS_REG_IRQ_STATUS); } while (irq_status); if (disable_irqs) writel(BUTTRESS_IRQS & ~disable_irqs, isp->base + BUTTRESS_REG_IRQ_ENABLE); pm_runtime_put(dev); return ret; } irqreturn_t ipu7_buttress_isr_threaded(int irq, void *isp_ptr) { struct ipu7_device *isp = isp_ptr; struct ipu7_bus_device *adev[] = { isp->isys, isp->psys }; const struct ipu7_auxdrv_data *drv_data = NULL; irqreturn_t ret = IRQ_NONE; unsigned int i; for (i = 0; i < ARRAY_SIZE(ipu7_adev_irq_mask) && adev[i]; i++) { drv_data = adev[i]->auxdrv_data; if (!drv_data) continue; if (drv_data->wake_isr_thread && drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) ret = IRQ_HANDLED; } writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_ENABLE); return ret; } static int isys_d2d_power(struct device *dev, bool on) { int ret = 0; u32 val; struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; dev_dbg(&isp->pdev->dev, "power %s isys d2d.\n", on ? "UP" : "DOWN"); val = readl(isp->base + BUTTRESS_REG_D2D_CTL); if (!(val & BUTTRESS_D2D_PWR_ACK) ^ on) { dev_info(&isp->pdev->dev, "d2d already in %s state.\n", on ? "UP" : "DOWN"); return 0; } val = on ? val | BUTTRESS_D2D_PWR_EN : val & (~BUTTRESS_D2D_PWR_EN); writel(val, isp->base + BUTTRESS_REG_D2D_CTL); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_D2D_CTL, val, (!(val & BUTTRESS_D2D_PWR_ACK) ^ on), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) dev_err(&isp->pdev->dev, "power %s d2d timeout. status: 0x%x\n", on ? 
"UP" : "DOWN", val); return ret; } static void isys_nde_control(struct device *dev, bool on) { u32 val, value, scale, valid, resvec; struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; if (on) { value = BUTTRESS_NDE_VAL_ACTIVE; scale = BUTTRESS_NDE_SCALE_ACTIVE; valid = BUTTRESS_NDE_VALID_ACTIVE; } else { value = BUTTRESS_NDE_VAL_DEFAULT; scale = BUTTRESS_NDE_SCALE_DEFAULT; valid = BUTTRESS_NDE_VALID_DEFAULT; } resvec = BUTTRESS_NDE_RESVEC; val = FIELD_PREP(NDE_VAL_MASK, value) | FIELD_PREP(NDE_SCALE_MASK, scale) | FIELD_PREP(NDE_VALID_MASK, valid) | FIELD_PREP(NDE_RESVEC_MASK, resvec); dev_dbg(dev, "Set NED control value to 0x%x\n", val); writel(val, isp->base + BUTTRESS_REG_NDE_CONTROL); } /* TODO: split buttress_power to 2 functions */ int ipu7_buttress_power(struct device *dev, struct ipu7_buttress_ctrl *ctrl, bool on) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; u32 val, exp_sts; int ret = 0; if (!ctrl) return 0; mutex_lock(&isp->buttress.power_mutex); exp_sts = on ? ctrl->pwr_sts_on << ctrl->pwr_sts_shift : ctrl->pwr_sts_off << ctrl->pwr_sts_shift; if (on) { if (ctrl->subsys_id == IPU_IS) { ret = isys_d2d_power(dev, on); if (ret) goto out_power; isys_nde_control(dev, on); } /* request clock resource ownership */ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); val |= ctrl->ovrd_clk; writel(val, isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SLEEP_LEVEL_STS, val, (val & ctrl->own_clk_ack), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) dev_err(&isp->pdev->dev, "request clk ownership timeout. status 0x%x\n", val); } if (on) val = ctrl->ratio << ctrl->ratio_shift | ctrl->cdyn << ctrl->cdyn_shift; else val = 0x8 << ctrl->ratio_shift; dev_dbg(&isp->pdev->dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val, ctrl->subsys_id == IPU_IS ? "IS" : "PS"); writel(val, isp->base + ctrl->freq_ctl); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, ((val & ctrl->pwr_sts_mask) == exp_sts), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "%s power %s timeout with status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", on ? "up" : "down", val); goto out_power; } dev_dbg(&isp->pdev->dev, "%s power %s successfully. status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", on ? "up" : "down", val); if (on) { /* release clock resource ownership */ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); val &= ~ctrl->ovrd_clk; writel(val, isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); } out_power: if (!on && ctrl->subsys_id == IPU_IS && !ret) { isys_d2d_power(dev, on); isys_nde_control(dev, on); } ctrl->started = !ret && on; mutex_unlock(&isp->buttress.power_mutex); dev_dbg(&isp->pdev->dev, "%s buttress power %s done.\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", on ? 
"up" : "down"); return ret; } bool ipu7_buttress_get_secure_mode(struct ipu7_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; } bool ipu7_buttress_auth_done(struct ipu7_device *isp) { u32 val; if (!isp->secure_mode) return true; val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_auth_done, INTEL_IPU7); static void ipu7_buttress_set_isys_ratio(struct ipu7_device *isp, u32 isys_ratio) { struct ipu7_buttress_ctrl *ctrl = isp->isys->ctrl; mutex_lock(&isp->buttress.power_mutex); if (ctrl->ratio == isys_ratio) goto out_mutex_unlock; ctrl->ratio = isys_ratio; if (!ctrl->started) return; writel(ctrl->ratio << ctrl->ratio_shift | ctrl->cdyn << ctrl->cdyn_shift, isp->base + ctrl->freq_ctl); out_mutex_unlock: mutex_unlock(&isp->buttress.power_mutex); } int ipu7_buttress_get_isys_freq(struct ipu7_device *isp, u32 *freq) { u32 reg_val; int ret; ret = pm_runtime_get_sync(&isp->isys->auxdev.dev); if (ret < 0) { pm_runtime_put(&isp->isys->auxdev.dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } reg_val = readl(isp->base + BUTTRESS_REG_IS_WORKPOINT_REQ); pm_runtime_put(&isp->isys->auxdev.dev); *freq = (reg_val & BUTTRESS_IS_FREQ_CTL_RATIO_MASK) * 50 / 3; return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_get_isys_freq, INTEL_IPU7); int ipu7_buttress_get_psys_freq(struct ipu7_device *isp, u32 *freq) { u32 reg_val; int ret; ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { pm_runtime_put(&isp->psys->auxdev.dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } reg_val = readl(isp->base + BUTTRESS_REG_PS_WORKPOINT_REQ); pm_runtime_put(&isp->psys->auxdev.dev); reg_val &= BUTTRESS_PS_FREQ_CTL_RATIO_MASK; *freq = BUTTRESS_PS_FREQ_RATIO_STEP * reg_val; return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_get_psys_freq, INTEL_IPU7); static int ipu7_buttress_set_isys_freq(struct ipu7_device *isp, u64 freq) { u32 ratio = freq * 50 / 3; int ret; if (freq < BUTTRESS_MIN_FORCE_IS_RATIO || freq > BUTTRESS_MAX_FORCE_IS_RATIO) return -EINVAL; ret = pm_runtime_get_sync(&isp->isys->auxdev.dev); if (ret < 0) { pm_runtime_put(&isp->isys->auxdev.dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } if (freq) ipu7_buttress_set_isys_ratio(isp, ratio); pm_runtime_put(&isp->isys->auxdev.dev); return 0; } int ipu7_buttress_reset_authentication(struct ipu7_device *isp) { int ret; u32 val; if (!isp->secure_mode) { dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); return 0; } writel(BUTTRESS_FW_RESET_CTL_START, isp->base + BUTTRESS_REG_FW_RESET_CTL); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, val & BUTTRESS_FW_RESET_CTL_DONE, 500, BUTTRESS_CSE_FWRESET_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "Time out while resetting authentication state\n"); return ret; } dev_dbg(&isp->pdev->dev, "FW reset for authentication done\n"); writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); /* leave some time for HW restore */ usleep_range(800, 1000); return 0; } int ipu7_buttress_authenticate(struct ipu7_device *isp) { struct ipu7_buttress *b = &isp->buttress; u32 data, mask, done, fail; int ret; if (!isp->secure_mode) { dev_dbg(&isp->pdev->dev, "Skip auth for non-secure mode\n"); return 0; } mutex_lock(&b->auth_mutex); if (ipu7_buttress_auth_done(isp)) { ret = 0; goto out_unlock; } /* * 
BUTTRESS_REG_FW_SOURCE_BASE needs to be set with FW CPD * package address for secure mode. */ writel(isp->cpd_fw->size, isp->base + BUTTRESS_REG_FW_SOURCE_SIZE); writel(sg_dma_address(isp->psys->fw_sgt.sgl), isp->base + BUTTRESS_REG_FW_SOURCE_BASE); /* * Write boot_load into IU2CSEDATA0 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY */ dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); ret = ipu7_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE, BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, 1, true, BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); if (ret) { dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); goto out_unlock; } mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, ((data & mask) == done || (data & mask) == fail), 500, BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); goto out_unlock; } if ((data & mask) == fail) { dev_err(&isp->pdev->dev, "CSE auth failed\n"); ret = -EINVAL; goto out_unlock; } ret = readl_poll_timeout(isp->base + BOOTLOADER_STATUS_OFFSET, data, data == BOOTLOADER_MAGIC_KEY, 500, BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "Unexpected magic number 0x%x\n", data); goto out_unlock; } /* * Write authenticate_run into IU2CSEDATA0 * Write sizeof(authenticate_run) | 0x2 << CLIENT_ID to * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY */ dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); ret = ipu7_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE, BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, 1, true, BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); if (ret) { dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); goto out_unlock; } done = BUTTRESS_SECURITY_CTL_AUTH_DONE; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, ((data & mask) == done || (data & mask) == fail), 500, BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); goto out_unlock; } if ((data & mask) == fail) { dev_err(&isp->pdev->dev, "CSE authenticate failed\n"); ret = -EINVAL; goto out_unlock; } dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); out_unlock: mutex_unlock(&b->auth_mutex); return ret; } static int ipu7_buttress_send_tsc_request(struct ipu7_device *isp) { u32 val, mask, done; int ret; mask = BUTTRESS_PWR_STATUS_HH_STATUS_MASK; writel(BUTTRESS_TSC_CMD_START_TSC_SYNC, isp->base + BUTTRESS_REG_TSC_CMD); val = readl(isp->base + BUTTRESS_REG_PWR_STATUS); val = FIELD_GET(mask, val); if (val == BUTTRESS_PWR_STATUS_HH_STATE_ERR) { dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); return -EINVAL; } done = BUTTRESS_PWR_STATUS_HH_STATE_DONE; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, FIELD_GET(mask, val) == done, 500, BUTTRESS_TSC_SYNC_TIMEOUT_US); if (ret) dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); return ret; } int ipu7_buttress_start_tsc_sync(struct ipu7_device *isp) { void __iomem *base = isp->base; unsigned int i; u32 val; if (is_ipu7p5(isp->hw_ver)) { val = readl(base + BUTTRESS_REG_TSC_CTL); val |= BUTTRESS_SEL_PB_TIMESTAMP; writel(val, base + BUTTRESS_REG_TSC_CTL); for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { val = readl(base + BUTTRESS_REG_PB_TIMESTAMP_VALID); if (val == 1) return 0; usleep_range(40, 50); } dev_err(&isp->pdev->dev, "PB HH sync failed (valid %u)\n", val); return -ETIMEDOUT; } for (i = 0; i <
BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { int ret; ret = ipu7_buttress_send_tsc_request(isp); if (ret != -ETIMEDOUT) return ret; val = readl(base + BUTTRESS_REG_TSC_CTL); val = val | BUTTRESS_TSW_WA_SOFT_RESET; writel(val, base + BUTTRESS_REG_TSC_CTL); val = val & (~BUTTRESS_TSW_WA_SOFT_RESET); writel(val, base + BUTTRESS_REG_TSC_CTL); } dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); return -ETIMEDOUT; } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_start_tsc_sync, INTEL_IPU7); void ipu7_buttress_tsc_read(struct ipu7_device *isp, u64 *val) { u32 tsc_hi, tsc_lo; unsigned long flags; local_irq_save(flags); if (is_ipu7p5(isp->hw_ver)) { tsc_lo = readl(isp->base + BUTTRESS_REG_PB_TIMESTAMP_LO); tsc_hi = readl(isp->base + BUTTRESS_REG_PB_TIMESTAMP_HI); } else { tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); tsc_hi = readl(isp->base + BUTTRESS_REG_TSC_HI); } *val = (u64)tsc_hi << 32 | tsc_lo; local_irq_restore(flags); } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_tsc_read, INTEL_IPU7); #ifdef CONFIG_DEBUG_FS static int ipu7_buttress_start_tsc_sync_set(void *data, u64 val) { struct ipu7_device *isp = data; return ipu7_buttress_start_tsc_sync(isp); } DEFINE_SIMPLE_ATTRIBUTE(ipu7_buttress_start_tsc_sync_fops, NULL, ipu7_buttress_start_tsc_sync_set, "%llu\n"); static int ipu7_buttress_tsc_get(void *data, u64 *val) { ipu7_buttress_tsc_read(data, val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(ipu7_buttress_tsc_fops, ipu7_buttress_tsc_get, NULL, "%llu\n"); static int ipu7_buttress_isys_freq_get(void *data, u64 *val) { u32 freq; int ret; ret = ipu7_buttress_get_isys_freq(data, &freq); if (ret < 0) return ret; *val = freq; return 0; } static int ipu7_buttress_isys_freq_set(void *data, u64 val) { return ipu7_buttress_set_isys_freq(data, val); } DEFINE_SIMPLE_ATTRIBUTE(ipu7_buttress_psys_freq_fops, ipu7_buttress_psys_freq_get, NULL, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(ipu7_buttress_isys_freq_fops, ipu7_buttress_isys_freq_get, ipu7_buttress_isys_freq_set, "%llu\n"); int ipu7_buttress_debugfs_init(struct ipu7_device *isp) { struct dentry *dir, *file; dir = debugfs_create_dir("buttress", isp->ipu7_dir); if (!dir) return -ENOMEM; file = debugfs_create_file("start_tsc_sync", 0200, dir, isp, &ipu7_buttress_start_tsc_sync_fops); if (!file) goto err; file = debugfs_create_file("tsc", 0400, dir, isp, &ipu7_buttress_tsc_fops); if (!file) goto err; file = debugfs_create_file("psys_freq", 0400, dir, isp, &ipu7_buttress_psys_freq_fops); if (!file) goto err; file = debugfs_create_file("isys_freq", 0700, dir, isp, &ipu7_buttress_isys_freq_fops); if (!file) goto err; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif /* CONFIG_DEBUG_FS */ u64 ipu7_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu7_device *isp) { u64 ns = ticks * 10000; /* * converting TSC tick count to ns is calculated by: * Example (TSC clock frequency is 19.2MHz): * ns = ticks * 1000 000 000 / 19.2Mhz * = ticks * 1000 000 000 / 19200000Hz * = ticks * 10000 / 192 ns */ return div_u64(ns, isp->buttress.ref_clk); } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_tsc_ticks_to_ns, INTEL_IPU7); u32 ipu7_buttress_get_ref_clk(const struct ipu7_device *isp) { return isp->buttress.ref_clk; } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_get_ref_clk, INTEL_IPU7); /* trigger uc control to wakeup fw */ void ipu7_buttress_wakeup_is_uc(const struct ipu7_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS); val |= UCX_CTL_WAKEUP; writel(val, isp->base + BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS); } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_wakeup_is_uc, 
INTEL_IPU7); void ipu7_buttress_wakeup_ps_uc(const struct ipu7_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS); val |= UCX_CTL_WAKEUP; writel(val, isp->base + BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS); } EXPORT_SYMBOL_NS_GPL(ipu7_buttress_wakeup_ps_uc, INTEL_IPU7); static ssize_t psys_fused_min_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev)); return snprintf(buf, PAGE_SIZE, "%u\n", isp->buttress.psys_fused_freqs.min_freq); } static DEVICE_ATTR_RO(psys_fused_min_freq); static ssize_t psys_fused_max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev)); return snprintf(buf, PAGE_SIZE, "%u\n", isp->buttress.psys_fused_freqs.max_freq); } static DEVICE_ATTR_RO(psys_fused_max_freq); static ssize_t psys_fused_efficient_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev)); return snprintf(buf, PAGE_SIZE, "%u\n", isp->buttress.psys_fused_freqs.efficient_freq); } static DEVICE_ATTR_RO(psys_fused_efficient_freq); static void ipu7_buttress_setup(struct ipu7_device *isp) { /* program PB BAR */ writel(0, isp->pb_base + GLOBAL_INTERRUPT_MASK); writel(0x100, isp->pb_base + BAR2_MISC_CONFIG); if (is_ipu7p5(isp->hw_ver)) { writel(BIT(14), isp->pb_base + TLBID_HASH_ENABLE_63_32); writel(BIT(9), isp->pb_base + TLBID_HASH_ENABLE_95_64); dev_dbg(&isp->pdev->dev, "PTL TLBID_HASH %x %x\n", readl(isp->pb_base + TLBID_HASH_ENABLE_63_32), readl(isp->pb_base + TLBID_HASH_ENABLE_95_64)); } else { writel(BIT(22), isp->pb_base + TLBID_HASH_ENABLE_63_32); writel(BIT(1), isp->pb_base + TLBID_HASH_ENABLE_127_96); dev_dbg(&isp->pdev->dev, "TLBID_HASH %x %x\n", readl(isp->pb_base + TLBID_HASH_ENABLE_63_32), readl(isp->pb_base + TLBID_HASH_ENABLE_127_96)); } writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_CLEAR); writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_MASK); writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_ENABLE); /* LNL SW workaround for PS PD hang when PS sub-domain during PD */ writel(PS_FSM_CG, isp->base + BUTTRESS_REG_CG_CTRL_BITS); } void ipu7_buttress_restore(struct ipu7_device *isp) { struct ipu7_buttress *b = &isp->buttress; ipu7_buttress_setup(isp); writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_IDLE_WDT); } int ipu7_buttress_init(struct ipu7_device *isp) { int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; struct ipu7_buttress *b = &isp->buttress; u32 val; mutex_init(&b->power_mutex); mutex_init(&b->auth_mutex); mutex_init(&b->cons_mutex); mutex_init(&b->ipc_mutex); init_completion(&b->ish.send_complete); init_completion(&b->cse.send_complete); init_completion(&b->ish.recv_complete); init_completion(&b->cse.recv_complete); b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; /* no ISH on IPU7 */ memset(&b->ish, 0, sizeof(b->ish)); INIT_LIST_HEAD(&b->constraints); isp->secure_mode = ipu7_buttress_get_secure_mode(isp); val = readl(isp->base + BUTTRESS_REG_IPU_SKU); dev_info(&isp->pdev->dev, "IPU%u SKU %u in %s mode mask 0x%x\n", val & 0xf, (val >> 4) & 0x7, isp->secure_mode ? 
"secure" : "non-secure", readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_IDLE_WDT); b->ref_clk = 384; ipu7_buttress_setup(isp); ret = device_create_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); if (ret) { dev_err(&isp->pdev->dev, "Create min freq file failed\n"); goto err_mutex_destroy; } ret = device_create_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); if (ret) { dev_err(&isp->pdev->dev, "Create max freq file failed\n"); goto err_remove_min_freq_file; } ret = device_create_file(&isp->pdev->dev, &dev_attr_psys_fused_efficient_freq); if (ret) { dev_err(&isp->pdev->dev, "Create efficient freq file failed\n"); goto err_remove_max_freq_file; } /* Retry couple of times in case of CSE initialization is delayed */ do { ret = ipu7_buttress_ipc_reset(isp, &b->cse); if (ret) { dev_warn(&isp->pdev->dev, "IPC reset protocol failed, retrying\n"); } else { dev_dbg(&isp->pdev->dev, "IPC reset done\n"); return 0; } } while (ipc_reset_retry--); dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); err_remove_max_freq_file: device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); err_remove_min_freq_file: device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); err_mutex_destroy: mutex_destroy(&b->power_mutex); mutex_destroy(&b->auth_mutex); mutex_destroy(&b->cons_mutex); mutex_destroy(&b->ipc_mutex); return ret; } void ipu7_buttress_exit(struct ipu7_device *isp) { struct ipu7_buttress *b = &isp->buttress; writel(0, isp->base + BUTTRESS_REG_IRQ_ENABLE); device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_efficient_freq); device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); mutex_destroy(&b->power_mutex); mutex_destroy(&b->auth_mutex); mutex_destroy(&b->cons_mutex); mutex_destroy(&b->ipc_mutex); } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-buttress.h000066400000000000000000000055021465530421300271640ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_BUTTRESS_H #define IPU7_BUTTRESS_H #include #include #include #include struct device; struct ipu7_device; struct ipu7_buttress_ctrl { u32 subsys_id; u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; u32 ratio; u32 ratio_shift; u32 cdyn; u32 cdyn_shift; bool started; u32 ovrd_clk; u32 own_clk_ack; }; struct ipu7_buttress_fused_freqs { u32 min_freq; u32 max_freq; u32 efficient_freq; }; struct ipu7_buttress_ipc { struct completion send_complete; struct completion recv_complete; u32 nack; u32 nack_mask; u32 recv_data; u32 csr_out; u32 csr_in; u32 db0_in; u32 db0_out; u32 data0_out; u32 data0_in; }; struct ipu7_buttress { struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; struct ipu7_buttress_ipc cse; struct ipu7_buttress_ipc ish; struct list_head constraints; struct ipu7_buttress_fused_freqs psys_fused_freqs; u32 psys_min_freq; u32 wdt_cached_value; u8 psys_force_ratio; bool force_suspend; u32 ref_clk; }; enum ipu7_buttress_ipc_domain { IPU_BUTTRESS_IPC_CSE, IPU_BUTTRESS_IPC_ISH, }; struct ipu7_buttress_constraint { struct list_head list; u32 min_freq; }; struct ipu7_ipc_buttress_bulk_msg { u32 cmd; u32 expected_resp; bool require_resp; u8 cmd_size; }; int ipu7_buttress_ipc_reset(struct ipu7_device *isp, struct ipu7_buttress_ipc *ipc); int ipu7_buttress_power(struct device *dev, struct ipu7_buttress_ctrl *ctrl, bool on); bool 
ipu7_buttress_get_secure_mode(struct ipu7_device *isp); int ipu7_buttress_authenticate(struct ipu7_device *isp); int ipu7_buttress_reset_authentication(struct ipu7_device *isp); bool ipu7_buttress_auth_done(struct ipu7_device *isp); int ipu7_buttress_get_isys_freq(struct ipu7_device *isp, u32 *freq); int ipu7_buttress_get_psys_freq(struct ipu7_device *isp, u32 *freq); int ipu7_buttress_start_tsc_sync(struct ipu7_device *isp); void ipu7_buttress_tsc_read(struct ipu7_device *isp, u64 *val); u64 ipu7_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu7_device *isp); irqreturn_t ipu7_buttress_isr(int irq, void *isp_ptr); irqreturn_t ipu7_buttress_isr_threaded(int irq, void *isp_ptr); int ipu7_buttress_debugfs_init(struct ipu7_device *isp); int ipu7_buttress_init(struct ipu7_device *isp); void ipu7_buttress_exit(struct ipu7_device *isp); void ipu7_buttress_csi_port_config(struct ipu7_device *isp, u32 legacy, u32 combo); void ipu7_buttress_restore(struct ipu7_device *isp); int ipu7_buttress_psys_freq_get(void *data, u64 *val); void ipu7_buttress_wakeup_is_uc(const struct ipu7_device *isp); void ipu7_buttress_wakeup_ps_uc(const struct ipu7_device *isp); u32 ipu7_buttress_get_ref_clk(const struct ipu7_device *isp); #endif /* IPU7_BUTTRESS_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-cpd.c000066400000000000000000000143151465530421300260540ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 - 2024 Intel Corporation */ #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(6, 0, 0) #include #else #include #endif #include #include #include #include #include "ipu7.h" #include "ipu7-cpd.h" /* $CPD */ #define CPD_HDR_MARK 0x44504324 /* Maximum size is 4K DWORDs or 16KB */ #define MAX_MANIFEST_SIZE (SZ_4K * sizeof(u32)) #define CPD_MANIFEST_IDX 0 #define CPD_BINARY_START_IDX 1 #define CPD_METADATA_START_IDX 2 #define CPD_BINARY_NUM 2 /* ISYS + PSYS */ /* * Entries include: * 1 manifest entry. * 1 metadata entry for each sub system(ISYS and PSYS). * 1 binary entry for each sub system(ISYS and PSYS). 
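* With CPD_BINARY_NUM == 2 (ISYS + PSYS), this adds up to 2 * 2 + 1 = 5 entries, which is exactly what CPD_ENTRY_NUM below encodes.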
*/ #define CPD_ENTRY_NUM (CPD_BINARY_NUM * 2 + 1) #define CPD_METADATA_ATTR 0xa #define CPD_METADATA_IPL 0x1c #define ONLINE_METADATA_SIZE 128 #define ONLINE_METADATA_LINES 6 static inline struct ipu7_cpd_ent *ipu7_cpd_get_entry(const void *cpd, int idx) { const struct ipu7_cpd_hdr *cpd_hdr = cpd; return ((struct ipu7_cpd_ent *)((u8 *)cpd + cpd_hdr->hdr_len)) + idx; } #define ipu7_cpd_get_manifest(cpd) ipu7_cpd_get_entry(cpd, 0) static struct ipu7_cpd_metadata *ipu7_cpd_get_metadata(const void *cpd, int idx) { struct ipu7_cpd_ent *cpd_ent = ipu7_cpd_get_entry(cpd, CPD_METADATA_START_IDX + idx * 2); return (struct ipu7_cpd_metadata *)((u8 *)cpd + cpd_ent->offset); } static int ipu7_cpd_validate_cpd(struct ipu7_device *isp, const void *cpd, unsigned long data_size) { const struct ipu7_cpd_hdr *cpd_hdr = cpd; struct ipu7_cpd_ent *ent; unsigned int i; u8 len; len = cpd_hdr->hdr_len; /* Ensure cpd hdr is within moduledata */ if (data_size < len) { dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); return -EINVAL; } /* Check for CPD file marker */ if (cpd_hdr->hdr_mark != CPD_HDR_MARK) { dev_err(&isp->pdev->dev, "Invalid CPD header marker\n"); return -EINVAL; } /* Sanity check for CPD entry header */ if (cpd_hdr->ent_cnt != CPD_ENTRY_NUM) { dev_err(&isp->pdev->dev, "Invalid CPD entry number %d\n", cpd_hdr->ent_cnt); return -EINVAL; } if ((data_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { dev_err(&isp->pdev->dev, "Invalid CPD entry headers\n"); return -EINVAL; } /* Ensure that all entries are within moduledata */ ent = (struct ipu7_cpd_ent *)(((u8 *)cpd_hdr) + len); for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { if (data_size < ent->offset || data_size - ent->offset < ent->len) { dev_err(&isp->pdev->dev, "Invalid CPD entry %d\n", i); return -EINVAL; } } return 0; } static int ipu7_cpd_validate_metadata(struct ipu7_device *isp, const void *cpd, int idx) { const struct ipu7_cpd_ent *cpd_ent = ipu7_cpd_get_entry(cpd, CPD_METADATA_START_IDX + idx * 2); const struct ipu7_cpd_metadata *metadata = ipu7_cpd_get_metadata(cpd, idx); /* Sanity check for metadata size */ if (cpd_ent->len != sizeof(struct ipu7_cpd_metadata)) { dev_err(&isp->pdev->dev, "Invalid metadata size\n"); return -EINVAL; } /* Validate type and length of metadata sections */ if (metadata->attr.hdr.type != CPD_METADATA_ATTR) { dev_err(&isp->pdev->dev, "Invalid metadata attr type (%d)\n", metadata->attr.hdr.type); return -EINVAL; } if (metadata->attr.hdr.len != sizeof(struct ipu7_cpd_metadata_attr)) { dev_err(&isp->pdev->dev, "Invalid metadata attr size (%d)\n", metadata->attr.hdr.len); return -EINVAL; } if (metadata->ipl.hdr.type != CPD_METADATA_IPL) { dev_err(&isp->pdev->dev, "Invalid metadata ipl type (%d)\n", metadata->ipl.hdr.type); return -EINVAL; } if (metadata->ipl.hdr.len != sizeof(struct ipu7_cpd_metadata_ipl)) { dev_err(&isp->pdev->dev, "Invalid metadata ipl size (%d)\n", metadata->ipl.hdr.len); return -EINVAL; } return 0; } int ipu7_cpd_validate_cpd_file(struct ipu7_device *isp, const void *cpd_file, unsigned long cpd_file_size) { struct ipu7_cpd_ent *ent; unsigned int i; int ret; char *buf; ret = ipu7_cpd_validate_cpd(isp, cpd_file, cpd_file_size); if (ret) { dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); return -EINVAL; } /* Sanity check for manifest size */ ent = ipu7_cpd_get_manifest(cpd_file); if (ent->len > MAX_MANIFEST_SIZE) { dev_err(&isp->pdev->dev, "Invalid manifest size\n"); return -EINVAL; } /* Validate metadata */ for (i = 0; i < CPD_BINARY_NUM; i++) { ret = ipu7_cpd_validate_metadata(isp, 
cpd_file, i); if (ret) { dev_err(&isp->pdev->dev, "Invalid metadata%d\n", i); return ret; } } /* Get fw binary version. */ buf = kmalloc(ONLINE_METADATA_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < CPD_BINARY_NUM; i++) { char *lines[ONLINE_METADATA_LINES]; char *info = buf; unsigned int l; ent = ipu7_cpd_get_entry(cpd_file, CPD_BINARY_START_IDX + i * 2); memcpy(info, (u8 *)cpd_file + ent->offset + ent->len - ONLINE_METADATA_SIZE, ONLINE_METADATA_SIZE); for (l = 0; l < ONLINE_METADATA_LINES; l++) { lines[l] = strsep((char **)&info, "\n"); if (!lines[l]) break; } if (l < ONLINE_METADATA_LINES) { dev_err(&isp->pdev->dev, "Failed to parse fw binary%d info.\n", i); continue; } dev_info(&isp->pdev->dev, "FW binary%d info:\n", i); dev_info(&isp->pdev->dev, "Name: %s\n", lines[1]); dev_info(&isp->pdev->dev, "Version: %s\n", lines[2]); dev_info(&isp->pdev->dev, "Timestamp: %s\n", lines[3]); dev_info(&isp->pdev->dev, "Commit: %s\n", lines[4]); } kfree(buf); return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_cpd_validate_cpd_file, INTEL_IPU7); int ipu7_cpd_copy_binary(const void *cpd, const char *name, void *code_region, u32 *entry) { unsigned int i; for (i = 0; i < CPD_BINARY_NUM; i++) { const struct ipu7_cpd_ent *binary = ipu7_cpd_get_entry(cpd, CPD_BINARY_START_IDX + i * 2); const struct ipu7_cpd_metadata *metadata = ipu7_cpd_get_metadata(cpd, i); if (!strncmp(binary->name, name, sizeof(binary->name))) { memcpy(code_region + metadata->ipl.param[0], cpd + binary->offset, binary->len); *entry = metadata->ipl.param[2]; return 0; } } return -ENOENT; } EXPORT_SYMBOL_NS_GPL(ipu7_cpd_copy_binary, INTEL_IPU7); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-cpd.h000066400000000000000000000021611465530421300260550ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 - 2024 Intel Corporation */ #ifndef IPU7_CPD_H #define IPU7_CPD_H struct ipu7_device; struct ipu7_cpd_hdr { u32 hdr_mark; u32 ent_cnt; u8 hdr_ver; u8 ent_ver; u8 hdr_len; u8 rsvd; u8 partition_name[4]; u32 crc32; } __packed; struct ipu7_cpd_ent { u8 name[12]; u32 offset; u32 len; u8 rsvd[4]; } __packed; struct ipu7_cpd_metadata_hdr { u32 type; u32 len; } __packed; struct ipu7_cpd_metadata_attr { struct ipu7_cpd_metadata_hdr hdr; u8 compression_type; u8 encryption_type; u8 rsvd[2]; u32 uncompressed_size; u32 compressed_size; u32 module_id; u8 hash[48]; } __packed; struct ipu7_cpd_metadata_ipl { struct ipu7_cpd_metadata_hdr hdr; u32 param[4]; u8 rsvd[8]; } __packed; struct ipu7_cpd_metadata { struct ipu7_cpd_metadata_attr attr; struct ipu7_cpd_metadata_ipl ipl; } __packed; int ipu7_cpd_validate_cpd_file(struct ipu7_device *isp, const void *cpd_file, unsigned long cpd_file_size); int ipu7_cpd_copy_binary(const void *cpd, const char *name, void *code_region, u32 *entry); #endif /* IPU7_CPD_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-dma.c000066400000000000000000000310271465530421300260460ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-dma.h" #include "ipu7-mmu.h" struct pci_dev; struct vm_info { struct list_head list; struct page **pages; dma_addr_t ipu7_iova; void *vaddr; unsigned long size; }; static struct vm_info *get_vm_info(struct ipu7_mmu *mmu, dma_addr_t iova) { struct vm_info *info, *save; 
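/* Annotation: linearly scan this MMU's vma_list for the tracked allocation whose [ipu7_iova, ipu7_iova + size) range contains the requested IOVA. */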
	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
		if (iova >= info->ipu7_iova &&
		    iova < (info->ipu7_iova + info->size))
			return info;
	}

	return NULL;
}

static void __dma_clear_buffer(struct page *page, size_t size,
			       unsigned long attrs)
{
	void *ptr;

	if (!page)
		return;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		clflush_cache_range(ptr, size);
}

static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
					gfp_t gfp, unsigned long attrs)
{
	int count = PHYS_PFN(size);
	int array_size = count * sizeof(struct page *);
	struct page **pages;
	int i = 0;

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		return NULL;

	gfp |= __GFP_NOWARN;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (j--)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

static void __dma_free_buffer(struct device *dev, struct page **pages,
			      size_t size, unsigned long attrs)
{
	unsigned int count = PHYS_PFN(size);
	unsigned int i;

	for (i = 0; i < count && pages[i]; i++)
		__free_pages(pages[i], 0);
	kvfree(pages);
}

static void ipu7_dma_sync_single_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 size_t size,
					 enum dma_data_direction dir)
{
	void *vaddr;
	u32 offset;
	struct vm_info *info;
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;

	info = get_vm_info(mmu, dma_handle);
	if (WARN_ON(!info))
		return;

	offset = dma_handle - info->ipu7_iova;
	if (WARN_ON(size > (info->size - offset)))
		return;

	vaddr = info->vaddr + offset;
	clflush_cache_range(vaddr, size);
}

static void ipu7_dma_sync_sg_for_cpu(struct device *dev,
				     struct scatterlist *sglist,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
}

static void *ipu7_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu7_bus_device(dev)->isp->pdev;
	dma_addr_t pci_dma_addr, ipu7_iova;
	struct vm_info *info;
	unsigned long count;
	struct page **pages;
	struct iova *iova;
	unsigned int i;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);
	count = PHYS_PFN(size);

	iova = alloc_iova(&mmu->dmap->iovad, count,
			  PHYS_PFN(dma_get_mask(dev)), 0);
	if (!iova)
		goto out_kfree;

	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		goto out_free_iova;

	dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n",
		size, iova->pfn_lo, iova->pfn_hi);
	for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
		pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0,
						  PAGE_SIZE,
						  DMA_BIDIRECTIONAL, attrs);
		dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n",
			&pci_dma_addr);
		if (dma_mapping_error(&pdev->dev, pci_dma_addr)) {
			dev_err(dev, "pci_dma_mapping for page[%d] failed", i);
			goto out_unmap;
		}

		ret = ipu7_mmu_map(mmu->dmap->mmu_info,
				   PFN_PHYS(iova->pfn_lo + i),
				   pci_dma_addr, PAGE_SIZE);
		if (ret) {
			dev_err(dev,
				"ipu7_mmu_map for pci_dma[%d] %pad failed",
				i, &pci_dma_addr);
			dma_unmap_page_attrs(&pdev->dev, pci_dma_addr,
					     PAGE_SIZE, DMA_BIDIRECTIONAL,
					     attrs);
			goto out_unmap;
		}
	}

	info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!info->vaddr)
		goto out_unmap;

	*dma_handle = PFN_PHYS(iova->pfn_lo);

	info->pages = pages;
	info->ipu7_iova = *dma_handle;
	info->size = size;
	list_add(&info->list, &mmu->vma_list);

	return info->vaddr;

out_unmap:
	while (i--) {
		ipu7_iova = PFN_PHYS(iova->pfn_lo + i);
		pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     ipu7_iova);
		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, attrs);

		ipu7_mmu_unmap(mmu->dmap->mmu_info, ipu7_iova, PAGE_SIZE);
	}

	__dma_free_buffer(dev, pages, size, attrs);

out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
out_kfree:
	kfree(info);

	return NULL;
}

static void ipu7_dma_free(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle, unsigned long attrs)
{
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu7_bus_device(dev)->isp->pdev;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      PHYS_PFN(dma_handle));
	dma_addr_t pci_dma_addr, ipu7_iova;
	struct vm_info *info;
	struct page **pages;
	unsigned int i;

	if (WARN_ON(!iova))
		return;

	info = get_vm_info(mmu, dma_handle);
	if (WARN_ON(!info))
		return;

	if (WARN_ON(!info->vaddr))
		return;

	if (WARN_ON(!info->pages))
		return;

	list_del(&info->list);

	size = PAGE_ALIGN(size);

	pages = info->pages;

	vunmap(vaddr);

	for (i = 0; i < PHYS_PFN(size); i++) {
		ipu7_iova = PFN_PHYS(iova->pfn_lo + i);
		pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     ipu7_iova);
		dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, attrs);
	}

	ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));

	__dma_free_buffer(dev, pages, size, attrs);

	mmu->tlb_invalidate(mmu);

	__free_iova(&mmu->dmap->iovad, iova);

	kfree(info);
}

static int ipu7_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			 void *addr, dma_addr_t iova, size_t size,
			 unsigned long attrs)
{
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;
	size_t count = PHYS_PFN(PAGE_ALIGN(size));
	struct vm_info *info;
	size_t i;
	int ret;

	info = get_vm_info(mmu, iova);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	if (size > info->size)
		return -EFAULT;

	for (i = 0; i < count; i++) {
		ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i),
				     info->pages[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void ipu7_dma_unmap_sg(struct device *dev,
			      struct scatterlist *sglist,
			      int nents, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu7_bus_device(dev)->isp->pdev;
	struct iova *iova = find_iova(&mmu->dmap->iovad,
				      PHYS_PFN(sg_dma_address(sglist)));
	int i, npages, count;
	struct scatterlist *sg;
	dma_addr_t pci_dma_addr;

	if (!nents)
		return;

	if (WARN_ON(!iova))
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu7_dma_sync_sg_for_cpu(dev, sglist, nents,
					 DMA_BIDIRECTIONAL);

	/* get the nents as orig_nents given by caller */
	count = 0;
	npages = iova_size(iova);
	for_each_sg(sglist, sg, nents, i) {
		if (sg_dma_len(sg) == 0 ||
		    sg_dma_address(sg) == DMA_MAPPING_ERROR)
			break;

		npages -= PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
		count++;
		if (npages <= 0)
			break;
	}

	/*
	 * Before IPU7 mmu unmap, return the pci dma address back to sg
	 * assume the nents is less than orig_nents as the least granule
	 * is 1 SZ_4K page
	 */
	dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
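	/*
	 * At this point sg_dma_address() holds the IPU IOVA set up by
	 * ipu7_dma_map_sg(); translate each entry back to its PCI DMA
	 * address so that dma_unmap_sg_attrs() below can release it.
	 */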
	for_each_sg(sglist, sg, count, i) {
		dev_dbg(dev, "ipu unmap sg[%d] %pad\n", i,
			&sg_dma_address(sg));
		pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info,
						     sg_dma_address(sg));
		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
			&pci_dma_addr, i);
		sg_dma_address(sg) = pci_dma_addr;
	}

	dev_dbg(dev, "ipu7_mmu_unmap low pfn %lu high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);
	ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));

	mmu->tlb_invalidate(mmu);

	dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);

	__free_iova(&mmu->dmap->iovad, iova);
}

static int ipu7_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;
	struct pci_dev *pdev = to_ipu7_bus_device(dev)->isp->pdev;
	struct scatterlist *sg;
	struct iova *iova;
	size_t npages = 0;
	unsigned long iova_addr;
	int i, count;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset) {
			dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n",
				i, sg->offset);
			return -EFAULT;
		}
	}

	dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
	count = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
	if (count <= 0) {
		dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
		return -EIO;
	}

	dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);

	for_each_sg(sglist, sg, count, i)
		npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));

	if (!mmu->dmap->mmu_info->fw_code_region_mapped &&
	    PFN_PHYS(npages) == IPU_FW_CODE_REGION_SIZE &&
	    !to_ipu7_bus_device(dev)->isp->secure_mode) {
		/*
		 * Assume the first mapping with size IPU_FW_CODE_REGION_SIZE
		 * is for fw code region. Allocate iova with size aligned to
		 * IPU_FW_CODE_REGION_SIZE. Only apply for non-secure mode.
		 */
		/*
		 * Reserve iova from 64MB to 80MB.
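		 * (The range is taken from IPU_FW_CODE_REGION_START and
		 * IPU_FW_CODE_REGION_END below.)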
		 * TODO: Get start address from register instead of
		 * hard coding.
		 */
		unsigned long lo, hi;

		lo = PFN_DOWN(IPU_FW_CODE_REGION_START);
		hi = PFN_DOWN(IPU_FW_CODE_REGION_END);
		iova = reserve_iova(&mmu->dmap->iovad, lo, hi);
		if (!iova) {
			dev_err(dev, "Reserve iova[%lx:%lx] failed.\n",
				lo, hi);
			return -ENOMEM;
		}
		dev_dbg(dev, "iova[%lx:%lx] reserved for FW code.\n", lo, hi);
		mmu->dmap->mmu_info->fw_code_region_mapped = true;
	} else {
		iova = alloc_iova(&mmu->dmap->iovad, npages,
				  PHYS_PFN(dma_get_mask(dev)), 0);
	}
	if (!iova)
		return -ENOMEM;

	dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
		iova->pfn_hi);

	iova_addr = iova->pfn_lo;
	for_each_sg(sglist, sg, count, i) {
		int ret;

		dev_dbg(dev,
			"mapping entry %d: iova 0x%llx phy %pad size %d\n",
			i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
			sg_dma_len(sg));

		ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   sg_dma_address(sg),
				   PAGE_ALIGN(sg_dma_len(sg)));
		if (ret) {
			ipu7_dma_unmap_sg(dev, sglist, i, dir, attrs);
			return ret;
		}

		sg_dma_address(sg) = PFN_PHYS(iova_addr);

		iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		ipu7_dma_sync_sg_for_cpu(dev, sglist, nents,
					 DMA_BIDIRECTIONAL);

	mmu->tlb_invalidate(mmu);

	return count;
}

/*
 * Create scatter-list for the already allocated DMA buffer
 */
static int ipu7_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				void *cpu_addr, dma_addr_t handle,
				size_t size, unsigned long attrs)
{
	struct ipu7_mmu *mmu = to_ipu7_bus_device(dev)->mmu;
	struct vm_info *info;
	int n_pages;
	int ret = 0;

	info = get_vm_info(mmu, handle);
	if (!info)
		return -EFAULT;

	if (!info->vaddr)
		return -EFAULT;

	if (WARN_ON(!info->pages))
		return -ENOMEM;

	n_pages = PHYS_PFN(PAGE_ALIGN(size));

	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		dev_warn(dev, "IPU get sgt table failed\n");

	return ret;
}

const struct dma_map_ops ipu7_dma_ops = {
	.alloc = ipu7_dma_alloc,
	.free = ipu7_dma_free,
	.mmap = ipu7_dma_mmap,
	.map_sg = ipu7_dma_map_sg,
	.unmap_sg = ipu7_dma_unmap_sg,
	.sync_single_for_cpu = ipu7_dma_sync_single_for_cpu,
	.sync_single_for_device = ipu7_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = ipu7_dma_sync_sg_for_cpu,
	.sync_sg_for_device = ipu7_dma_sync_sg_for_cpu,
	.get_sgtable = ipu7_dma_get_sgtable,
};
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-dma.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 - 2024 Intel Corporation
 */

#ifndef IPU7_DMA_H
#define IPU7_DMA_H

#include
#include

struct ipu7_mmu_info;

struct ipu7_dma_mapping {
	struct ipu7_mmu_info *mmu_info;
	struct iova_domain iovad;
};

extern const struct dma_map_ops ipu7_dma_ops;

#endif /* IPU7_DMA_H */
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-fw-isys.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - 2024 Intel Corporation
 */

#include
#include
#include
#include
#include
#include

#include "abi/ipu7_fw_insys_config_abi.h"
#include "abi/ipu7_fw_isys_abi.h"

#include "ipu7.h"
#include "ipu7-boot.h"
#include "ipu7-bus.h"
#include "ipu7-fw-isys.h"
#include "ipu7-isys.h"
#include "ipu7-platform-regs.h"
#include "ipu7-syscom.h"

static const char send_msg_types[N_IPU_INSYS_SEND_TYPE][32] = {
	"STREAM_OPEN",
	"STREAM_START_AND_CAPTURE",
	"STREAM_CAPTURE",
	"STREAM_ABORT",
	"STREAM_FLUSH",
	"STREAM_CLOSE"
};

int ipu7_fw_isys_complex_cmd(struct
ipu7_isys *isys, const unsigned int stream_handle, void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, size_t size, u16 send_type) { struct ipu7_syscom_context *ctx = isys->adev->syscom; struct device *dev = &isys->adev->auxdev.dev; struct ipu7_insys_send_queue_token *token; if (send_type >= N_IPU_INSYS_SEND_TYPE) return -EINVAL; dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]); /* * Time to flush cache in case we have some payload. Not all messages * have that */ if (cpu_mapped_buf) clflush_cache_range(cpu_mapped_buf, size); token = ipu7_syscom_get_token(ctx, stream_handle + IPU_INSYS_INPUT_MSG_QUEUE); if (!token) return -EBUSY; token->addr = dma_mapped_buf; token->buf_handle = (unsigned long)cpu_mapped_buf; token->send_type = send_type; token->stream_id = stream_handle; token->flag = IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_NONE; ipu7_syscom_put_token(ctx, stream_handle + IPU_INSYS_INPUT_MSG_QUEUE); /* now wakeup FW */ ipu7_buttress_wakeup_is_uc(isys->adev->isp); return 0; } int ipu7_fw_isys_simple_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, u16 send_type) { return ipu7_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, send_type); } int ipu7_fw_isys_init(struct ipu7_isys *isys) { struct syscom_queue_config *queue_configs; struct ipu7_bus_device *adev = isys->adev; struct device *dev = &adev->auxdev.dev; struct ipu7_insys_config *isys_config; struct ipu7_syscom_context *syscom; dma_addr_t isys_config_dma_addr; unsigned int i, num_queues; u32 freq; int ret; /* Allocate and init syscom context. */ syscom = devm_kzalloc(dev, sizeof(struct ipu7_syscom_context), GFP_KERNEL); if (!syscom) return -ENOMEM; adev->syscom = syscom; syscom->num_input_queues = IPU_INSYS_MAX_INPUT_QUEUES; syscom->num_output_queues = IPU_INSYS_MAX_OUTPUT_QUEUES; num_queues = syscom->num_input_queues + syscom->num_output_queues; queue_configs = devm_kzalloc(dev, FW_QUEUE_CONFIG_SIZE(num_queues), GFP_KERNEL); if (!queue_configs) { ipu7_fw_isys_release(isys); return -ENOMEM; } syscom->queue_configs = queue_configs; queue_configs[IPU_INSYS_OUTPUT_MSG_QUEUE].max_capacity = IPU_ISYS_SIZE_RECV_QUEUE; queue_configs[IPU_INSYS_OUTPUT_MSG_QUEUE].token_size_in_bytes = sizeof(struct ipu7_insys_resp); queue_configs[IPU_INSYS_OUTPUT_LOG_QUEUE].max_capacity = IPU_ISYS_SIZE_LOG_QUEUE; queue_configs[IPU_INSYS_OUTPUT_LOG_QUEUE].token_size_in_bytes = sizeof(struct ipu7_insys_resp); queue_configs[IPU_INSYS_OUTPUT_RESERVED_QUEUE].max_capacity = 0; queue_configs[IPU_INSYS_OUTPUT_RESERVED_QUEUE].token_size_in_bytes = 0; queue_configs[IPU_INSYS_INPUT_DEV_QUEUE].max_capacity = IPU_ISYS_MAX_STREAMS; queue_configs[IPU_INSYS_INPUT_DEV_QUEUE].token_size_in_bytes = sizeof(struct ipu7_insys_send_queue_token); for (i = IPU_INSYS_INPUT_MSG_QUEUE; i < num_queues; i++) { queue_configs[i].max_capacity = IPU_ISYS_SIZE_SEND_QUEUE; queue_configs[i].token_size_in_bytes = sizeof(struct ipu7_insys_send_queue_token); } /* Allocate ISYS subsys config. 
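	 * Its DMA address is handed to FW below through
	 * ipu7_boot_init_boot_config().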
*/ isys_config = dma_alloc_attrs(dev, sizeof(struct ipu7_insys_config), &isys_config_dma_addr, GFP_KERNEL, 0); if (!isys_config) { dev_err(dev, "Failed to allocate isys subsys config.\n"); ipu7_fw_isys_release(isys); return -ENOMEM; } isys->subsys_config = isys_config; isys->subsys_config_dma_addr = isys_config_dma_addr; memset(isys_config, 0, sizeof(struct ipu7_insys_config)); isys_config->logger_config.use_source_severity = 0; isys_config->logger_config.use_channels_enable_bitmask = 1; isys_config->logger_config.channels_enable_bitmask = LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK; isys_config->wdt_config.wdt_timer1_us = 0; isys_config->wdt_config.wdt_timer2_us = 0; ret = ipu7_buttress_get_isys_freq(adev->isp, &freq); if (ret) { dev_err(dev, "Failed to get ISYS frequency.\n"); ipu7_fw_isys_release(isys); return ret; } dma_sync_single_for_device(dev, isys_config_dma_addr, sizeof(struct ipu7_insys_config), DMA_TO_DEVICE); ret = ipu7_boot_init_boot_config(adev, queue_configs, num_queues, freq, isys_config_dma_addr); if (ret) ipu7_fw_isys_release(isys); return ret; } void ipu7_fw_isys_release(struct ipu7_isys *isys) { struct ipu7_bus_device *adev = isys->adev; ipu7_boot_release_boot_config(adev); if (isys->subsys_config) { dma_free_attrs(&adev->auxdev.dev, sizeof(struct ipu7_insys_config), isys->subsys_config, isys->subsys_config_dma_addr, 0); isys->subsys_config = NULL; isys->subsys_config_dma_addr = 0; } } int ipu7_fw_isys_open(struct ipu7_isys *isys) { return ipu7_boot_start_fw(isys->adev); } int ipu7_fw_isys_close(struct ipu7_isys *isys) { return ipu7_boot_stop_fw(isys->adev); } struct ipu7_insys_resp *ipu7_fw_isys_get_resp(struct ipu7_isys *isys) { return ipu7_syscom_get_token(isys->adev->syscom, IPU_INSYS_OUTPUT_MSG_QUEUE); } void ipu7_fw_isys_put_resp(struct ipu7_isys *isys) { ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_MSG_QUEUE); } #ifdef ENABLE_FW_OFFLINE_LOGGER /* TODO: address the memory crash caused by offline logger */ int ipu7_fw_isys_get_log(struct ipu7_isys *isys) { struct device *dev = &isys->adev->auxdev.dev; struct isys_fw_log *fw_log = isys->fw_log; struct ia_gofo_msg_log *log_msg; u8 msg_type, msg_len; u32 count, fmt_id; void *token; token = ipu7_syscom_get_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); if (!token) return -ENODATA; while (token) { log_msg = (struct ia_gofo_msg_log *)token; msg_type = log_msg->header.tlv_header.tlv_type; msg_len = log_msg->header.tlv_header.tlv_len32; if (msg_type != IPU_MSG_TYPE_DEV_LOG || !msg_len) dev_warn(dev, "Invalid msg data from Log queue!\n"); count = log_msg->log_info_ts.log_info.log_counter; fmt_id = log_msg->log_info_ts.log_info.fmt_id; if (count > fw_log->count + 1) dev_warn(dev, "log msg lost, count %u+1 != %u!\n", count, fw_log->count); if (fmt_id == IA_GOFO_MSG_LOG_FMT_ID_INVALID) { dev_err(dev, "invalid log msg fmt_id 0x%x!\n", fmt_id); ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); return -EIO; } memcpy(fw_log->head, (void *)&log_msg->log_info_ts, sizeof(struct ia_gofo_msg_log_info_ts)); fw_log->count = count; fw_log->head += sizeof(struct ia_gofo_msg_log_info_ts); fw_log->size += sizeof(struct ia_gofo_msg_log_info_ts); ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); token = ipu7_syscom_get_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); }; return 0; } #endif void ipu7_fw_isys_dump_stream_cfg(struct device *dev, struct ipu7_insys_stream_cfg *cfg) { unsigned int i; dev_dbg(dev, "---------------------------\n"); dev_dbg(dev, 
"IPU_FW_ISYS_STREAM_CFG_DATA\n"); dev_dbg(dev, ".port id %d\n", cfg->port_id); dev_dbg(dev, ".vc %d\n", cfg->vc); dev_dbg(dev, ".nof_input_pins = %d\n", cfg->nof_input_pins); dev_dbg(dev, ".nof_output_pins = %d\n", cfg->nof_output_pins); dev_dbg(dev, ".stream_msg_map = 0x%x\n", cfg->stream_msg_map); for (i = 0; i < cfg->nof_input_pins; i++) { dev_dbg(dev, ".input_pin[%d]:\n", i); dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt); dev_dbg(dev, "\t.disable_mipi_unpacking = %d\n", cfg->input_pins[i].disable_mipi_unpacking); dev_dbg(dev, "\t.dt_rename_mode = %d\n", cfg->input_pins[i].dt_rename_mode); dev_dbg(dev, "\t.mapped_dt = 0x%0x\n", cfg->input_pins[i].mapped_dt); dev_dbg(dev, "\t.input_res = %d x %d\n", cfg->input_pins[i].input_res.width, cfg->input_pins[i].input_res.height); dev_dbg(dev, "\t.sync_msg_map = 0x%x\n", cfg->input_pins[i].sync_msg_map); } for (i = 0; i < cfg->nof_output_pins; i++) { dev_dbg(dev, ".output_pin[%d]:\n", i); dev_dbg(dev, "\t.input_pin_id = %d\n", cfg->output_pins[i].input_pin_id); dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride); dev_dbg(dev, "\t.send_irq = %d\n", cfg->output_pins[i].send_irq); dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft); dev_dbg(dev, "\t.link.buffer_lines = %d\n", cfg->output_pins[i].link.buffer_lines); dev_dbg(dev, "\t.link.foreign_key = %d\n", cfg->output_pins[i].link.foreign_key); dev_dbg(dev, "\t.link.granularity_pointer_update = %d\n", cfg->output_pins[i].link.granularity_pointer_update); dev_dbg(dev, "\t.link.msg_link_streaming_mode = %d\n", cfg->output_pins[i].link.msg_link_streaming_mode); dev_dbg(dev, "\t.link.pbk_id = %d\n", cfg->output_pins[i].link.pbk_id); dev_dbg(dev, "\t.link.pbk_slot_id = %d\n", cfg->output_pins[i].link.pbk_slot_id); dev_dbg(dev, "\t.link.dest = %d\n", cfg->output_pins[i].link.dest); dev_dbg(dev, "\t.link.use_sw_managed = %d\n", cfg->output_pins[i].link.use_sw_managed); dev_dbg(dev, "\t.crop.line_top = %d\n", cfg->output_pins[i].crop.line_top); dev_dbg(dev, "\t.crop.line_bottom = %d\n", cfg->output_pins[i].crop.line_bottom); dev_dbg(dev, "\t.dpcm_enable = %d\n", cfg->output_pins[i].dpcm.enable); dev_dbg(dev, "\t.dpcm.type = %d\n", cfg->output_pins[i].dpcm.type); dev_dbg(dev, "\t.dpcm.predictor = %d\n", cfg->output_pins[i].dpcm.predictor); } dev_dbg(dev, "---------------------------\n"); } void ipu7_fw_isys_dump_frame_buff_set(struct device *dev, struct ipu7_insys_buffset *buf, unsigned int outputs) { unsigned int i; dev_dbg(dev, "--------------------------\n"); dev_dbg(dev, "IPU_ISYS_BUFF_SET\n"); dev_dbg(dev, ".capture_msg_map = %d\n", buf->capture_msg_map); dev_dbg(dev, ".frame_id = %d\n", buf->frame_id); dev_dbg(dev, ".skip_frame = %d\n", buf->skip_frame); for (i = 0; i < outputs; i++) { dev_dbg(dev, ".output_pin[%d]:\n", i); dev_dbg(dev, "\t.user_token = %llx\n", buf->output_pins[i].user_token); dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr); } dev_dbg(dev, "---------------------------\n"); } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-fw-isys.h000066400000000000000000000026371465530421300267200ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_FW_ISYS_H #define IPU7_FW_ISYS_H #include #include "abi/ipu7_fw_isys_abi.h" struct device; struct ipu7_insys_buffset; struct ipu7_insys_stream_cfg; struct ipu7_isys; /* From here on type defines not coming from the ISYSAPI interface */ #ifndef UINT8_MAX #define UINT8_MAX (0xffUL) #endif #ifndef UINT16_MAX #define 
UINT16_MAX (0xffffUL) #endif int ipu7_fw_isys_init(struct ipu7_isys *isys); void ipu7_fw_isys_release(struct ipu7_isys *isys); int ipu7_fw_isys_open(struct ipu7_isys *isys); int ipu7_fw_isys_close(struct ipu7_isys *isys); void ipu7_fw_isys_dump_stream_cfg(struct device *dev, struct ipu7_insys_stream_cfg *cfg); void ipu7_fw_isys_dump_frame_buff_set(struct device *dev, struct ipu7_insys_buffset *buf, unsigned int outputs); int ipu7_fw_isys_simple_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, u16 send_type); int ipu7_fw_isys_complex_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, size_t size, u16 send_type); struct ipu7_insys_resp *ipu7_fw_isys_get_resp(struct ipu7_isys *isys); void ipu7_fw_isys_put_resp(struct ipu7_isys *isys); #ifdef ENABLE_FW_OFFLINE_LOGGER int ipu7_fw_isys_get_log(struct ipu7_isys *isys); #endif #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-csi-phy.c000066400000000000000000000602771465530421300276570ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) #include #endif #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-isys.h" #include "ipu7-isys-csi2.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-platform-regs.h" #include "ipu7-isys-csi-phy.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0) #define MIPI_CSI2_DT_RAW10 0x2b #endif #define PORT_A 0 #define PORT_B 1 #define PORT_C 2 #define PORT_D 3 struct ddlcal_counter_ref_s { u16 min_mbps; u16 max_mbps; u16 ddlcal_counter_ref; }; struct ddlcal_params { u16 min_mbps; u16 max_mbps; u16 oa_lanex_hsrx_cdphy_sel_fast; u16 ddlcal_max_phase; u16 ddlcal_dll_fbk; u16 ddlcal_ddl_coarse_bank; u16 fjump_deskew; u16 min_eye_opening_deskew; }; struct i_thssettle_params { u16 min_mbps; u16 max_mbps; u16 i_thssettle; }; struct i_coarse_target_params { u16 min_mbps; u16 max_mbps; u16 i_coarse_target; }; struct post_thres_params { u16 min_mbps; u16 max_mbps; u16 post_received_reset_thresh; u16 post_det_delay_thresh; }; /* lane2 for 4l3t, lane1 for 2l2t */ struct oa_lane_clk_div_params { u16 min_mbps; u16 max_mbps; u16 oa_lane_hsrx_hs_clk_div; }; struct des_delay_params { u16 min_mbps; u16 max_mbps; u16 deserializer_en_delay_deass_thresh; }; static const struct ddlcal_counter_ref_s table0[] = { {1500, 1999, 118}, {2000, 2499, 157}, {2500, 3499, 196}, {3500, 4499, 274}, {4500, 4500, 352}, {} }; static const struct ddlcal_params table1[] = { {1500, 1587, 0, 143, 17, 3, 4, 29}, {1588, 1687, 0, 135, 16, 2, 4, 27}, {1688, 1799, 0, 127, 15, 2, 4, 26}, {1800, 1928, 0, 119, 14, 2, 3, 24}, {1929, 2076, 0, 111, 13, 2, 3, 23}, {2077, 2249, 0, 103, 12, 1, 3, 21}, {2250, 2454, 0, 95, 11, 1, 3, 19}, {2455, 2699, 0, 87, 10, 1, 3, 18}, {2700, 2999, 0, 79, 9, 1, 2, 16}, {3000, 3229, 0, 71, 8, 0, 2, 15}, {3230, 3599, 1, 87, 10, 1, 3, 18}, {3600, 3999, 1, 79, 9, 1, 2, 16}, {4000, 4499, 1, 71, 8, 1, 2, 15}, {4500, 4500, 1, 63, 7, 0, 2, 13}, {} }; static const struct i_thssettle_params table2[] = { {80, 124, 24}, {125, 249, 20}, {250, 499, 16}, {500, 749, 14}, {750, 1499, 13}, {1500, 4500, 12}, {} }; static const struct oa_lane_clk_div_params table6[] = { {80, 159, 0x1}, {160, 319, 0x2}, {320, 639, 0x3}, {640, 1279, 0x4}, {1280, 2560, 0x5}, {2561, 4500, 0x6}, {} }; enum ppi_datawidth { PPI_DATAWIDTH_8BIT 
= 0, PPI_DATAWIDTH_16BIT = 1, }; static void dwc_phy_write(struct ipu7_isys *isys, u32 id, u32 addr, u16 data) { void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CDPHY_BASE(id); dev_dbg(&isys->adev->auxdev.dev, "phy write: reg 0x%lx = data 0x%04x", base + addr - isys_base, data); writew(data, base + addr); } static u16 dwc_phy_read(struct ipu7_isys *isys, u32 id, u32 addr) { u16 data; void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CDPHY_BASE(id); data = readw(base + addr); dev_dbg(&isys->adev->auxdev.dev, "phy read: reg 0x%lx = data 0x%04x", base + addr - isys_base, data); return data; } static void dwc_csi_write(struct ipu7_isys *isys, u32 id, u32 addr, u32 data) { struct device *dev = &isys->adev->auxdev.dev; void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(id); dev_dbg(dev, "csi write: reg 0x%lx = data 0x%08x", base + addr - isys_base, data); writel(data, base + addr); dev_dbg(dev, "csi read: reg 0x%lx = data 0x%08x", base + addr - isys_base, readl(base + addr)); } static void gpreg_write(struct ipu7_isys *isys, u32 id, u32 addr, u32 data) { struct device *dev = &isys->adev->auxdev.dev; void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CSI2_GPREGS_BASE(id); dev_dbg(dev, "gpreg write: reg 0x%lx = data 0x%08x", base + addr - isys_base, data); writel(data, base + addr); dev_dbg(dev, "gpreg read: reg 0x%lx = data 0x%08x", base + addr - isys_base, readl(base + addr)); } static u32 dwc_csi_read(struct ipu7_isys *isys, u32 id, u32 addr) { u32 data; void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(id); data = readl(base + addr); dev_dbg(&isys->adev->auxdev.dev, "csi read: reg 0x%lx = data 0x%x", base + addr - isys_base, data); return data; } static void dwc_phy_write_mask(struct ipu7_isys *isys, u32 id, u32 addr, u16 val, u8 lo, u8 hi) { u32 temp, mask; WARN_ON(lo > hi); WARN_ON(hi > 15); mask = ((~0U - (1 << lo) + 1)) & (~0U >> (31 - hi)); temp = dwc_phy_read(isys, id, addr); temp &= ~mask; temp |= (val << lo) & mask; dwc_phy_write(isys, id, addr, temp); } static void dwc_csi_write_mask(struct ipu7_isys *isys, u32 id, u32 addr, u32 val, u8 hi, u8 lo) { u32 temp, mask; WARN_ON(lo > hi); mask = ((~0U - (1 << lo) + 1)) & (~0U >> (31 - hi)); temp = dwc_csi_read(isys, id, addr); temp &= ~mask; temp |= (val << lo) & mask; dwc_csi_write(isys, id, addr, temp); } static void ipu7_isys_csi_ctrl_cfg(struct ipu7_isys *isys, struct ipu7_isys_csi2_config *cfg) { struct device *dev = &isys->adev->auxdev.dev; u32 id, lanes; u32 val; id = cfg->port; lanes = cfg->nlanes; dev_dbg(dev, "csi-%d controller init with %u lanes", id, lanes); val = dwc_csi_read(isys, id, VERSION); dev_dbg(dev, "csi-%d controller version = 0x%x", id, val); /* num of active data lanes */ dwc_csi_write(isys, id, N_LANES, lanes - 1); dwc_csi_write(isys, id, CDPHY_MODE, PHY_MODE_DPHY); dwc_csi_write(isys, id, VC_EXTENSION, 0); /* only mask PHY_FATAL and PKT_FATAL interrupts */ dwc_csi_write(isys, id, INT_MSK_PHY_FATAL, 0xff); dwc_csi_write(isys, id, INT_MSK_PKT_FATAL, 0x3); dwc_csi_write(isys, id, INT_MSK_PHY, 0x0); dwc_csi_write(isys, id, INT_MSK_LINE, 0x0); dwc_csi_write(isys, id, INT_MSK_BNDRY_FRAME_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_SEQ_FRAME_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_CRC_FRAME_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_PLD_CRC_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_DATA_ID, 0x0); 
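	/* the remaining monitors (data ID, corrected ECC) are left at 0 too */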
dwc_csi_write(isys, id, INT_MSK_ECC_CORRECTED, 0x0); } static void ipu7_isys_csi_phy_reset(struct ipu7_isys *isys, u32 id) { dwc_csi_write(isys, id, PHY_SHUTDOWNZ, 0); dwc_csi_write(isys, id, DPHY_RSTZ, 0); dwc_csi_write(isys, id, CSI2_RESETN, 0); gpreg_write(isys, id, PHY_RESET, 0); gpreg_write(isys, id, PHY_SHUTDOWN, 0); } #define N_DATA_IDS 8 static DECLARE_BITMAP(data_ids, N_DATA_IDS); /* 8 Data ID monitors, each Data ID is composed by pair of VC and data type */ int ipu7_isys_csi_ctrl_dids_config(struct ipu7_isys *isys, u32 id, u8 vc, u8 dt) { u32 reg, n; int ret; u8 lo, hi; dev_dbg(&isys->adev->auxdev.dev, "Config CSI-%u with vc:%u data-type:0x%x\n", id, vc, dt); /* enable VCX: 2-bit field for DPHY, 3-bit for CPHY */ dwc_csi_write(isys, id, VC_EXTENSION, 0x0); n = find_first_zero_bit(data_ids, N_DATA_IDS); if (n == N_DATA_IDS) return -ENOSPC; ret = test_and_set_bit(n, data_ids); if (ret) return -EBUSY; reg = n < 4 ? DATA_IDS_VC_1 : DATA_IDS_VC_2; lo = (n % 4) * 8; hi = lo + 4; dwc_csi_write_mask(isys, id, reg, vc & GENMASK(4, 0), hi, lo); reg = n < 4 ? DATA_IDS_1 : DATA_IDS_2; lo = (n % 4) * 8; hi = lo + 5; dwc_csi_write_mask(isys, id, reg, dt & GENMASK(5, 0), hi, lo); return 0; } #define CDPHY_TIMEOUT (5000000) static int ipu7_isys_phy_ready(struct ipu7_isys *isys, u32 id) { void __iomem *base = isys->pdata->base; void __iomem *gpreg = base + IS_IO_CSI2_GPREGS_BASE(id); struct device *dev = &isys->adev->auxdev.dev; unsigned int i; u32 phy_ready; u32 reg, rext; int ret; dev_dbg(dev, "waiting phy ready...\n"); ret = readl_poll_timeout(gpreg + PHY_READY, phy_ready, phy_ready & BIT(0) && phy_ready != ~0U, 100, CDPHY_TIMEOUT); dev_dbg(dev, "phy %u ready = 0x%08x\n", id, readl(gpreg + PHY_READY)); dev_dbg(dev, "csi %u PHY_RX = 0x%08x\n", id, dwc_csi_read(isys, id, PHY_RX)); dev_dbg(dev, "csi %u PHY_STOPSTATE = 0x%08x\n", id, dwc_csi_read(isys, id, PHY_STOPSTATE)); dev_dbg(dev, "csi %u PHY_CAL = 0x%08x\n", id, dwc_csi_read(isys, id, PHY_CAL)); for (i = 0; i < 4; i++) { reg = CORE_DIG_DLANE_0_R_HS_RX_0 + (i * 0x400); dev_dbg(dev, "phy %u DLANE%u skewcal = 0x%04x\n", id, i, dwc_phy_read(isys, id, reg)); } dev_dbg(dev, "phy %u DDLCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_5)); dev_dbg(dev, "phy %u TERMCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_TERMCAL_DEBUG_0)); dev_dbg(dev, "phy %u LPDCOCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_LPDCOCAL_DEBUG_RB)); dev_dbg(dev, "phy %u HSDCOCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_HSDCOCAL_DEBUG_RB)); dev_dbg(dev, "phy %u LPDCOCAL_VT = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_LPDCOCAL_DEBUG_VT)); if (!ret) { if (id) { dev_dbg(dev, "ignore phy %u rext\n", id); return 0; } rext = dwc_phy_read(isys, id, CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_15) & 0xf; dev_dbg(dev, "phy %u rext value = %u\n", id, rext); isys->phy_rext_cal = rext; return 0; } dev_err(dev, "wait phy ready timeout!\n"); return ret; } static int lookup_table0(u16 mbps) { unsigned int i; for (i = 0; i < ARRAY_SIZE(table0); i++) { if (mbps >= table0[i].min_mbps && mbps <= table0[i].max_mbps) return i; } return -ENXIO; } static int lookup_table1(u16 mbps) { unsigned int i; for (i = 0; i < ARRAY_SIZE(table1); i++) { if (mbps >= table1[i].min_mbps && mbps <= table1[i].max_mbps) return i; } return -ENXIO; } static int lookup_table2(u16 mbps) { unsigned int i; for (i = 0; i < ARRAY_SIZE(table2); i++) { if (mbps >= table2[i].min_mbps && mbps <= table2[i].max_mbps) return i; } return -ENXIO; } static int lookup_table6(u16 mbps) { unsigned int i; for (i 
= 0; i < ARRAY_SIZE(table6); i++) { if (mbps >= table6[i].min_mbps && mbps <= table6[i].max_mbps) return i; } return -ENXIO; } static const u16 deskew_fine_mem[] = { 0x0404, 0x040c, 0x0414, 0x041c, 0x0423, 0x0429, 0x0430, 0x043a, 0x0445, 0x044a, 0x0450, 0x045a, 0x0465, 0x0469, 0x0472, 0x047a, 0x0485, 0x0489, 0x0490, 0x049a, 0x04a4, 0x04ac, 0x04b4, 0x04bc, 0x04c4, 0x04cc, 0x04d4, 0x04dc, 0x04e4, 0x04ec, 0x04f4, 0x04fc, 0x0504, 0x050c, 0x0514, 0x051c, 0x0523, 0x0529, 0x0530, 0x053a, 0x0545, 0x054a, 0x0550, 0x055a, 0x0565, 0x0569, 0x0572, 0x057a, 0x0585, 0x0589, 0x0590, 0x059a, 0x05a4, 0x05ac, 0x05b4, 0x05bc, 0x05c4, 0x05cc, 0x05d4, 0x05dc, 0x05e4, 0x05ec, 0x05f4, 0x05fc, 0x0604, 0x060c, 0x0614, 0x061c, 0x0623, 0x0629, 0x0632, 0x063a, 0x0645, 0x064a, 0x0650, 0x065a, 0x0665, 0x0669, 0x0672, 0x067a, 0x0685, 0x0689, 0x0690, 0x069a, 0x06a4, 0x06ac, 0x06b4, 0x06bc, 0x06c4, 0x06cc, 0x06d4, 0x06dc, 0x06e4, 0x06ec, 0x06f4, 0x06fc, 0x0704, 0x070c, 0x0714, 0x071c, 0x0723, 0x072a, 0x0730, 0x073a, 0x0745, 0x074a, 0x0750, 0x075a, 0x0765, 0x0769, 0x0772, 0x077a, 0x0785, 0x0789, 0x0790, 0x079a, 0x07a4, 0x07ac, 0x07b4, 0x07bc, 0x07c4, 0x07cc, 0x07d4, 0x07dc, 0x07e4, 0x07ec, 0x07f4, 0x07fc, }; static void ipu7_isys_phy_config(struct ipu7_isys *isys, u8 id, u8 lanes, bool aggregation) { struct device *dev = &isys->adev->auxdev.dev; u16 hsrxval0, hsrxval1, hsrxval2; s64 link_freq; int index; s64 mbps; u16 reg; u16 val; u32 i; link_freq = ipu7_isys_csi2_get_link_freq(&isys->csi2[id]); if (link_freq < 0) { dev_warn(dev, "get link freq failed, use default mbps\n"); link_freq = 600000000; } mbps = div_u64(link_freq, 500000); dev_dbg(dev, "config phy %u with lanes %u aggregation %d mbps %lld\n", id, lanes, aggregation, mbps); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_10, 48, 0, 7); dwc_phy_write_mask(isys, id, CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_2, 1, 12, 13); dwc_phy_write_mask(isys, id, CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_0, 63, 2, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_STARTUP_1_1, 563, 0, 11); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_2, 5, 0, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_6, 39, 0, 7); dwc_phy_write_mask(isys, id, PPI_CALIBCTRL_RW_COMMON_BG_0, 500, 0, 8); dwc_phy_write_mask(isys, id, PPI_RW_TERMCAL_CFG_0, 38, 0, 6); dwc_phy_write_mask(isys, id, PPI_RW_OFFSETCAL_CFG_0, 7, 0, 4); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TIMEBASE, 153, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_NREF, 800, 0, 10); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_NREF_RANGE, 27, 0, 4); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TWAIT_CONFIG, 47, 0, 8); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TWAIT_CONFIG, 127, 9, 15); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 47, 7, 15); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 27, 2, 6); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 3, 0, 1); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_COARSE_CFG, 1, 0, 1); dwc_phy_write_mask(isys, id, PPI_RW_COMMON_CFG, 3, 0, 1); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0, 0, 10, 10); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1, 1, 10, 10); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1, 0, 15, 15); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_3, 3, 8, 9); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0, 0, 15, 15); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_6, 7, 12, 14); dwc_phy_write_mask(isys, id, 
CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7, 0, 8, 10); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_5, 0, 8, 8); /* DPHY specific */ dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_7, 0, 0, 9); if (mbps > 1500) dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7, 40, 0, 7); else dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7, 104, 0, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_8, 80, 0, 7); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_0, 191, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_7, 34, 7, 12); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_1, 38, 8, 15); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 4, 12, 15); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 2, 10, 11); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 1, 8, 8); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 38, 0, 7); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 1, 9, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_4, 10, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_6, 10, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_7, 19, 0, 6); index = lookup_table0(mbps); if (index >= 0) dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_3, table0[index].ddlcal_counter_ref, 0, 9); index = lookup_table1(mbps); if (index >= 0) { dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_1, table1[index].ddlcal_max_phase, 0, 7); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_5, table1[index].ddlcal_dll_fbk, 4, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_5, table1[index].ddlcal_ddl_coarse_bank, 0, 3); reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_8; val = table1[index].oa_lanex_hsrx_cdphy_sel_fast; for (i = 0; i < lanes + 1; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 12, 12); } reg = CORE_DIG_DLANE_0_RW_LP_0; for (i = 0; i < lanes; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), 6, 8, 11); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2, 0, 0, 0); if (is_ipu7p5(isys->adev->isp->hw_ver)) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2, 1, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2, 0, 0, 0); } else { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2, 0, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2, 1, 0, 0); } if (lanes == 4) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2, 0, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2, 0, 0, 0); } dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_6, 1, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_6, 1, 3, 5); reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_12; val = (mbps < 1500) ? 1 : 0; for (i = 0; i < lanes + 1; i++) { dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1); dwc_phy_write_mask(isys, id, reg + (i * 0x400), !val, 3, 3); } reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_13; val = (mbps < 1500) ? 
1 : 0; for (i = 0; i < lanes + 1; i++) { dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1); dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 3, 3); } index = lookup_table6(mbps); if (index >= 0) { val = table6[index].oa_lane_hsrx_hs_clk_div; if (is_ipu7p5(isys->adev->isp->hw_ver)) reg = CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9; else reg = CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9; dwc_phy_write_mask(isys, id, reg, val, 5, 7); } if (aggregation) { dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_0, 1, 1, 1); reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15; for (i = 0; i < lanes; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), 3, 3, 4); } dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_HS_RX_0, 28, 0, 7); dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_HS_RX_7, 6, 0, 7); index = lookup_table2(mbps); if (index >= 0) { val = table2[index].i_thssettle; reg = CORE_DIG_DLANE_0_RW_HS_RX_0; for (i = 0; i < lanes; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 8, 15); } /* deskew */ for (i = 0; i < lanes; i++) { reg = CORE_DIG_DLANE_0_RW_CFG_1; dwc_phy_write_mask(isys, id, reg + (i * 0x400), ((mbps > 1500) ? 0x1 : 0x2), 2, 3); reg = CORE_DIG_DLANE_0_RW_HS_RX_2; dwc_phy_write_mask(isys, id, reg + (i * 0x400), ((mbps > 2500) ? 0 : 1), 15, 15); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 13, 13); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 3, 9, 12); reg = CORE_DIG_DLANE_0_RW_LP_0; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 12, 15); reg = CORE_DIG_DLANE_0_RW_LP_2; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 0, 0); reg = CORE_DIG_DLANE_0_RW_HS_RX_1; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 16, 0, 7); reg = CORE_DIG_DLANE_0_RW_HS_RX_3; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 0, 2); index = lookup_table1(mbps); if (index >= 0) { val = table1[index].fjump_deskew; dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 3, 8); } reg = CORE_DIG_DLANE_0_RW_HS_RX_4; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 150, 0, 15); reg = CORE_DIG_DLANE_0_RW_HS_RX_5; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 0, 7); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 8, 15); reg = CORE_DIG_DLANE_0_RW_HS_RX_6; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 2, 0, 7); index = lookup_table1(mbps); if (index >= 0) { val = table1[index].min_eye_opening_deskew; dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 8, 15); } reg = CORE_DIG_DLANE_0_RW_HS_RX_7; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 13, 13); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 15, 15); reg = CORE_DIG_DLANE_0_RW_HS_RX_9; index = lookup_table1(mbps); if (index >= 0) { val = table1[index].ddlcal_max_phase; dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 0, 7); } } dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_LP_0, 1, 12, 15); dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_LP_2, 0, 0, 0); for (i = 0; i < ARRAY_SIZE(deskew_fine_mem); i++) dwc_phy_write_mask(isys, id, CORE_DIG_COMMON_RW_DESKEW_FINE_MEM, deskew_fine_mem[i], 0, 15); if (mbps <= 1500) { hsrxval0 = 0; hsrxval1 = 0; hsrxval2 = 0; } if (mbps > 1500) { hsrxval0 = 4; hsrxval1 = 0; hsrxval2 = 3; } if (mbps > 2500) hsrxval1 = 2; dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, 
CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_15, hsrxval2, 0, 2); /* force and override rext */ if (isys->phy_rext_cal && id) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_8, isys->phy_rext_cal, 0, 3); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7, 1, 11, 11); } } int ipu7_isys_csi_phy_powerup(struct ipu7_isys *isys, struct ipu7_isys_csi2_config *cfg) { int ret; u32 id = cfg->port; u32 lanes = cfg->nlanes; bool aggregation = false; /* lanes remapping for aggregation (port AB) mode */ if (is_ipu7p5(isys->adev->isp->hw_ver) && lanes > 2 && id == PORT_A) { aggregation = true; lanes = 2; } ipu7_isys_csi_phy_reset(isys, id); gpreg_write(isys, id, SOC_CSI2HOST_SELECT, CSI2HOST_SEL_CSI2HOST); /* enable_dck */ gpreg_write(isys, id, PHY_CLK_LANE_CONTROL, 0x1); /* forcerxmode_dck */ gpreg_write(isys, id, PHY_CLK_LANE_FORCE_CONTROL, 0x2); /* forcerxmode_0/1/2/3 */ gpreg_write(isys, id, PHY_LANE_FORCE_CONTROL, 0xf); /* phy mode */ gpreg_write(isys, id, PHY_MODE, PHY_MODE_DPHY); ipu7_isys_phy_config(isys, id, lanes, aggregation); ipu7_isys_csi_ctrl_cfg(isys, cfg); /* get the DT and VC from frame descript to fill for CSI2 */ ipu7_isys_csi_ctrl_dids_config(isys, id, 0, MIPI_CSI2_DT_RAW10); dwc_csi_write(isys, id, DPHY_RSTZ, 1); dwc_csi_write(isys, id, PHY_SHUTDOWNZ, 1); gpreg_write(isys, id, PHY_RESET, 1); gpreg_write(isys, id, PHY_SHUTDOWN, 1); dwc_csi_write(isys, id, CSI2_RESETN, 1); ret = ipu7_isys_phy_ready(isys, id); if (ret < 0) return ret; gpreg_write(isys, id, PHY_LANE_FORCE_CONTROL, 0); gpreg_write(isys, id, PHY_CLK_LANE_FORCE_CONTROL, 0); /* config PORT_B if aggregation mode */ if (aggregation) { ipu7_isys_csi_phy_reset(isys, PORT_B); gpreg_write(isys, PORT_B, SOC_CSI2HOST_SELECT, CSI2HOST_SEL_CSI2HOST); /* disable the dck for secondary phy */ gpreg_write(isys, PORT_B, PHY_CLK_LANE_CONTROL, 0x0); /* TODO: check forcerxmode_dck */ /* gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0x2);*/ /* forcerxmode_0/1/2/3 */ gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0xf); gpreg_write(isys, PORT_B, PHY_MODE, PHY_MODE_DPHY); ipu7_isys_phy_config(isys, PORT_B, 2, aggregation); /* TODO: how to powerup the secondary PHY from DWC CSI2 */ dwc_csi_write(isys, PORT_B, DPHY_RSTZ, 1); dwc_csi_write(isys, PORT_B, PHY_SHUTDOWNZ, 1); gpreg_write(isys, PORT_B, PHY_RESET, 1); gpreg_write(isys, PORT_B, PHY_SHUTDOWN, 1); dwc_csi_write(isys, PORT_B, CSI2_RESETN, 1); ret = ipu7_isys_phy_ready(isys, PORT_B); if (ret < 0) return ret; gpreg_write(isys, PORT_B, PHY_LANE_FORCE_CONTROL, 0); gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0); } return 0; } void ipu7_isys_csi_phy_powerdown(struct ipu7_isys *isys, struct ipu7_isys_csi2_config 
*cfg) { ipu7_isys_csi_phy_reset(isys, cfg->port); if (is_ipu7p5(isys->adev->isp->hw_ver) && cfg->nlanes > 2 && cfg->port == PORT_A) ipu7_isys_csi_phy_reset(isys, PORT_B); } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-csi-phy.h000066400000000000000000000013511465530421300276500ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_CSI_PHY_H #define IPU7_ISYS_CSI_PHY_H struct ipu7_isys; struct ipu7_isys_csi2_config; #define PHY_MODE_DPHY 0 #define PHY_MODE_CPHY 1 void ipu7_isys_csi_ctrl_init(struct ipu7_isys *isys, struct ipu7_isys_csi2_config *cfg); void ipu7_isys_csi_ctrl_reset(struct ipu7_isys *isys, struct ipu7_isys_csi2_config *cfg); int ipu7_isys_csi_ctrl_dids_config(struct ipu7_isys *isys, u32 id, u8 vc, u8 dt); int ipu7_isys_csi_phy_powerup(struct ipu7_isys *isys, struct ipu7_isys_csi2_config *cfg); void ipu7_isys_csi_phy_powerdown(struct ipu7_isys *isys, struct ipu7_isys_csi2_config *cfg); #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-csi2-regs.h000066400000000000000000001454371465530421300301100ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_CSI2_REG_H #define IPU7_ISYS_CSI2_REG_H /* IS main regs base */ #define IS_MAIN_BASE (0x240000) #define IS_MAIN_S2B_BASE (IS_MAIN_BASE + 0x22000) #define IS_MAIN_B2O_BASE (IS_MAIN_BASE + 0x26000) #define IS_MAIN_ISD_M0_BASE (IS_MAIN_BASE + 0x2b000) #define IS_MAIN_ISD_M1_BASE (IS_MAIN_BASE + 0x2b100) #define IS_MAIN_ISD_INT_BASE (IS_MAIN_BASE + 0x2b200) #define IS_MAIN_GDA_BASE (IS_MAIN_BASE + 0x32000) #define IS_MAIN_GPREGS_MAIN_BASE (IS_MAIN_BASE + 0x32500) #define IS_MAIN_IRQ_CTRL_BASE (IS_MAIN_BASE + 0x32700) #define IS_MAIN_PWM_CTRL_BASE (IS_MAIN_BASE + 0x32b00) #define S2B_IRQ_COMMON_0_CTL_STATUS (IS_MAIN_S2B_BASE + 0x1c) #define S2B_IRQ_COMMON_0_CTL_CLEAR (IS_MAIN_S2B_BASE + 0x20) #define S2B_IRQ_COMMON_0_CTL_ENABLE (IS_MAIN_S2B_BASE + 0x24) #define S2B_IID_IRQ_CTL_STATUS(iid) (IS_MAIN_S2B_BASE + 0x94 + \ 0x100 * (iid)) #define B2O_IRQ_COMMON_0_CTL_STATUS (IS_MAIN_B2O_BASE + 0x30) #define B2O_IRQ_COMMON_0_CTL_CLEAR (IS_MAIN_B2O_BASE + 0x34) #define B2O_IRQ_COMMON_0_CTL_ENABLE (IS_MAIN_B2O_BASE + 0x38) #define B2O_IID_IRQ_CTL_STATUS(oid) (IS_MAIN_B2O_BASE + 0x3dc + \ 0x200 * (oid)) #define ISD_M0_IRQ_CTL_STATUS (IS_MAIN_ISD_M0_BASE + 0x1c) #define ISD_M0_IRQ_CTL_CLEAR (IS_MAIN_ISD_M0_BASE + 0x20) #define ISD_M0_IRQ_CTL_ENABLE (IS_MAIN_ISD_M0_BASE + 0x24) #define ISD_M1_IRQ_CTL_STATUS (IS_MAIN_ISD_M1_BASE + 0x1c) #define ISD_M1_IRQ_CTL_CLEAR (IS_MAIN_ISD_M1_BASE + 0x20) #define ISD_M1_IRQ_CTL_ENABLE (IS_MAIN_ISD_M1_BASE + 0x24) #define ISD_INT_IRQ_CTL_STATUS (IS_MAIN_ISD_INT_BASE + 0x1c) #define ISD_INT_IRQ_CTL_CLEAR (IS_MAIN_ISD_INT_BASE + 0x20) #define ISD_INT_IRQ_CTL_ENABLE (IS_MAIN_ISD_INT_BASE + 0x24) #define GDA_IRQ_CTL_STATUS (IS_MAIN_GDA_BASE + 0x1c) #define GDA_IRQ_CTL_CLEAR (IS_MAIN_GDA_BASE + 0x20) #define GDA_IRQ_CTL_ENABLE (IS_MAIN_GDA_BASE + 0x24) #define IS_MAIN_IRQ_CTL_EDGE (IS_MAIN_IRQ_CTRL_BASE + 0x0) #define IS_MAIN_IRQ_CTL_MASK (IS_MAIN_IRQ_CTRL_BASE + 0x4) #define IS_MAIN_IRQ_CTL_STATUS (IS_MAIN_IRQ_CTRL_BASE + 0x8) #define IS_MAIN_IRQ_CTL_CLEAR (IS_MAIN_IRQ_CTRL_BASE + 0xc) #define IS_MAIN_IRQ_CTL_ENABLE (IS_MAIN_IRQ_CTRL_BASE + 0x10) #define IS_MAIN_IRQ_CTL_LEVEL_NOT_PULSE (IS_MAIN_IRQ_CTRL_BASE + 0x14) /* IS IO regs base */ #define IS_PHY_NUM (4) #define 
IS_IO_BASE (0x280000) /* dwc csi cdphy registers */ #define IS_IO_CDPHY_BASE(i) (IS_IO_BASE + 0x10000 * (i)) #define PPI_STARTUP_RW_COMMON_DPHY_0 0x1800 #define PPI_STARTUP_RW_COMMON_DPHY_1 0x1802 #define PPI_STARTUP_RW_COMMON_DPHY_2 0x1804 #define PPI_STARTUP_RW_COMMON_DPHY_3 0x1806 #define PPI_STARTUP_RW_COMMON_DPHY_4 0x1808 #define PPI_STARTUP_RW_COMMON_DPHY_5 0x180a #define PPI_STARTUP_RW_COMMON_DPHY_6 0x180c #define PPI_STARTUP_RW_COMMON_DPHY_7 0x180e #define PPI_STARTUP_RW_COMMON_DPHY_8 0x1810 #define PPI_STARTUP_RW_COMMON_DPHY_9 0x1812 #define PPI_STARTUP_RW_COMMON_DPHY_A 0x1814 #define PPI_STARTUP_RW_COMMON_DPHY_10 0x1820 #define PPI_STARTUP_RW_COMMON_STARTUP_1_1 0x1822 #define PPI_STARTUP_RW_COMMON_STARTUP_1_2 0x1824 #define PPI_CALIBCTRL_RW_COMMON_CALIBCTRL_2_0 0x1840 #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_1 0x1842 #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_2 0x1844 #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_3 0x1846 #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_4 0x1848 #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_5 0x184a #define PPI_CALIBCTRL_RW_COMMON_BG_0 0x184c #define PPI_CALIBCTRL_RW_COMMON_CALIBCTRL_2_7 0x184e #define PPI_CALIBCTRL_RW_ADC_CFG_0 0x1850 #define PPI_CALIBCTRL_RW_ADC_CFG_1 0x1852 #define PPI_CALIBCTRL_R_ADC_DEBUG 0x1854 #define PPI_RW_LPDCOCAL_TOP_OVERRIDE 0x1c00 #define PPI_RW_LPDCOCAL_TIMEBASE 0x1c02 #define PPI_RW_LPDCOCAL_NREF 0x1c04 #define PPI_RW_LPDCOCAL_NREF_RANGE 0x1c06 #define PPI_RW_LPDCOCAL_NREF_TRIGGER_MAN 0x1c08 #define PPI_RW_LPDCOCAL_TWAIT_CONFIG 0x1c0a #define PPI_RW_LPDCOCAL_VT_CONFIG 0x1c0c #define PPI_R_LPDCOCAL_DEBUG_RB 0x1c0e #define PPI_RW_LPDCOCAL_COARSE_CFG 0x1c10 #define PPI_R_LPDCOCAL_DEBUG_COARSE_RB 0x1c12 #define PPI_R_LPDCOCAL_DEBUG_COARSE_MEAS_0_RB 0x1c14 #define PPI_R_LPDCOCAL_DEBUG_COARSE_MEAS_1_RB 0x1c16 #define PPI_R_LPDCOCAL_DEBUG_COARSE_FWORD_RB 0x1c18 #define PPI_R_LPDCOCAL_DEBUG_MEASURE_CURR_ERROR 0x1c1a #define PPI_R_LPDCOCAL_DEBUG_MEASURE_LAST_ERROR 0x1c1c #define PPI_R_LPDCOCAL_DEBUG_VT 0x1c1e #define PPI_RW_LB_TIMEBASE_CONFIG 0x1c20 #define PPI_RW_LB_STARTCMU_CONFIG 0x1c22 #define PPI_R_LBPULSE_COUNTER_RB 0x1c24 #define PPI_R_LB_START_CMU_RB 0x1c26 #define PPI_RW_LB_DPHY_BURST_START 0x1c28 #define PPI_RW_LB_CPHY_BURST_START 0x1c2a #define PPI_RW_DDLCAL_CFG_0 0x1c40 #define PPI_RW_DDLCAL_CFG_1 0x1c42 #define PPI_RW_DDLCAL_CFG_2 0x1c44 #define PPI_RW_DDLCAL_CFG_3 0x1c46 #define PPI_RW_DDLCAL_CFG_4 0x1c48 #define PPI_RW_DDLCAL_CFG_5 0x1c4a #define PPI_RW_DDLCAL_CFG_6 0x1c4c #define PPI_RW_DDLCAL_CFG_7 0x1c4e #define PPI_R_DDLCAL_DEBUG_0 0x1c50 #define PPI_R_DDLCAL_DEBUG_1 0x1c52 #define PPI_RW_PARITY_TEST 0x1c60 #define PPI_RW_STARTUP_OVR_0 0x1c62 #define PPI_RW_STARTUP_STATE_OVR_1 0x1c64 #define PPI_RW_DTB_SELECTOR 0x1c66 #define PPI_RW_DPHY_CLK_SPARE 0x1c6a #define PPI_RW_COMMON_CFG 0x1c6c #define PPI_RW_TERMCAL_CFG_0 0x1c80 #define PPI_R_TERMCAL_DEBUG_0 0x1c82 #define PPI_RW_TERMCAL_CTRL_0 0x1c84 #define PPI_RW_OFFSETCAL_CFG_0 0x1ca0 #define PPI_R_OFFSETCAL_DEBUG_LANE0 0x1ca2 #define PPI_R_OFFSETCAL_DEBUG_LANE1 0x1ca4 #define PPI_R_OFFSETCAL_DEBUG_LANE2 0x1ca6 #define PPI_R_OFFSETCAL_DEBUG_LANE3 0x1ca8 #define PPI_R_OFFSETCAL_DEBUG_LANE4 0x1caa #define PPI_RW_HSDCOCAL_CFG_O 0x1d00 #define PPI_RW_HSDCOCAL_CFG_1 0x1d02 #define PPI_RW_HSDCOCAL_CFG_2 0x1d04 #define PPI_RW_HSDCOCAL_CFG_3 0x1d06 #define PPI_RW_HSDCOCAL_CFG_4 0x1d08 #define PPI_RW_HSDCOCAL_CFG_5 0x1d0a #define PPI_RW_HSDCOCAL_CFG_6 0x1d0c #define PPI_RW_HSDCOCAL_CFG_7 0x1d0e #define PPI_RW_HSDCOCAL_CFG_8 0x1d10 #define PPI_R_HSDCOCAL_DEBUG_RB 0x1d12 #define 
CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_0 0x2000 #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_1 0x2002 #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_2 0x2004 #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_3 0x2006 #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_4 0x2008 #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_5 0x200a #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_6 0x200c #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_7 0x200e #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_8 0x2010 #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_9 0x2012 #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_10 0x2014 #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_11 0x2016 #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_12 0x2018 #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_13 0x201a #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_14 0x201c #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_15 0x201e #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_0 0x2020 #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_1 0x2022 #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_2 0x2024 #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_3 0x2026 #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_4 0x2028 #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_5 0x202a #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_6 0x202c #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_7 0x202e #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_8 0x2030 #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_9 0x2032 #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_10 0x2034 #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_11 0x2036 #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_12 0x2038 #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_13 0x203a #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_14 0x203c #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_15 0x203e #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_0 0x2040 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_1 0x2042 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2 0x2044 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_3 0x2046 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_4 0x2048 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_5 0x204a #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_6 0x204c #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_7 0x204e #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_8 0x2050 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9 0x2052 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_10 0x2054 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_11 0x2056 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_12 0x2058 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_13 0x205a #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_14 0x205c #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15 0x205e #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_0 0x2060 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_1 0x2062 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_2 0x2064 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_3 0x2066 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_4 0x2068 #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_5 0x206a #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_6 0x206c #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_7 0x206e #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_8 0x2070 #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_9 0x2072 #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_10 0x2074 #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_11 0x2076 #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_12 0x2078 #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_13 0x207a #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_14 0x207c #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_15 0x207e #define 
#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_0	0x2080
#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_1	0x2082
#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_2	0x2084
#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_3	0x2086
#define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_4	0x2088
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_0	0x20a0
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_1	0x20a2
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_5_2	0x20a4
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_5_3	0x20a6
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_4	0x20a8
#define CORE_DIG_RW_TRIO0_0	0x2100
#define CORE_DIG_RW_TRIO0_1	0x2102
#define CORE_DIG_RW_TRIO0_2	0x2104
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_0	0x2400
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_1	0x2402
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_2	0x2404
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_3	0x2406
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_4	0x2408
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_5	0x240a
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_6	0x240c
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_7	0x240e
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_8	0x2410
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_9	0x2412
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_10	0x2414
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_11	0x2416
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_12	0x2418
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_13	0x241a
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_14	0x241c
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_15	0x241e
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_0	0x2420
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_1	0x2422
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_2	0x2424
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_3	0x2426
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_4	0x2428
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_5	0x242a
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_6	0x242c
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_7	0x242e
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_8	0x2430
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_9	0x2432
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_10	0x2434
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_11	0x2436
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_12	0x2438
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_13	0x243a
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_14	0x243c
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_15	0x243e
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_0	0x2440
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_1	0x2442
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2	0x2444
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_3	0x2446
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_4	0x2448
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_5	0x244a
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_6	0x244c
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_7	0x244e
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_8	0x2450
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9	0x2452
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_10	0x2454
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_11	0x2456
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_12	0x2458
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_13	0x245a
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_14	0x245c
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15	0x245e
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_0	0x2460
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_1	0x2462
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_2	0x2464
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_3	0x2466
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_4	0x2468
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_5	0x246a
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_6	0x246c
#define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_7	0x246e
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_8	0x2470
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_9	0x2472
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_10	0x2474
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_11	0x2476
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_12	0x2478
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_13	0x247a
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_14	0x247c
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_15	0x247e
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_0	0x2480
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_1	0x2482
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_2	0x2484
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_3	0x2486
#define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_4	0x2488
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_0	0x24a0
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_1	0x24a2
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_5_2	0x24a4
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_5_3	0x24a6
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_4	0x24a8
#define CORE_DIG_RW_TRIO1_0	0x2500
#define CORE_DIG_RW_TRIO1_1	0x2502
#define CORE_DIG_RW_TRIO1_2	0x2504
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_0	0x2800
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_1	0x2802
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_2	0x2804
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_3	0x2806
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_4	0x2808
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_5	0x280a
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_6	0x280c
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_7	0x280e
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_8	0x2810
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_9	0x2812
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_10	0x2814
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_11	0x2816
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_12	0x2818
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_13	0x281a
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_14	0x281c
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_15	0x281e
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_0	0x2820
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_1	0x2822
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_2	0x2824
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_3	0x2826
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_4	0x2828
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_5	0x282a
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_6	0x282c
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_7	0x282e
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_8	0x2830
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_9	0x2832
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_10	0x2834
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_11	0x2836
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_12	0x2838
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_13	0x283a
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_14	0x283c
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_15	0x283e
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_0	0x2840
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_1	0x2842
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2	0x2844
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_3	0x2846
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_4	0x2848
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_5	0x284a
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_6	0x284c
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_7	0x284e
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_8	0x2850
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9	0x2852
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_10	0x2854
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_11	0x2856
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_12	0x2858
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_13	0x285a
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_14	0x285c
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15	0x285e
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_0	0x2860
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_1	0x2862
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_2	0x2864
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_3	0x2866
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_4	0x2868
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_5	0x286a
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_6	0x286c
#define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_7	0x286e
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_8	0x2870
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_9	0x2872
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_10	0x2874
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_11	0x2876
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_12	0x2878
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_13	0x287a
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_14	0x287c
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_15	0x287e
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_0	0x2880
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_1	0x2882
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_2	0x2884
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_3	0x2886
#define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_4	0x2888
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_0	0x28a0
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_1	0x28a2
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_5_2	0x28a4
#define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_5_3	0x28a6
#define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_4	0x28a8
#define CORE_DIG_RW_TRIO2_0	0x2900
#define CORE_DIG_RW_TRIO2_1	0x2902
#define CORE_DIG_RW_TRIO2_2	0x2904
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_0	0x2c00
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_1	0x2c02
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_2	0x2c04
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_3	0x2c06
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_4	0x2c08
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_5	0x2c0a
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_6	0x2c0c
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_7	0x2c0e
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_8	0x2c10
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_9	0x2c12
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_10	0x2c14
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_11	0x2c16
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_12	0x2c18
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_13	0x2c1a
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_14	0x2c1c
#define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_15	0x2c1e
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_0	0x2c40
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_1	0x2c42
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2	0x2c44
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_3	0x2c46
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_4	0x2c48
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_5	0x2c4a
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_6	0x2c4c
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_7	0x2c4e
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_8	0x2c50
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9	0x2c52
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_10	0x2c54
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_11	0x2c56
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_12	0x2c58
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_13	0x2c5a
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_14	0x2c5c
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_15	0x2c5e
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_0	0x2c60
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_1	0x2c62
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_2	0x2c64
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_3	0x2c66
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_4	0x2c68
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_5	0x2c6a
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_6	0x2c6c
#define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_7	0x2c6e
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_8	0x2c70
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_9	0x2c72
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_10	0x2c74
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_11	0x2c76
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_12	0x2c78
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_13	0x2c7a
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_14	0x2c7c
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_15	0x2c7e
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_0	0x2c80
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_1	0x2c82
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_2	0x2c84
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_3	0x2c86
#define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_4	0x2c88
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_0	0x3040
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_1	0x3042
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2	0x3044
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_3	0x3046
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_4	0x3048
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_5	0x304a
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_6	0x304c
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_7	0x304e
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_8	0x3050
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9	0x3052
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_10	0x3054
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_11	0x3056
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_12	0x3058
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_13	0x305a
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_14	0x305c
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_15	0x305e
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_0	0x3060
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_1	0x3062
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_2	0x3064
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_3	0x3066
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_4	0x3068
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_5	0x306a
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_6	0x306c
#define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_7	0x306e
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_8	0x3070
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_9	0x3072
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_10	0x3074
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_11	0x3076
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_12	0x3078
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_13	0x307a
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_14	0x307c
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_15	0x307e
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_0	0x3080
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_1	0x3082
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_2	0x3084
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_3	0x3086
#define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_4	0x3088
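
/*
 * Illustrative sketch, not part of the original register list: the
 * per-lane CORE_DIG_IOCTRL banks above sit 0x400 apart (lane 0 at
 * 0x2000, lane 1 at 0x2400, ..., lane 4 at 0x3000), and a given
 * register keeps the same offset within each bank (e.g. AFE CTRL_2_0
 * at bank + 0x40).  The two helper macros below are hypothetical and
 * only document that stride.
 */
#define CORE_DIG_IOCTRL_LANE_BANK(l)	(0x2000 + 0x400 * (l))	/* hypothetical */
#define CORE_DIG_IOCTRL_RW_AFE_CTRL_2_0(l) \
	(CORE_DIG_IOCTRL_LANE_BANK(l) + 0x40)			/* hypothetical */
/* CORE_DIG_IOCTRL_RW_AFE_CTRL_2_0(4) == CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_0 == 0x3040 */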
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_CLK_OVR_0_0	0x3400
#define CORE_DIG_IOCTRL_RW_DPHY_PPI_CLK_OVR_0_1	0x3402
#define CORE_DIG_IOCTRL_R_DPHY_PPI_CLK_OVR_0_2	0x3404
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_0	0x3800
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_1	0x3802
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_2	0x3804
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_3	0x3806
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_4	0x3808
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_5	0x380a
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_6	0x380c
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_7	0x380e
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_8	0x3810
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_9	0x3812
#define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_10	0x3814
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_11	0x3816
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_12	0x3818
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_13	0x381a
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_14	0x381c
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_15	0x381e
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_0	0x3820
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_1	0x3822
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_2	0x3824
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_3	0x3826
#define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_4	0x3828
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0	0x3840
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1	0x3842
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_2	0x3844
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_3	0x3846
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_4	0x3848
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_5	0x384a
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_6	0x384c
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7	0x384e
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_8	0x3850
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_9	0x3852
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_10	0x3854
#define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_11	0x3856
#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_12	0x3858
#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_13	0x385a
#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_14	0x385c
#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_15	0x385e
#define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_3_0	0x3860
#define CORE_DIG_RW_COMMON_0	0x3880
#define CORE_DIG_RW_COMMON_1	0x3882
#define CORE_DIG_RW_COMMON_2	0x3884
#define CORE_DIG_RW_COMMON_3	0x3886
#define CORE_DIG_RW_COMMON_4	0x3888
#define CORE_DIG_RW_COMMON_5	0x388a
#define CORE_DIG_RW_COMMON_6	0x388c
#define CORE_DIG_RW_COMMON_7	0x388e
#define CORE_DIG_RW_COMMON_8	0x3890
#define CORE_DIG_RW_COMMON_9	0x3892
#define CORE_DIG_RW_COMMON_10	0x3894
#define CORE_DIG_RW_COMMON_11	0x3896
#define CORE_DIG_RW_COMMON_12	0x3898
#define CORE_DIG_RW_COMMON_13	0x389a
#define CORE_DIG_RW_COMMON_14	0x389c
#define CORE_DIG_RW_COMMON_15	0x389e
#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_0	0x39e0
#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_1	0x39e2
#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_2	0x39e4
#define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_3	0x39e6
#define CORE_DIG_COMMON_RW_DESKEW_FINE_MEM	0x3fe0
#define CORE_DIG_COMMON_R_DESKEW_FINE_MEM	0x3fe2
#define PPI_RW_DPHY_LANE0_LBERT_0	0x4000
#define PPI_RW_DPHY_LANE0_LBERT_1	0x4002
#define PPI_R_DPHY_LANE0_LBERT_0	0x4004
#define PPI_R_DPHY_LANE0_LBERT_1	0x4006
#define PPI_RW_DPHY_LANE0_SPARE	0x4008
#define PPI_RW_DPHY_LANE1_LBERT_0	0x4400
#define PPI_RW_DPHY_LANE1_LBERT_1	0x4402
#define PPI_R_DPHY_LANE1_LBERT_0	0x4404
#define PPI_R_DPHY_LANE1_LBERT_1	0x4406
#define PPI_RW_DPHY_LANE1_SPARE	0x4408
#define PPI_RW_DPHY_LANE2_LBERT_0	0x4800
#define PPI_RW_DPHY_LANE2_LBERT_1	0x4802
#define PPI_R_DPHY_LANE2_LBERT_0	0x4804
#define PPI_R_DPHY_LANE2_LBERT_1	0x4806
#define PPI_RW_DPHY_LANE2_SPARE	0x4808
#define PPI_RW_DPHY_LANE3_LBERT_0	0x4c00
#define PPI_RW_DPHY_LANE3_LBERT_1	0x4c02
#define PPI_R_DPHY_LANE3_LBERT_0	0x4c04
#define PPI_R_DPHY_LANE3_LBERT_1	0x4c06
#define PPI_RW_DPHY_LANE3_SPARE	0x4c08
#define CORE_DIG_DLANE_0_RW_CFG_0	0x6000
#define CORE_DIG_DLANE_0_RW_CFG_1	0x6002
#define CORE_DIG_DLANE_0_RW_CFG_2	0x6004
#define CORE_DIG_DLANE_0_RW_LP_0	0x6080
#define CORE_DIG_DLANE_0_RW_LP_1	0x6082
#define CORE_DIG_DLANE_0_RW_LP_2	0x6084
#define CORE_DIG_DLANE_0_R_LP_0	0x60a0
#define CORE_DIG_DLANE_0_R_LP_1	0x60a2
#define CORE_DIG_DLANE_0_R_HS_TX_0	0x60e0
#define CORE_DIG_DLANE_0_RW_HS_RX_0	0x6100
#define CORE_DIG_DLANE_0_RW_HS_RX_1	0x6102
#define CORE_DIG_DLANE_0_RW_HS_RX_2	0x6104
#define CORE_DIG_DLANE_0_RW_HS_RX_3	0x6106
#define CORE_DIG_DLANE_0_RW_HS_RX_4	0x6108
#define CORE_DIG_DLANE_0_RW_HS_RX_5	0x610a
#define CORE_DIG_DLANE_0_RW_HS_RX_6	0x610c
#define CORE_DIG_DLANE_0_RW_HS_RX_7	0x610e
#define CORE_DIG_DLANE_0_RW_HS_RX_8	0x6110
#define CORE_DIG_DLANE_0_RW_HS_RX_9	0x6112
#define CORE_DIG_DLANE_0_R_HS_RX_0	0x6120
#define CORE_DIG_DLANE_0_R_HS_RX_1	0x6122
#define CORE_DIG_DLANE_0_R_HS_RX_2	0x6124
#define CORE_DIG_DLANE_0_R_HS_RX_3	0x6126
#define CORE_DIG_DLANE_0_R_HS_RX_4	0x6128
#define CORE_DIG_DLANE_0_RW_HS_TX_0	0x6200
#define CORE_DIG_DLANE_0_RW_HS_TX_1	0x6202
#define CORE_DIG_DLANE_0_RW_HS_TX_2	0x6204
#define CORE_DIG_DLANE_0_RW_HS_TX_3	0x6206
#define CORE_DIG_DLANE_0_RW_HS_TX_4	0x6208
#define CORE_DIG_DLANE_0_RW_HS_TX_5	0x620a
#define CORE_DIG_DLANE_0_RW_HS_TX_6	0x620c
#define CORE_DIG_DLANE_0_RW_HS_TX_7	0x620e
#define CORE_DIG_DLANE_0_RW_HS_TX_8	0x6210
#define CORE_DIG_DLANE_0_RW_HS_TX_9	0x6212
#define CORE_DIG_DLANE_0_RW_HS_TX_10	0x6214
#define CORE_DIG_DLANE_0_RW_HS_TX_11	0x6216
#define CORE_DIG_DLANE_0_RW_HS_TX_12	0x6218
#define CORE_DIG_DLANE_1_RW_CFG_0	0x6400
#define CORE_DIG_DLANE_1_RW_CFG_1	0x6402
#define CORE_DIG_DLANE_1_RW_CFG_2	0x6404
#define CORE_DIG_DLANE_1_RW_LP_0	0x6480
#define CORE_DIG_DLANE_1_RW_LP_1	0x6482
#define CORE_DIG_DLANE_1_RW_LP_2	0x6484
#define CORE_DIG_DLANE_1_R_LP_0	0x64a0
#define CORE_DIG_DLANE_1_R_LP_1	0x64a2
#define CORE_DIG_DLANE_1_R_HS_TX_0	0x64e0
#define CORE_DIG_DLANE_1_RW_HS_RX_0	0x6500
#define CORE_DIG_DLANE_1_RW_HS_RX_1	0x6502
#define CORE_DIG_DLANE_1_RW_HS_RX_2	0x6504
#define CORE_DIG_DLANE_1_RW_HS_RX_3	0x6506
#define CORE_DIG_DLANE_1_RW_HS_RX_4	0x6508
#define CORE_DIG_DLANE_1_RW_HS_RX_5	0x650a
#define CORE_DIG_DLANE_1_RW_HS_RX_6	0x650c
#define CORE_DIG_DLANE_1_RW_HS_RX_7	0x650e
#define CORE_DIG_DLANE_1_RW_HS_RX_8	0x6510
#define CORE_DIG_DLANE_1_RW_HS_RX_9	0x6512
#define CORE_DIG_DLANE_1_R_HS_RX_0	0x6520
#define CORE_DIG_DLANE_1_R_HS_RX_1	0x6522
#define CORE_DIG_DLANE_1_R_HS_RX_2	0x6524
#define CORE_DIG_DLANE_1_R_HS_RX_3	0x6526
#define CORE_DIG_DLANE_1_R_HS_RX_4	0x6528
#define CORE_DIG_DLANE_1_RW_HS_TX_0	0x6600
#define CORE_DIG_DLANE_1_RW_HS_TX_1	0x6602
#define CORE_DIG_DLANE_1_RW_HS_TX_2	0x6604
#define CORE_DIG_DLANE_1_RW_HS_TX_3	0x6606
#define CORE_DIG_DLANE_1_RW_HS_TX_4	0x6608
#define CORE_DIG_DLANE_1_RW_HS_TX_5	0x660a
#define CORE_DIG_DLANE_1_RW_HS_TX_6	0x660c
#define CORE_DIG_DLANE_1_RW_HS_TX_7	0x660e
#define CORE_DIG_DLANE_1_RW_HS_TX_8	0x6610
#define CORE_DIG_DLANE_1_RW_HS_TX_9	0x6612
#define CORE_DIG_DLANE_1_RW_HS_TX_10	0x6614
#define CORE_DIG_DLANE_1_RW_HS_TX_11	0x6616
#define CORE_DIG_DLANE_1_RW_HS_TX_12	0x6618
#define CORE_DIG_DLANE_2_RW_CFG_0	0x6800
#define CORE_DIG_DLANE_2_RW_CFG_1	0x6802
#define CORE_DIG_DLANE_2_RW_CFG_2	0x6804
#define CORE_DIG_DLANE_2_RW_LP_0	0x6880
#define CORE_DIG_DLANE_2_RW_LP_1	0x6882
#define CORE_DIG_DLANE_2_RW_LP_2	0x6884
#define CORE_DIG_DLANE_2_R_LP_0	0x68a0
#define CORE_DIG_DLANE_2_R_LP_1	0x68a2
#define CORE_DIG_DLANE_2_R_HS_TX_0	0x68e0
#define CORE_DIG_DLANE_2_RW_HS_RX_0	0x6900
#define CORE_DIG_DLANE_2_RW_HS_RX_1	0x6902
#define CORE_DIG_DLANE_2_RW_HS_RX_2	0x6904
#define CORE_DIG_DLANE_2_RW_HS_RX_3	0x6906
#define CORE_DIG_DLANE_2_RW_HS_RX_4	0x6908
#define CORE_DIG_DLANE_2_RW_HS_RX_5	0x690a
#define CORE_DIG_DLANE_2_RW_HS_RX_6	0x690c
#define CORE_DIG_DLANE_2_RW_HS_RX_7	0x690e
#define CORE_DIG_DLANE_2_RW_HS_RX_8	0x6910
#define CORE_DIG_DLANE_2_RW_HS_RX_9	0x6912
#define CORE_DIG_DLANE_2_R_HS_RX_0	0x6920
#define CORE_DIG_DLANE_2_R_HS_RX_1	0x6922
#define CORE_DIG_DLANE_2_R_HS_RX_2	0x6924
#define CORE_DIG_DLANE_2_R_HS_RX_3	0x6926
#define CORE_DIG_DLANE_2_R_HS_RX_4	0x6928
#define CORE_DIG_DLANE_2_RW_HS_TX_0	0x6a00
#define CORE_DIG_DLANE_2_RW_HS_TX_1	0x6a02
#define CORE_DIG_DLANE_2_RW_HS_TX_2	0x6a04
#define CORE_DIG_DLANE_2_RW_HS_TX_3	0x6a06
#define CORE_DIG_DLANE_2_RW_HS_TX_4	0x6a08
#define CORE_DIG_DLANE_2_RW_HS_TX_5	0x6a0a
#define CORE_DIG_DLANE_2_RW_HS_TX_6	0x6a0c
#define CORE_DIG_DLANE_2_RW_HS_TX_7	0x6a0e
#define CORE_DIG_DLANE_2_RW_HS_TX_8	0x6a10
#define CORE_DIG_DLANE_2_RW_HS_TX_9	0x6a12
#define CORE_DIG_DLANE_2_RW_HS_TX_10	0x6a14
#define CORE_DIG_DLANE_2_RW_HS_TX_11	0x6a16
#define CORE_DIG_DLANE_2_RW_HS_TX_12	0x6a18
#define CORE_DIG_DLANE_3_RW_CFG_0	0x6c00
#define CORE_DIG_DLANE_3_RW_CFG_1	0x6c02
#define CORE_DIG_DLANE_3_RW_CFG_2	0x6c04
#define CORE_DIG_DLANE_3_RW_LP_0	0x6c80
#define CORE_DIG_DLANE_3_RW_LP_1	0x6c82
#define CORE_DIG_DLANE_3_RW_LP_2	0x6c84
#define CORE_DIG_DLANE_3_R_LP_0	0x6ca0
#define CORE_DIG_DLANE_3_R_LP_1	0x6ca2
#define CORE_DIG_DLANE_3_R_HS_TX_0	0x6ce0
#define CORE_DIG_DLANE_3_RW_HS_RX_0	0x6d00
#define CORE_DIG_DLANE_3_RW_HS_RX_1	0x6d02
#define CORE_DIG_DLANE_3_RW_HS_RX_2	0x6d04
#define CORE_DIG_DLANE_3_RW_HS_RX_3	0x6d06
#define CORE_DIG_DLANE_3_RW_HS_RX_4	0x6d08
#define CORE_DIG_DLANE_3_RW_HS_RX_5	0x6d0a
#define CORE_DIG_DLANE_3_RW_HS_RX_6	0x6d0c
#define CORE_DIG_DLANE_3_RW_HS_RX_7	0x6d0e
#define CORE_DIG_DLANE_3_RW_HS_RX_8	0x6d10
#define CORE_DIG_DLANE_3_RW_HS_RX_9	0x6d12
#define CORE_DIG_DLANE_3_R_HS_RX_0	0x6d20
#define CORE_DIG_DLANE_3_R_HS_RX_1	0x6d22
#define CORE_DIG_DLANE_3_R_HS_RX_2	0x6d24
#define CORE_DIG_DLANE_3_R_HS_RX_3	0x6d26
#define CORE_DIG_DLANE_3_R_HS_RX_4	0x6d28
#define CORE_DIG_DLANE_3_RW_HS_TX_0	0x6e00
#define CORE_DIG_DLANE_3_RW_HS_TX_1	0x6e02
#define CORE_DIG_DLANE_3_RW_HS_TX_2	0x6e04
#define CORE_DIG_DLANE_3_RW_HS_TX_3	0x6e06
#define CORE_DIG_DLANE_3_RW_HS_TX_4	0x6e08
#define CORE_DIG_DLANE_3_RW_HS_TX_5	0x6e0a
#define CORE_DIG_DLANE_3_RW_HS_TX_6	0x6e0c
#define CORE_DIG_DLANE_3_RW_HS_TX_7	0x6e0e
#define CORE_DIG_DLANE_3_RW_HS_TX_8	0x6e10
#define CORE_DIG_DLANE_3_RW_HS_TX_9	0x6e12
#define CORE_DIG_DLANE_3_RW_HS_TX_10	0x6e14
#define CORE_DIG_DLANE_3_RW_HS_TX_11	0x6e16
#define CORE_DIG_DLANE_3_RW_HS_TX_12	0x6e18
#define CORE_DIG_DLANE_CLK_RW_CFG_0	0x7000
#define CORE_DIG_DLANE_CLK_RW_CFG_1	0x7002
#define CORE_DIG_DLANE_CLK_RW_CFG_2	0x7004
#define CORE_DIG_DLANE_CLK_RW_LP_0	0x7080
#define CORE_DIG_DLANE_CLK_RW_LP_1	0x7082
#define CORE_DIG_DLANE_CLK_RW_LP_2	0x7084
#define CORE_DIG_DLANE_CLK_R_LP_0	0x70a0
#define CORE_DIG_DLANE_CLK_R_LP_1	0x70a2
#define CORE_DIG_DLANE_CLK_R_HS_TX_0	0x70e0
#define CORE_DIG_DLANE_CLK_RW_HS_RX_0	0x7100
#define CORE_DIG_DLANE_CLK_RW_HS_RX_1	0x7102
#define CORE_DIG_DLANE_CLK_RW_HS_RX_2	0x7104
#define CORE_DIG_DLANE_CLK_RW_HS_RX_3	0x7106
#define CORE_DIG_DLANE_CLK_RW_HS_RX_4	0x7108
#define CORE_DIG_DLANE_CLK_RW_HS_RX_5	0x710a
#define CORE_DIG_DLANE_CLK_RW_HS_RX_6	0x710c
#define CORE_DIG_DLANE_CLK_RW_HS_RX_7	0x710e
#define CORE_DIG_DLANE_CLK_RW_HS_RX_8	0x7110
#define CORE_DIG_DLANE_CLK_RW_HS_RX_9	0x7112
#define CORE_DIG_DLANE_CLK_R_HS_RX_0	0x7120
#define CORE_DIG_DLANE_CLK_R_HS_RX_1	0x7122
#define CORE_DIG_DLANE_CLK_R_HS_RX_2	0x7124
#define CORE_DIG_DLANE_CLK_R_HS_RX_3	0x7126
#define CORE_DIG_DLANE_CLK_R_HS_RX_4	0x7128
#define CORE_DIG_DLANE_CLK_RW_HS_TX_0	0x7200
#define CORE_DIG_DLANE_CLK_RW_HS_TX_1	0x7202
#define CORE_DIG_DLANE_CLK_RW_HS_TX_2	0x7204
#define CORE_DIG_DLANE_CLK_RW_HS_TX_3	0x7206
#define CORE_DIG_DLANE_CLK_RW_HS_TX_4	0x7208
#define CORE_DIG_DLANE_CLK_RW_HS_TX_5	0x720a
#define CORE_DIG_DLANE_CLK_RW_HS_TX_6	0x720c
#define CORE_DIG_DLANE_CLK_RW_HS_TX_7	0x720e
#define CORE_DIG_DLANE_CLK_RW_HS_TX_8	0x7210
#define CORE_DIG_DLANE_CLK_RW_HS_TX_9	0x7212
#define CORE_DIG_DLANE_CLK_RW_HS_TX_10	0x7214
#define CORE_DIG_DLANE_CLK_RW_HS_TX_11	0x7216
#define CORE_DIG_DLANE_CLK_RW_HS_TX_12	0x7218
#define PPI_RW_CPHY_TRIO0_LBERT_0	0x8000
#define PPI_RW_CPHY_TRIO0_LBERT_1	0x8002
#define PPI_R_CPHY_TRIO0_LBERT_0	0x8004
#define PPI_R_CPHY_TRIO0_LBERT_1	0x8006
#define PPI_RW_CPHY_TRIO0_SPARE	0x8008
#define PPI_RW_CPHY_TRIO1_LBERT_0	0x8400
#define PPI_RW_CPHY_TRIO1_LBERT_1	0x8402
#define PPI_R_CPHY_TRIO1_LBERT_0	0x8404
#define PPI_R_CPHY_TRIO1_LBERT_1	0x8406
#define PPI_RW_CPHY_TRIO1_SPARE	0x8408
#define PPI_RW_CPHY_TRIO2_LBERT_0	0x8800
#define PPI_RW_CPHY_TRIO2_LBERT_1	0x8802
#define PPI_R_CPHY_TRIO2_LBERT_0	0x8804
#define PPI_R_CPHY_TRIO2_LBERT_1	0x8806
#define PPI_RW_CPHY_TRIO2_SPARE	0x8808
#define CORE_DIG_CLANE_0_RW_CFG_0	0xa000
#define CORE_DIG_CLANE_0_RW_CFG_2	0xa004
#define CORE_DIG_CLANE_0_RW_LP_0	0xa080
#define CORE_DIG_CLANE_0_RW_LP_1	0xa082
#define CORE_DIG_CLANE_0_RW_LP_2	0xa084
#define CORE_DIG_CLANE_0_R_LP_0	0xa0a0
#define CORE_DIG_CLANE_0_R_LP_1	0xa0a2
#define CORE_DIG_CLANE_0_RW_HS_RX_0	0xa100
#define CORE_DIG_CLANE_0_RW_HS_RX_1	0xa102
#define CORE_DIG_CLANE_0_RW_HS_RX_2	0xa104
#define CORE_DIG_CLANE_0_RW_HS_RX_3	0xa106
#define CORE_DIG_CLANE_0_RW_HS_RX_4	0xa108
#define CORE_DIG_CLANE_0_RW_HS_RX_5	0xa10a
#define CORE_DIG_CLANE_0_RW_HS_RX_6	0xa10c
#define CORE_DIG_CLANE_0_R_RX_0	0xa120
#define CORE_DIG_CLANE_0_R_RX_1	0xa122
#define CORE_DIG_CLANE_0_R_TX_0	0xa124
#define CORE_DIG_CLANE_0_R_RX_2	0xa126
#define CORE_DIG_CLANE_0_R_RX_3	0xa128
#define CORE_DIG_CLANE_0_RW_HS_TX_0	0xa200
#define CORE_DIG_CLANE_0_RW_HS_TX_1	0xa202
#define CORE_DIG_CLANE_0_RW_HS_TX_2	0xa204
#define CORE_DIG_CLANE_0_RW_HS_TX_3	0xa206
#define CORE_DIG_CLANE_0_RW_HS_TX_4	0xa208
#define CORE_DIG_CLANE_0_RW_HS_TX_5	0xa20a
#define CORE_DIG_CLANE_0_RW_HS_TX_6	0xa20c
#define CORE_DIG_CLANE_0_RW_HS_TX_7	0xa20e
#define CORE_DIG_CLANE_0_RW_HS_TX_8	0xa210
#define CORE_DIG_CLANE_0_RW_HS_TX_9	0xa212
#define CORE_DIG_CLANE_0_RW_HS_TX_10	0xa214
#define CORE_DIG_CLANE_0_RW_HS_TX_11	0xa216
#define CORE_DIG_CLANE_0_RW_HS_TX_12	0xa218
#define CORE_DIG_CLANE_0_RW_HS_TX_13	0xa21a
#define CORE_DIG_CLANE_1_RW_CFG_0	0xa400
#define CORE_DIG_CLANE_1_RW_CFG_2	0xa404
#define CORE_DIG_CLANE_1_RW_LP_0	0xa480
#define CORE_DIG_CLANE_1_RW_LP_1	0xa482
#define CORE_DIG_CLANE_1_RW_LP_2	0xa484
#define CORE_DIG_CLANE_1_R_LP_0	0xa4a0
#define CORE_DIG_CLANE_1_R_LP_1	0xa4a2
#define CORE_DIG_CLANE_1_RW_HS_RX_0	0xa500
#define CORE_DIG_CLANE_1_RW_HS_RX_1	0xa502
#define CORE_DIG_CLANE_1_RW_HS_RX_2	0xa504
#define CORE_DIG_CLANE_1_RW_HS_RX_3	0xa506
#define CORE_DIG_CLANE_1_RW_HS_RX_4	0xa508
#define CORE_DIG_CLANE_1_RW_HS_RX_5	0xa50a
#define CORE_DIG_CLANE_1_RW_HS_RX_6	0xa50c
#define CORE_DIG_CLANE_1_R_RX_0	0xa520
#define CORE_DIG_CLANE_1_R_RX_1	0xa522
#define CORE_DIG_CLANE_1_R_TX_0	0xa524
#define CORE_DIG_CLANE_1_R_RX_2	0xa526
#define CORE_DIG_CLANE_1_R_RX_3	0xa528
#define CORE_DIG_CLANE_1_RW_HS_TX_0	0xa600
#define CORE_DIG_CLANE_1_RW_HS_TX_1	0xa602
#define CORE_DIG_CLANE_1_RW_HS_TX_2	0xa604
#define CORE_DIG_CLANE_1_RW_HS_TX_3	0xa606
#define CORE_DIG_CLANE_1_RW_HS_TX_4	0xa608
#define CORE_DIG_CLANE_1_RW_HS_TX_5	0xa60a
#define CORE_DIG_CLANE_1_RW_HS_TX_6	0xa60c
#define CORE_DIG_CLANE_1_RW_HS_TX_7	0xa60e
#define CORE_DIG_CLANE_1_RW_HS_TX_8	0xa610
#define CORE_DIG_CLANE_1_RW_HS_TX_9	0xa612
#define CORE_DIG_CLANE_1_RW_HS_TX_10	0xa614
#define CORE_DIG_CLANE_1_RW_HS_TX_11	0xa616
#define CORE_DIG_CLANE_1_RW_HS_TX_12	0xa618
#define CORE_DIG_CLANE_1_RW_HS_TX_13	0xa61a
#define CORE_DIG_CLANE_2_RW_CFG_0	0xa800
#define CORE_DIG_CLANE_2_RW_CFG_2	0xa804
#define CORE_DIG_CLANE_2_RW_LP_0	0xa880
#define CORE_DIG_CLANE_2_RW_LP_1	0xa882
#define CORE_DIG_CLANE_2_RW_LP_2	0xa884
#define CORE_DIG_CLANE_2_R_LP_0	0xa8a0
#define CORE_DIG_CLANE_2_R_LP_1	0xa8a2
#define CORE_DIG_CLANE_2_RW_HS_RX_0	0xa900
#define CORE_DIG_CLANE_2_RW_HS_RX_1	0xa902
#define CORE_DIG_CLANE_2_RW_HS_RX_2	0xa904
#define CORE_DIG_CLANE_2_RW_HS_RX_3	0xa906
#define CORE_DIG_CLANE_2_RW_HS_RX_4	0xa908
#define CORE_DIG_CLANE_2_RW_HS_RX_5	0xa90a
#define CORE_DIG_CLANE_2_RW_HS_RX_6	0xa90c
#define CORE_DIG_CLANE_2_R_RX_0	0xa920
#define CORE_DIG_CLANE_2_R_RX_1	0xa922
#define CORE_DIG_CLANE_2_R_TX_0	0xa924
#define CORE_DIG_CLANE_2_R_RX_2	0xa926
#define CORE_DIG_CLANE_2_R_RX_3	0xa928
#define CORE_DIG_CLANE_2_RW_HS_TX_0	0xaa00
#define CORE_DIG_CLANE_2_RW_HS_TX_1	0xaa02
#define CORE_DIG_CLANE_2_RW_HS_TX_2	0xaa04
#define CORE_DIG_CLANE_2_RW_HS_TX_3	0xaa06
#define CORE_DIG_CLANE_2_RW_HS_TX_4	0xaa08
#define CORE_DIG_CLANE_2_RW_HS_TX_5	0xaa0a
#define CORE_DIG_CLANE_2_RW_HS_TX_6	0xaa0c
#define CORE_DIG_CLANE_2_RW_HS_TX_7	0xaa0e
#define CORE_DIG_CLANE_2_RW_HS_TX_8	0xaa10
#define CORE_DIG_CLANE_2_RW_HS_TX_9	0xaa12
#define CORE_DIG_CLANE_2_RW_HS_TX_10	0xaa14
#define CORE_DIG_CLANE_2_RW_HS_TX_11	0xaa16
#define CORE_DIG_CLANE_2_RW_HS_TX_12	0xaa18
#define CORE_DIG_CLANE_2_RW_HS_TX_13	0xaa1a
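
/*
 * Illustrative sketch, not part of the original header: all of the
 * C/D-PHY register offsets above are relative to one PHY instance, so
 * a full address is composed with IS_IO_CDPHY_BASE().  The accessor
 * below is hypothetical; the __iomem base argument and the 16-bit
 * access width (registers are spaced 2 bytes apart) are assumptions
 * for illustration only.
 */
static inline u16 cdphy_read(void __iomem *isys_base, unsigned int phy,
			     u32 reg)
{
	/* e.g. cdphy_read(base, 0, CORE_DIG_DLANE_0_RW_CFG_0) */
	return readw(isys_base + IS_IO_CDPHY_BASE(phy) + reg);
}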
/* dwc csi host controller registers */
#define IS_IO_CSI2_HOST_BASE(i)	(IS_IO_BASE + 0x40000 + \
				 0x2000 * (i))
#define VERSION	0x0
#define N_LANES	0x4
#define CSI2_RESETN	0x8
#define INT_ST_MAIN	0xc
#define DATA_IDS_1	0x10
#define DATA_IDS_2	0x14
#define CDPHY_MODE	0x1c
#define DATA_IDS_VC_1	0x30
#define DATA_IDS_VC_2	0x34
#define PHY_SHUTDOWNZ	0x40
#define DPHY_RSTZ	0x44
#define PHY_RX	0x48
#define PHY_STOPSTATE	0x4c
#define PHY_TEST_CTRL0	0x50
#define PHY_TEST_CTRL1	0x54
#define PPI_PG_PATTERN_VRES	0x60
#define PPI_PG_PATTERN_HRES	0x64
#define PPI_PG_CONFIG	0x68
#define PPI_PG_ENABLE	0x6c
#define PPI_PG_STATUS	0x70
#define VC_EXTENSION	0xc8
#define PHY_CAL	0xcc
#define INT_ST_PHY_FATAL	0xe0
#define INT_MSK_PHY_FATAL	0xe4
#define INT_FORCE_PHY_FATAL	0xe8
#define INT_ST_PKT_FATAL	0xf0
#define INT_MSK_PKT_FATAL	0xf4
#define INT_FORCE_PKT_FATAL	0xf8
#define INT_ST_PHY	0x110
#define INT_MSK_PHY	0x114
#define INT_FORCE_PHY	0x118
#define INT_ST_LINE	0x130
#define INT_MSK_LINE	0x134
#define INT_FORCE_LINE	0x138
#define INT_ST_BNDRY_FRAME_FATAL	0x280
#define INT_MSK_BNDRY_FRAME_FATAL	0x284
#define INT_FORCE_BNDRY_FRAME_FATAL	0x288
#define INT_ST_SEQ_FRAME_FATAL	0x290
#define INT_MSK_SEQ_FRAME_FATAL	0x294
#define INT_FORCE_SEQ_FRAME_FATAL	0x298
#define INT_ST_CRC_FRAME_FATAL	0x2a0
#define INT_MSK_CRC_FRAME_FATAL	0x2a4
#define INT_FORCE_CRC_FRAME_FATAL	0x2a8
#define INT_ST_PLD_CRC_FATAL	0x2b0
#define INT_MSK_PLD_CRC_FATAL	0x2b4
#define INT_FORCE_PLD_CRC_FATAL	0x2b8
#define INT_ST_DATA_ID	0x2c0
#define INT_MSK_DATA_ID	0x2c4
#define INT_FORCE_DATA_ID	0x2c8
#define INT_ST_ECC_CORRECTED	0x2d0
#define INT_MSK_ECC_CORRECTED	0x2d4
#define INT_FORCE_ECC_CORRECTED	0x2d8
#define SCRAMBLING	0x300
#define SCRAMBLING_SEED1	0x304
#define SCRAMBLING_SEED2	0x308
#define SCRAMBLING_SEED3	0x30c
#define SCRAMBLING_SEED4	0x310
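
/*
 * Minimal bring-up sketch for the DWC CSI-2 host registers above,
 * assuming a mapped isys_base.  The sequencing (program lanes, release
 * the PHY, then release the controller reset) follows the usual DWC
 * CSI-2 host flow and is shown for illustration only; it is not the
 * driver's actual enable path.
 */
static inline void csi2_host_enable_sketch(void __iomem *isys_base,
					   unsigned int port, u32 nlanes)
{
	void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(port);

	writel(nlanes - 1, base + N_LANES);	/* N_LANES holds lanes - 1 */
	writel(1, base + PHY_SHUTDOWNZ);	/* PHY out of shutdown */
	writel(1, base + DPHY_RSTZ);		/* PHY out of reset */
	writel(1, base + CSI2_RESETN);		/* controller out of reset */
}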
#define IS_IO_CSI2_ADPL_PORT_BASE(i)	(IS_IO_BASE + 0x40800 + \
					 0x2000 * (i))
#define CSI2_ADPL_INPUT_MODE	0x0
#define CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_EN	0x4
#define CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_ADDR	0x8
#define CSI2_ADPL_CSI_RX_ERR_IRQ_STATUS	0xc
#define CSI2_ADPL_IRQ_CTL_COMMON_STATUS	0xa4
#define CSI2_ADPL_IRQ_CTL_COMMON_CLEAR	0xa8
#define CSI2_ADPL_IRQ_CTL_COMMON_ENABLE	0xac
#define CSI2_ADPL_IRQ_CTL_FS_STATUS	0xbc
#define CSI2_ADPL_IRQ_CTL_FS_CLEAR	0xc0
#define CSI2_ADPL_IRQ_CTL_FS_ENABLE	0xc4
#define CSI2_ADPL_IRQ_CTL_FE_STATUS	0xc8
#define CSI2_ADPL_IRQ_CTL_FE_CLEAR	0xcc
#define CSI2_ADPL_IRQ_CTL_FE_ENABLE	0xd0

/* software controls the legacy CSI irqs */
#define IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(i)	(IS_IO_BASE + 0x40c00 + \
						 0x2000 * (i))
#define IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(i)	(IS_IO_BASE + 0x40d00 + \
						 0x2000 * (i))
#define IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE	(IS_IO_BASE + 0x49000)
#define IS_IO_CSI2_IRQ_CTRL_BASE	(IS_IO_BASE + 0x4e100)
#define IRQ_CTL_EDGE	0x0
#define IRQ_CTL_MASK	0x4
#define IRQ_CTL_STATUS	0x8
#define IRQ_CTL_CLEAR	0xc
#define IRQ_CTL_ENABLE	0x10
/* FE irq for PTL */
#define IRQ1_CTL_MASK	0x14
#define IRQ1_CTL_STATUS	0x18
#define IRQ1_CTL_CLEAR	0x1c
#define IRQ1_CTL_ENABLE	0x20

/* software sets the clock gate to use the port or the MGC */
#define IS_IO_GPREGS_BASE	(IS_IO_BASE + 0x49200)
#define SRST_PORT_ARB	0x0
#define SRST_MGC	0x4
#define SRST_WIDTH_CONV	0x8
#define SRST_CSI_IRQ	0xc
#define SRST_CSI_LEGACY_IRQ	0x10
#define CLK_EN_TXCLKESC	0x14
#define CLK_DIV_FACTOR_IS_CLK	0x18
#define CLK_DIV_FACTOR_APB_CLK	0x1c
#define CSI_PORT_CLK_GATE	0x20
#define CSI_PORTAB_AGGREGATION	0x24
#define MGC_CLK_GATE	0x2c
#define CG_CTRL_BITS	0x3c
#define SPARE_RW	0xf8
#define SPARE_RO	0xfc

#define IS_IO_CSI2_MPF_PORT_BASE(i)	(IS_IO_BASE + 0x53000 + \
					 0x1000 * (i))
#define MPF_16_IRQ_CNTRL_STATUS	0x238
#define MPF_16_IRQ_CNTRL_CLEAR	0x23c
#define MPF_16_IRQ_CNTRL_ENABLE	0x240

/* software configures the PHY */
#define IS_IO_CSI2_GPREGS_BASE(i)	(IS_IO_BASE + 0x53400 + \
					 0x1000 * (i))
#define CSI_ADAPT_LAYER_SRST	0x0
#define MPF_SRST_RST	0x4
#define CSI_ERR_IRQ_CTRL_SRST	0x8
#define CSI_SYNC_RC_SRST	0xc
#define CSI_CG_CTRL_BITS	0x10
#define SOC_CSI2HOST_SELECT	0x14
#define PHY_RESET	0x18
#define PHY_SHUTDOWN	0x1c
#define PHY_MODE	0x20
#define PHY_READY	0x24
#define PHY_CLK_LANE_FORCE_CONTROL	0x28
#define PHY_CLK_LANE_CONTROL	0x2c
#define PHY_CLK_LANE_STATUS	0x30
#define PHY_LANE_RX_ESC_REQ	0x34
#define PHY_LANE_RX_ESC_DATA	0x38
#define PHY_LANE_TURNDISABLE	0x3c
#define PHY_LANE_DIRECTION	0x40
#define PHY_LANE_FORCE_CONTROL	0x44
#define PHY_LANE_CONTROL_EN	0x48
#define PHY_LANE_CONTROL_DATAWIDTH	0x4c
#define PHY_LANE_STATUS	0x50
#define PHY_LANE_ERR	0x54
#define PHY_LANE_RXALP	0x58
#define PHY_LANE_RXALP_NIBBLE	0x5c
#define PHY_PARITY_ERROR	0x60
#define PHY_DEBUG_REGS_CLK_GATE_EN	0x64
#define SPARE_RW	0xf8
#define SPARE_RO	0xfc

/* not touched by software */
#define PORT_ARB_BASE	(IS_IO_BASE + 0x4e000)
#define PORT_ARB_IRQ_CTL_STATUS	0x4
#define PORT_ARB_IRQ_CTL_CLEAR	0x8
#define PORT_ARB_IRQ_CTL_ENABLE	0xc

#define MGC_PPC	4
#define MGC_DTYPE_RAW(i)	(((i) - 8) / 2)
#define IS_IO_MGC_BASE	(IS_IO_BASE + 0x48000)
#define MGC_KICK	0x0
#define MGC_ASYNC_STOP	0x4
#define MGC_PORT_OFFSET	0x100
#define MGC_CSI_PORT_MAP(i)	(0x8 + (i) * 0x4)
#define MGC_MG_PORT(i)	(IS_IO_MGC_BASE + \
			 (i) * MGC_PORT_OFFSET)
/* per mgc instance */
#define MGC_MG_CSI_ADAPT_LAYER_TYPE	0x28
#define MGC_MG_MODE	0x2c
#define MGC_MG_INIT_COUNTER	0x30
#define MGC_MG_MIPI_VC	0x34
#define MGC_MG_MIPI_DTYPES	0x38
#define MGC_MG_MULTI_DTYPES_MODE	0x3c
#define MGC_MG_NOF_FRAMES	0x40
#define MGC_MG_FRAME_DIM	0x44
#define MGC_MG_HBLANK	0x48
#define MGC_MG_VBLANK	0x4c
#define MGC_MG_TPG_MODE	0x50
#define MGC_MG_TPG_R0	0x54
#define MGC_MG_TPG_G0	0x58
#define MGC_MG_TPG_B0	0x5c
#define MGC_MG_TPG_R1	0x60
#define MGC_MG_TPG_G1	0x64
#define MGC_MG_TPG_B1	0x68
#define MGC_MG_TPG_FACTORS	0x6c
#define MGC_MG_TPG_MASKS	0x70
#define MGC_MG_TPG_XY_MASK	0x74
#define MGC_MG_TPG_TILE_DIM	0x78
#define MGC_MG_PRBS_LFSR_INIT_0	0x7c
#define MGC_MG_PRBS_LFSR_INIT_1	0x80
#define MGC_MG_SYNC_STOP_POINT	0x84
#define MGC_MG_SYNC_STOP_POINT_LOC	0x88
#define MGC_MG_ERR_INJECT	0x8c
#define MGC_MG_ERR_LOCATION	0x90
#define MGC_MG_DTO_SPEED_CTRL_EN	0x94
#define MGC_MG_DTO_SPEED_CTRL_INCR_VAL	0x98
#define MGC_MG_HOR_LOC_STTS	0x9c
#define MGC_MG_VER_LOC_STTS	0xa0
#define MGC_MG_FRAME_NUM_STTS	0xa4
#define MGC_MG_BUSY_STTS	0xa8
#define MGC_MG_STOPPED_STTS	0xac

/* tile width and height in pixels for Chess board and Color palette */
#define MGC_TPG_TILE_WIDTH	64
#define MGC_TPG_TILE_HEIGHT	64

#define IPU_CSI_PORT_A_ADDR_OFFSET	0x0
#define IPU_CSI_PORT_B_ADDR_OFFSET	0x0
#define IPU_CSI_PORT_C_ADDR_OFFSET	0x0
#define IPU_CSI_PORT_D_ADDR_OFFSET	0x0

/*
 * 0 - CSI RX Port 0 interrupt;
 * 1 - MPF Port 0 interrupt;
 * 2 - CSI RX Port 1 interrupt;
 * 3 - MPF Port 1 interrupt;
 * 4 - CSI RX Port 2 interrupt;
 * 5 - MPF Port 2 interrupt;
 * 6 - CSI RX Port 3 interrupt;
 * 7 - MPF Port 3 interrupt;
 * 8 - Port ARB FIFO 0 overflow;
 * 9 - Port ARB FIFO 1 overflow;
 * 10 - Port ARB FIFO 2 overflow;
 * 11 - Port ARB FIFO 3 overflow;
 * 12 - isys_cfgnoc_err_probe_intl;
 * 13-15 - reserved
 */
#define IPU7_CSI_IS_IO_IRQ_MASK	0xffff

/* Adapter layer irq */
#define IPU7_CSI_ADPL_IRQ_MASK	0xffff

/*
 * sw irq from legacy irq control
 * legacy irq status
 * IPU7
 * 0 - CSI Port 0 error interrupt
 * 1 - CSI Port 0 sync interrupt
 * 2 - CSI Port 1 error interrupt
 * 3 - CSI Port 1 sync interrupt
 * 4 - CSI Port 2 error interrupt
 * 5 - CSI Port 2 sync interrupt
 * 6 - CSI Port 3 error interrupt
 * 7 - CSI Port 3 sync interrupt
 * IPU7P5
 * 0 - CSI Port 0 error interrupt
 * 1 - CSI Port 0 fs interrupt
 * 2 - CSI Port 0 fe interrupt
 * 3 - CSI Port 1 error interrupt
 * 4 - CSI Port 1 fs interrupt
 * 5 - CSI Port 1 fe interrupt
 * 6 - CSI Port 2 error interrupt
 * 7 - CSI Port 2 fs interrupt
 * 8 - CSI Port 2 fe interrupt
 */
#define IPU7_CSI_RX_LEGACY_IRQ_MASK	0x1ff

/*
 * legacy error status per port
 * 0 - Error handler FIFO full;
 * 1 - Reserved Short Packet encoding detected;
 * 2 - Reserved Long Packet encoding detected;
 * 3 - Received packet is too short (fewer data words than specified in header);
 * 4 - Received packet is too long (more data words than specified in header);
 * 5 - Short packet discarded due to errors;
 * 6 - Long packet discarded due to errors;
 * 7 - CSI Combo Rx interrupt;
 * 8 - IDI CDC FIFO overflow; remaining bits are reserved and tied to 0;
 */
#define IPU7_CSI_RX_ERROR_IRQ_MASK	0xfff
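
/*
 * Sketch of decoding a per-port legacy error status word according to
 * the bit map documented above.  Hypothetical helper, shown only to
 * illustrate how the mask is meant to be applied; the reporting style
 * is an assumption.
 */
static inline void csi2_report_rx_errors_sketch(struct device *dev,
						u32 status)
{
	unsigned int i;

	status &= IPU7_CSI_RX_ERROR_IRQ_MASK;
	for (i = 0; i < 32; i++)
		if (status & BIT(i))
			dev_warn(dev, "CSI-2 RX error bit %u set\n", i);
}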
/*
 * 0 - VC0 frame start received
 * 1 - VC0 frame end received
 * 2 - VC1 frame start received
 * 3 - VC1 frame end received
 * 4 - VC2 frame start received
 * 5 - VC2 frame end received
 * 6 - VC3 frame start received
 * 7 - VC3 frame end received
 * 8 - VC4 frame start received
 * 9 - VC4 frame end received
 * 10 - VC5 frame start received
 * 11 - VC5 frame end received
 * 12 - VC6 frame start received
 * 13 - VC6 frame end received
 * 14 - VC7 frame start received
 * 15 - VC7 frame end received
 * 16 - VC8 frame start received
 * 17 - VC8 frame end received
 * 18 - VC9 frame start received
 * 19 - VC9 frame end received
 * 20 - VC10 frame start received
 * 21 - VC10 frame end received
 * 22 - VC11 frame start received
 * 23 - VC11 frame end received
 * 24 - VC12 frame start received
 * 25 - VC12 frame end received
 * 26 - VC13 frame start received
 * 27 - VC13 frame end received
 * 28 - VC14 frame start received
 * 29 - VC14 frame end received
 * 30 - VC15 frame start received
 * 31 - VC15 frame end received
 */
#define IPU7_CSI_RX_SYNC_IRQ_MASK	0	/* 0xffffffff */
#define IPU7P5_CSI_RX_SYNC_FE_IRQ_MASK	0	/* 0xffffffff */

#define CSI_RX_NUM_ERRORS_IN_IRQ	12
#define CSI_RX_NUM_SYNC_IN_IRQ		32

enum CSI_FE_MODE_TYPE {
	CSI_FE_DPHY_MODE = 0,
	CSI_FE_CPHY_MODE = 1,
};

enum CSI_FE_INPUT_MODE {
	CSI_SENSOR_INPUT = 0,
	CSI_MIPIGEN_INPUT = 1,
};

enum MGC_CSI_ADPL_TYPE {
	MGC_MAPPED_2_LANES = 0,
	MGC_MAPPED_4_LANES = 1,
};

enum CSI2HOST_SELECTION {
	CSI2HOST_SEL_SOC = 0,
	CSI2HOST_SEL_CSI2HOST = 1,
};

#define IPU7_ISYS_LEGACY_IRQ_CSI2(port)		(0x3 << (port))
#define IPU7P5_ISYS_LEGACY_IRQ_CSI2(port)	(0x7 << (port))

/* ---------------------------------------------------------------- */
#define CSI_REG_BASE		0x220000
#define CSI_REG_BASE_PORT(id)	((id) * 0x1000)

/* CSI Port General Purpose Registers */
#define CSI_REG_PORT_GPREG_SRST			0x0
#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST	0x4
#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL	0x8

#define CSI_RX_NUM_IRQ	32

/* TODO: support multiple VCs */
#define IPU_CSI_RX_SYNC_FS_VC		BIT(0)
#define IPU_CSI_RX_SYNC_FE_VC		BIT(1)
#define IPU7P5_CSI_RX_SYNC_FE_VC	BIT(0)

#endif /* IPU7_ISYS_CSI2_REG_H */

ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - 2024 Intel Corporation
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "ipu7.h"
#include "ipu7-bus.h"
#include "ipu7-isys.h"
#include "ipu7-isys-csi2.h"
#include "ipu7-isys-csi2-regs.h"
#include "ipu7-isys-csi-phy.h"

static const u32 csi2_supported_codes[] = {
	MEDIA_BUS_FMT_Y10_1X10,
	MEDIA_BUS_FMT_RGB565_1X16,
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
	MEDIA_BUS_FMT_YUYV10_1X20,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SBGGR12_1X12,
	MEDIA_BUS_FMT_SGBRG12_1X12,
	MEDIA_BUS_FMT_SGRBG12_1X12,
	MEDIA_BUS_FMT_SRGGB12_1X12,
	MEDIA_BUS_FMT_SBGGR8_1X8,
	MEDIA_BUS_FMT_SGBRG8_1X8,
	MEDIA_BUS_FMT_SGRBG8_1X8,
	MEDIA_BUS_FMT_SRGGB8_1X8,
	0,
};
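
/*
 * The table above is zero-terminated, so a format lookup is a simple
 * linear scan.  This helper is illustrative only and not part of the
 * original driver.
 */
static inline bool csi2_code_supported_sketch(u32 code)
{
	unsigned int i;

	for (i = 0; csi2_supported_codes[i]; i++)
		if (csi2_supported_codes[i] == code)
			return true;

	return false;
}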
s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2)
{
	struct media_pad *src_pad;
	struct v4l2_subdev *ext_sd;
	struct device *dev;

	if (!csi2)
		return -EINVAL;

	dev = &csi2->isys->adev->auxdev.dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)
	src_pad = media_entity_remote_pad(csi2->asd.sd.entity.pads);
#else
	src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
#endif
	if (IS_ERR(src_pad)) {
		dev_err(dev, "can't get source pad of %s (%ld)\n",
			csi2->asd.sd.name, PTR_ERR(src_pad));
		return PTR_ERR(src_pad);
	}

	ext_sd = media_entity_to_v4l2_subdev(src_pad->entity);
	if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name))
		return -ENODEV;

	return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0);
}

static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				struct v4l2_event_subscription *sub)
{
	struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
	struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd);
	struct device *dev = &csi2->isys->adev->auxdev.dev;

	dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n",
		sub->type, sub->id);

	switch (sub->type) {
	case V4L2_EVENT_FRAME_SYNC:
		return v4l2_event_subscribe(fh, sub, 10, NULL);
	case V4L2_EVENT_CTRL:
		return v4l2_ctrl_subscribe_event(fh, sub);
	default:
		return -EINVAL;
	}
}

static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
	.subscribe_event = csi2_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static void csi2_irq_en(struct ipu7_isys_csi2 *csi2, bool enable)
{
	struct ipu7_device *isp = csi2->isys->adev->isp;
	unsigned int offset, mask;

	if (!enable) {
		/* disable CSI2 legacy error irq */
		offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port);
		mask = IPU7_CSI_RX_ERROR_IRQ_MASK;
		writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
		writel(0, csi2->base + offset + IRQ_CTL_MASK);
		writel(0, csi2->base + offset + IRQ_CTL_ENABLE);

		/* disable CSI2 legacy sync irq */
		offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port);
		mask = IPU7_CSI_RX_SYNC_IRQ_MASK;
		writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
		writel(0, csi2->base + offset + IRQ_CTL_MASK);
		writel(0, csi2->base + offset + IRQ_CTL_ENABLE);
		if (is_ipu7p5(isp->hw_ver)) {
			writel(mask, csi2->base + offset + IRQ1_CTL_CLEAR);
			writel(0, csi2->base + offset + IRQ1_CTL_MASK);
			writel(0, csi2->base + offset + IRQ1_CTL_ENABLE);
		}

		return;
	}

	/* enable CSI2 legacy error irq */
	offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port);
	mask = IPU7_CSI_RX_ERROR_IRQ_MASK;
	writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
	writel(mask, csi2->base + offset + IRQ_CTL_MASK);
	writel(mask, csi2->base + offset + IRQ_CTL_ENABLE);

	/* enable CSI2 legacy sync irq */
	offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port);
	mask = IPU7_CSI_RX_SYNC_IRQ_MASK;
	writel(mask, csi2->base + offset + IRQ_CTL_CLEAR);
	writel(mask, csi2->base + offset + IRQ_CTL_MASK);
	writel(mask, csi2->base + offset + IRQ_CTL_ENABLE);

	mask = IPU7P5_CSI_RX_SYNC_FE_IRQ_MASK;
	if (is_ipu7p5(isp->hw_ver)) {
		writel(mask, csi2->base + offset + IRQ1_CTL_CLEAR);
		writel(mask, csi2->base + offset + IRQ1_CTL_MASK);
		writel(mask, csi2->base + offset + IRQ1_CTL_ENABLE);
	}
}

static void ipu7_isys_csi2_disable_stream(struct ipu7_isys_csi2 *csi2,
					  struct ipu7_isys_csi2_config *cfg)
{
	struct ipu7_isys *isys = csi2->isys;
	void __iomem *isys_base = isys->pdata->base;
	unsigned int port, nlanes, offset, val;

	port = cfg->port;
	nlanes = cfg->nlanes;

	csi2_irq_en(csi2, 0);

	offset = IS_IO_GPREGS_BASE;
	val = readl(isys_base + offset + CSI_PORT_CLK_GATE);
	val &= ~(1 << port);
	if (port == 0 && nlanes == 4 && is_ipu7p5(isys->adev->isp->hw_ver))
		val &= ~BIT(1);
	writel(val, isys_base + offset + CSI_PORT_CLK_GATE);

	/* power down */
	ipu7_isys_csi_phy_powerdown(isys, cfg);

	writel(0x4, isys_base + offset + CLK_DIV_FACTOR_APB_CLK);
}

static int ipu7_isys_csi2_enable_stream(struct ipu7_isys_csi2 *csi2,
					struct ipu7_isys_csi2_config *cfg)
{
	struct ipu7_isys *isys = csi2->isys;
	struct device *dev = &isys->adev->auxdev.dev;
	void __iomem *isys_base = isys->pdata->base;
	unsigned int port, nlanes, offset, val;
	int ret;

	port = cfg->port;
	nlanes = cfg->nlanes;

	offset = IS_IO_GPREGS_BASE;
	/* port AB support aggregation, configure 2 ports */
	val = readl(isys_base + offset + CSI_PORT_CLK_GATE);
	writel(val | (0x3 << (port & 0x2)),
	       isys_base + offset + CSI_PORT_CLK_GATE);
	writel(0x2, isys_base + offset + CLK_DIV_FACTOR_APB_CLK);
	dev_dbg(dev, "port %u CLK_GATE = 0x%04x DIV_FACTOR_APB_CLK=0x%04x\n",
		port, readl(isys_base + offset + CSI_PORT_CLK_GATE),
		readl(isys_base + offset + CLK_DIV_FACTOR_APB_CLK));

	if (port == 0 && nlanes == 4 && is_ipu7p5(isys->adev->isp->hw_ver)) {
		dev_info(dev, "CSI port %u in aggregation mode\n", port);
		writel(0x1, isys_base + offset + CSI_PORTAB_AGGREGATION);
	}

	/* input is coming from CSI receiver (sensor) */
	offset = IS_IO_CSI2_ADPL_PORT_BASE(port);
	writel(CSI_SENSOR_INPUT, isys_base + offset + CSI2_ADPL_INPUT_MODE);
	writel(1, isys_base + offset + CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_EN);

	/* Enable DPHY power */
	ret = ipu7_isys_csi_phy_powerup(isys, cfg);
	if (ret) {
		dev_err(dev, "CSI-%d PHY power up failed %d\n", port, ret);
		return ret;
	}

	csi2_irq_en(csi2, 1);

	return 0;
}

static int set_stream(struct v4l2_subdev *sd, int enable)
{
	struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
	struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd);
	struct ipu7_isys_stream *stream =
		container_of(media_entity_pipeline(&sd->entity),
			     struct ipu7_isys_stream, pipe);
	struct v4l2_subdev *esd =
		media_entity_to_v4l2_subdev(stream->external->entity);
	struct ipu7_isys_csi2_config *cfg;

	stream->asd = asd;
	cfg = v4l2_get_subdev_hostdata(esd);
	dev_dbg(&csi2->isys->adev->auxdev.dev,
		"stream %s CSI2-%u with %u lanes\n", enable ? "on" : "off",
		cfg->port, cfg->nlanes);

	if (!enable) {
		ipu7_isys_csi2_disable_stream(csi2, cfg);
		return 0;
	}

	return ipu7_isys_csi2_enable_stream(csi2, cfg);
}

static const struct v4l2_subdev_video_ops csi2_sd_video_ops = {
	.s_stream = set_stream,
};

static int ipu7_isys_csi2_set_sel(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
	struct device *dev = &asd->isys->adev->auxdev.dev;
	struct v4l2_mbus_framefmt *sink_ffmt =
		ipu7_isys_get_ffmt(sd, state, CSI2_PAD_SINK, sel->which);
	struct v4l2_mbus_framefmt *src_ffmt =
		ipu7_isys_get_ffmt(sd, state, sel->pad, sel->which);

	if (sel->pad == CSI2_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	mutex_lock(&asd->mutex);
	/* Only vertical cropping is supported */
	sel->r.left = 0;
	sel->r.width = sink_ffmt->width;
	/* Non-bayer formats can't be single line cropped */
	if (!ipu7_isys_is_bayer_format(sink_ffmt->code))
		sel->r.top &= ~1;
	sel->r.height = clamp(sel->r.height & ~1, IPU_ISYS_MIN_HEIGHT,
			      sink_ffmt->height - sel->r.top);
	*ipu7_isys_get_crop(sd, state, sel->pad, sel->which) = sel->r;

	/* update source pad format */
	src_ffmt->width = sel->r.width;
	src_ffmt->height = sel->r.height;
	if (ipu7_isys_is_bayer_format(sink_ffmt->code))
		src_ffmt->code = ipu7_isys_convert_bayer_order(sink_ffmt->code,
							       sel->r.left,
							       sel->r.top);
	dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n",
		sd->name, sel->r.left, sel->r.top, sel->r.width,
		sel->r.height, src_ffmt->code);
	mutex_unlock(&asd->mutex);

	return 0;
}

static int ipu7_isys_csi2_get_sel(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd);
	struct v4l2_mbus_framefmt *sink_ffmt =
		ipu7_isys_get_ffmt(sd, state, CSI2_PAD_SINK, sel->which);
	struct v4l2_rect *crop =
		ipu7_isys_get_crop(sd, state, sel->pad, sel->which);
	int ret = 0;

	if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK)
		return -EINVAL;

	mutex_lock(&asd->mutex);
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = sink_ffmt->width;
		sel->r.height = sink_ffmt->height;
		break;
	case V4L2_SEL_TGT_CROP:
		sel->r = *crop;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&asd->mutex);

	return ret;
}

static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
	.link_validate = ipu7_isys_subdev_link_validate,
	.get_fmt = ipu7_isys_subdev_get_fmt,
	.set_fmt = ipu7_isys_subdev_set_fmt,
	.get_selection = ipu7_isys_csi2_get_sel,
	.set_selection = ipu7_isys_csi2_set_sel,
	.enum_mbus_code = ipu7_isys_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops csi2_sd_ops = {
	.core = &csi2_sd_core_ops,
	.video = &csi2_sd_video_ops,
	.pad = &csi2_sd_pad_ops,
};

static const struct media_entity_operations csi2_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

void ipu7_isys_csi2_cleanup(struct ipu7_isys_csi2 *csi2)
{
	if (!csi2->isys)
		return;

	v4l2_device_unregister_subdev(&csi2->asd.sd);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
	v4l2_subdev_cleanup(&csi2->asd.sd);
#endif
	ipu7_isys_subdev_cleanup(&csi2->asd);
	csi2->isys = NULL;
}

int ipu7_isys_csi2_init(struct ipu7_isys_csi2 *csi2, struct ipu7_isys *isys,
			void __iomem *base, unsigned int index)
{
	struct device *dev = &isys->adev->auxdev.dev;
	int ret;

	csi2->isys = isys;
	csi2->base = base;
	csi2->port = index;

	if (is_ipu7p5(isys->adev->isp->hw_ver))
		csi2->legacy_irq_mask = 0x7 << (index * 3);
	else
		csi2->legacy_irq_mask = 0x3 << (index * 2);
	dev_dbg(dev, "csi-%d legacy irq mask = 0x%x\n", index,
		csi2->legacy_irq_mask);

	csi2->asd.sd.entity.ops = &csi2_entity_ops;
	csi2->asd.isys = isys;

	ret = ipu7_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
				    NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS);
	if (ret)
		goto fail;

	csi2->asd.source = IPU_INSYS_MIPI_PORT_0 + index;
	csi2->asd.supported_codes = csi2_supported_codes;
	snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
		 IPU_ISYS_ENTITY_PREFIX " CSI2 %u", index);
	v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)
	ret = v4l2_subdev_init_finalize(&csi2->asd.sd);
	if (ret) {
		dev_err(dev, "failed to init v4l2 subdev (%d)\n", ret);
		goto fail;
	}
#endif

	ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
	if (ret) {
		dev_err(dev, "failed to register v4l2 subdev (%d)\n", ret);
		goto fail;
	}

	return 0;

fail:
	ipu7_isys_csi2_cleanup(csi2);

	return ret;
}

void ipu7_isys_csi2_sof_event_by_stream(struct ipu7_isys_stream *stream)
{
	struct ipu7_isys_csi2 *csi2 = ipu7_isys_subdev_to_csi2(stream->asd);
	struct device *dev = &stream->isys->adev->auxdev.dev;
	struct video_device *vdev = csi2->asd.sd.devnode;
	struct v4l2_event ev = {
		.type = V4L2_EVENT_FRAME_SYNC,
	};

	ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence);
	v4l2_event_queue(vdev, &ev);

	dev_dbg(dev, "sof_event::csi2-%i sequence: %i\n", csi2->port,
		ev.u.frame_sync.frame_sequence);
}

void ipu7_isys_csi2_eof_event_by_stream(struct ipu7_isys_stream *stream)
{
	struct ipu7_isys_csi2 *csi2 = ipu7_isys_subdev_to_csi2(stream->asd);
	struct device *dev = &stream->isys->adev->auxdev.dev;
	u32 frame_sequence = atomic_read(&stream->sequence);

	dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n", csi2->port,
		frame_sequence);
}
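
/*
 * Userspace counterpart sketch: the SOF handler above queues
 * V4L2_EVENT_FRAME_SYNC on the CSI-2 subdev node, which an application
 * can receive with the standard V4L2 event API.  Error handling is
 * abbreviated and the device path is an assumption.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int subscribe_frame_sync(const char *subdev_path)
{
	struct v4l2_event_subscription sub = {
		.type = V4L2_EVENT_FRAME_SYNC,
	};
	int fd = open(subdev_path, O_RDWR);	/* e.g. "/dev/v4l-subdevN" */

	if (fd < 0)
		return -1;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	return fd;	/* poll() for POLLPRI, then VIDIOC_DQEVENT */
}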
"ipu7-isys-subdev.h" #include "ipu7-isys-video.h" struct ipu7_isys; struct ipu7_isys_csi2_pdata; struct ipu7_isys_stream; #define NR_OF_CSI2_VC 16 #define INVALID_VC_ID -1 #define NR_OF_CSI2_SINK_PADS 1 #define CSI2_PAD_SINK 0 #define NR_OF_CSI2_SRC_PADS 8 #define CSI2_PAD_SRC 1 #define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS) #define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 #define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 #define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 #define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 #define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 #define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 #define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 #define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 #define CSI2_CROP_HOR BIT(0) #define CSI2_CROP_VER BIT(1) #define CSI2_CROP_MASK (CSI2_CROP_VER | CSI2_CROP_HOR) /* * struct ipu7_isys_csi2 * * @nlanes: number of lanes in the receiver */ struct ipu7_isys_csi2 { struct ipu7_isys_subdev asd; struct ipu7_isys_csi2_pdata *pdata; struct ipu7_isys *isys; struct ipu7_isys_video av[NR_OF_CSI2_SRC_PADS]; void __iomem *base; u32 receiver_errors; u32 legacy_irq_mask; unsigned int nlanes; unsigned int port; }; #define ipu7_isys_subdev_to_csi2(__sd) \ container_of(__sd, struct ipu7_isys_csi2, asd) #define to_ipu7_isys_csi2(__asd) container_of(__asd, struct ipu7_isys_csi2, asd) s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2); int ipu7_isys_csi2_init(struct ipu7_isys_csi2 *csi2, struct ipu7_isys *isys, void __iomem *base, unsigned int index); void ipu7_isys_csi2_cleanup(struct ipu7_isys_csi2 *csi2); void ipu7_isys_csi2_sof_event_by_stream(struct ipu7_isys_stream *stream); void ipu7_isys_csi2_eof_event_by_stream(struct ipu7_isys_stream *stream); #endif /* IPU7_ISYS_CSI2_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-queue.c000066400000000000000000000532721465530421300274240ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_isys_abi.h" #include "ipu7-bus.h" #include "ipu7-fw-isys.h" #include "ipu7-isys.h" #include "ipu7-isys-video.h" #define IPU_MAX_FRAME_COUNTER (U8_MAX + 1) static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; u32 size = av->pix_fmt.sizeimage; /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ if (!*num_planes) { sizes[0] = size; } else if (sizes[0] < size) { dev_dbg(dev, "%s: queue setup: size %u < %u\n", av->vdev.name, sizes[0], size); return -EINVAL; } *num_planes = 1; return 0; } static int ipu7_isys_buf_prepare(struct vb2_buffer *vb) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; u32 bytesperline = av->pix_fmt.bytesperline; u32 height = av->pix_fmt.height; dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n", av->vdev.name, av->pix_fmt.sizeimage, vb2_plane_size(vb, 0)); if (av->pix_fmt.sizeimage > vb2_plane_size(vb, 0)) return -EINVAL; dev_dbg(dev, "buffer: %s: bytesperline %u, height %u\n", av->vdev.name, bytesperline, height); vb2_set_plane_payload(vb, 0, 
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-queue.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - 2024 Intel Corporation
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "abi/ipu7_fw_isys_abi.h"

#include "ipu7-bus.h"
#include "ipu7-fw-isys.h"
#include "ipu7-isys.h"
#include "ipu7-isys-video.h"

#define IPU_MAX_FRAME_COUNTER	(U8_MAX + 1)

static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
		       unsigned int *num_planes, unsigned int sizes[],
		       struct device *alloc_devs[])
{
	struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
	struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
	struct device *dev = &av->isys->adev->auxdev.dev;
	u32 size = av->pix_fmt.sizeimage;

	/* num_planes == 0: we're being called through VIDIOC_REQBUFS */
	if (!*num_planes) {
		sizes[0] = size;
	} else if (sizes[0] < size) {
		dev_dbg(dev, "%s: queue setup: size %u < %u\n",
			av->vdev.name, sizes[0], size);
		return -EINVAL;
	}

	*num_planes = 1;

	return 0;
}

static int ipu7_isys_buf_prepare(struct vb2_buffer *vb)
{
	struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue);
	struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
	struct device *dev = &av->isys->adev->auxdev.dev;
	u32 bytesperline = av->pix_fmt.bytesperline;
	u32 height = av->pix_fmt.height;

	dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n",
		av->vdev.name, av->pix_fmt.sizeimage, vb2_plane_size(vb, 0));

	if (av->pix_fmt.sizeimage > vb2_plane_size(vb, 0))
		return -EINVAL;

	dev_dbg(dev, "buffer: %s: bytesperline %u, height %u\n",
		av->vdev.name, bytesperline, height);
	vb2_set_plane_payload(vb, 0, bytesperline * height);
	vb->planes[0].data_offset = 0;

	return 0;
}

/*
 * Queue a buffer list back to incoming or active queues. The buffers
 * are removed from the buffer list.
 */
void ipu7_isys_buffer_list_queue(struct ipu7_isys_buffer_list *bl,
				 unsigned long op_flags,
				 enum vb2_buffer_state state)
{
	struct ipu7_isys_buffer *ib, *ib_safe;
	unsigned long flags;
	bool first = true;

	if (!bl)
		return;

	WARN_ON_ONCE(!bl->nbufs);
	WARN_ON_ONCE(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE &&
		     op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING);

	list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
		struct ipu7_isys_video *av;
		struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib);
		struct ipu7_isys_queue *aq =
			vb2_queue_to_isys_queue(vb->vb2_queue);

		av = ipu7_isys_queue_to_video(aq);
		spin_lock_irqsave(&aq->lock, flags);
		list_del(&ib->head);
		if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
			list_add(&ib->head, &aq->active);
		else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
			list_add_tail(&ib->head, &aq->incoming);
		spin_unlock_irqrestore(&aq->lock, flags);

		if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE)
			vb2_buffer_done(vb, state);

		if (first) {
			dev_dbg(&av->isys->adev->auxdev.dev,
				"queue buf list %p flags %lx, s %d, %d bufs\n",
				bl, op_flags, state, bl->nbufs);
			first = false;
		}

		bl->nbufs--;
	}

	WARN_ON(bl->nbufs);
}

/*
 * flush_firmware_streamon_fail() - Flush in cases where requests may
 * have been queued to firmware and the firmware streamon fails for a
 * reason or another.
 */
static void flush_firmware_streamon_fail(struct ipu7_isys_stream *stream)
{
	struct ipu7_isys_queue *aq;
	unsigned long flags;

	lockdep_assert_held(&stream->mutex);

	list_for_each_entry(aq, &stream->queues, node) {
		struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq);
		struct device *dev = &av->isys->adev->auxdev.dev;
		struct ipu7_isys_buffer *ib, *ib_safe;

		spin_lock_irqsave(&aq->lock, flags);
		list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
			struct vb2_buffer *vb =
				ipu7_isys_buffer_to_vb2_buffer(ib);

			list_del(&ib->head);
			if (av->streaming) {
				dev_dbg(dev,
					"%s: queue buffer %u back to incoming\n",
					av->vdev.name, vb->index);
				/* Queue already streaming, return to driver. */
				list_add(&ib->head, &aq->incoming);
				continue;
			}
			/* Queue not yet streaming, return to user. */
			dev_dbg(dev, "%s: return %u back to videobuf2\n",
				av->vdev.name, vb->index);
			vb2_buffer_done(ipu7_isys_buffer_to_vb2_buffer(ib),
					VB2_BUF_STATE_QUEUED);
		}
		spin_unlock_irqrestore(&aq->lock, flags);
	}
}

/*
 * Attempt obtaining a buffer list from the incoming queues, a list of
 * buffers that contains one entry from each video buffer queue. If a
 * buffer can't be obtained from every queue, the buffers are returned
 * back to the queue.
 */
*/ static int buffer_list_get(struct ipu7_isys_stream *stream, struct ipu7_isys_buffer_list *bl) { unsigned long buf_flag = IPU_ISYS_BUFFER_LIST_FL_INCOMING; struct device *dev = &stream->isys->adev->auxdev.dev; struct ipu7_isys_queue *aq; unsigned long flags; bl->nbufs = 0; INIT_LIST_HEAD(&bl->head); list_for_each_entry(aq, &stream->queues, node) { struct ipu7_isys_buffer *ib; spin_lock_irqsave(&aq->lock, flags); if (list_empty(&aq->incoming)) { spin_unlock_irqrestore(&aq->lock, flags); if (!list_empty(&bl->head)) ipu7_isys_buffer_list_queue(bl, buf_flag, 0); return -ENODATA; } ib = list_last_entry(&aq->incoming, struct ipu7_isys_buffer, head); dev_dbg(dev, "buffer: %s: buffer %u\n", ipu7_isys_queue_to_video(aq)->vdev.name, ipu7_isys_buffer_to_vb2_buffer(ib)->index); list_del(&ib->head); list_add(&ib->head, &bl->head); spin_unlock_irqrestore(&aq->lock, flags); bl->nbufs++; } dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); return 0; } static void ipu7_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb, struct ipu7_insys_buffset *set) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); set->output_pins[aq->fw_output].addr = vb2_dma_contig_plane_dma_addr(vb, 0); set->output_pins[aq->fw_output].user_token = (u64)set; } /* * Convert a buffer list to an isys fw ABI framebuffer set. The * buffer list is not modified. */ #define IPU_ISYS_FRAME_NUM_THRESHOLD (30) void ipu7_isys_buffer_to_fw_frame_buff(struct ipu7_insys_buffset *set, struct ipu7_isys_stream *stream, struct ipu7_isys_buffer_list *bl) { struct ipu7_isys_buffer *ib; u32 buf_id; WARN_ON(!bl->nbufs); /* non-skip case */ set->skip_frame = 0; set->capture_msg_map = 0; /* ignore the flag, always enable the ack */ set->capture_msg_map |= IPU_INSYS_FRAME_ENABLE_MSG_SEND_RESP; set->capture_msg_map |= IPU_INSYS_FRAME_ENABLE_MSG_SEND_IRQ; list_for_each_entry(ib, &bl->head, head) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); buf_id = atomic_fetch_inc(&stream->buf_id); set->frame_id = buf_id % IPU_MAX_FRAME_COUNTER; ipu7_isys_buf_to_fw_frame_buf_pin(vb, set); } } /* Start streaming for real. The buffer list must be available. */ static int ipu7_isys_stream_start(struct ipu7_isys_video *av, struct ipu7_isys_buffer_list *bl, bool error) { struct ipu7_isys_stream *stream = av->stream; struct device *dev = &stream->isys->adev->auxdev.dev; struct ipu7_isys_buffer_list __bl; int ret; mutex_lock(&stream->isys->stream_mutex); ret = ipu7_isys_video_set_streaming(av, 1, bl); mutex_unlock(&stream->isys->stream_mutex); if (ret) goto out_requeue; stream->streaming = 1; bl = &__bl; do { struct ipu7_insys_buffset *buf = NULL; struct isys_fw_msgs *msg; enum ipu7_insys_send_type send_type = IPU_INSYS_SEND_TYPE_STREAM_CAPTURE; ret = buffer_list_get(stream, bl); if (ret < 0) break; msg = ipu7_get_fw_msg_buf(stream); if (!msg) return -ENOMEM; buf = &msg->fw_msg.frame; ipu7_isys_buffer_to_fw_frame_buff(buf, stream, bl); ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); ret = ipu7_fw_isys_complex_cmd(stream->isys, stream->stream_handle, buf, msg->dma_addr, sizeof(*buf), send_type); } while (!WARN_ON(ret)); return 0; out_requeue: if (bl && bl->nbufs) ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_INCOMING | (error ? IPU_ISYS_BUFFER_LIST_FL_SET_STATE : 0), error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); flush_firmware_streamon_fail(stream); return ret; } static void buf_queue(struct vb2_buffer *vb) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); struct ipu7_isys_video_buffer *ivb = vb2_buffer_to_ipu_isys_video_buffer(vvb); struct media_pipeline *media_pipe = media_entity_pipeline(&av->vdev.entity); struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; struct ipu7_isys_buffer *ib = &ivb->ib; struct ipu7_insys_buffset *buf = NULL; struct ipu7_isys_buffer_list bl; struct isys_fw_msgs *msg; unsigned long flags; dma_addr_t dma; int ret; dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name); dma = vb2_dma_contig_plane_dma_addr(vb, 0); dev_dbg(dev, "iova: iova %pad\n", &dma); spin_lock_irqsave(&aq->lock, flags); list_add(&ib->head, &aq->incoming); spin_unlock_irqrestore(&aq->lock, flags); if (!media_pipe || !vb->vb2_queue->start_streaming_called) { dev_dbg(dev, "media pipeline is not ready for %s\n", av->vdev.name); return; } mutex_lock(&stream->mutex); if (stream->nr_streaming != stream->nr_queues) { dev_dbg(dev, "not streaming yet, adding to incoming\n"); goto out; } /* * We just put one buffer to the incoming list of this queue * (above). Let's see whether all queues in the pipeline would * have a buffer. */ ret = buffer_list_get(stream, &bl); if (ret < 0) { dev_dbg(dev, "No buffers available\n"); goto out; } msg = ipu7_get_fw_msg_buf(stream); if (!msg) { ret = -ENOMEM; goto out; } buf = &msg->fw_msg.frame; ipu7_isys_buffer_to_fw_frame_buff(buf, stream, &bl); ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); if (!stream->streaming) { ret = ipu7_isys_stream_start(av, &bl, true); if (ret) dev_err(dev, "stream start failed.\n"); goto out; } /* * We must queue the buffers in the buffer list to the * appropriate video buffer queues BEFORE passing them to the * firmware since we could get a buffer event back before we * have queued them ourselves to the active queue. 
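* This ordering is what lets ipu7_isys_queue_buf_ready() find the buffer on the active list when the firmware response for it arrives.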
*/ ipu7_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); ret = ipu7_fw_isys_complex_cmd(stream->isys, stream->stream_handle, buf, msg->dma_addr, sizeof(*buf), IPU_INSYS_SEND_TYPE_STREAM_CAPTURE); if (ret < 0) dev_err(dev, "send stream capture failed\n"); out: mutex_unlock(&stream->mutex); } static int ipu7_isys_link_fmt_validate(struct ipu7_isys_queue *aq) { struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct v4l2_subdev_format fmt = { 0 }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads); #else struct media_pad *pad = media_pad_remote_pad_first(av->vdev.entity.pads); #endif struct device *dev = &av->isys->adev->auxdev.dev; struct v4l2_subdev *sd; u32 code; int ret; if (!pad) { dev_dbg(dev, "video node %s pad not connected\n", av->vdev.name); return -ENOTCONN; } sd = media_entity_to_v4l2_subdev(pad->entity); fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; fmt.pad = pad->index; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); if (ret) return ret; if (fmt.format.width != av->pix_fmt.width || fmt.format.height != av->pix_fmt.height) { dev_dbg(dev, "wrong width or height %ux%u (%ux%u expected)\n", av->pix_fmt.width, av->pix_fmt.height, fmt.format.width, fmt.format.height); return -EINVAL; } code = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat)->code; if (fmt.format.code != code) { dev_dbg(dev, "wrong media bus code 0x%8.8x (0x%8.8x expected)\n", code, fmt.format.code); return -EINVAL; } return 0; } static void return_buffers(struct ipu7_isys_queue *aq, enum vb2_buffer_state state) { struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct ipu7_isys_buffer *ib; bool need_reset = false; struct vb2_buffer *vb; unsigned long flags; spin_lock_irqsave(&aq->lock, flags); while (!list_empty(&aq->incoming)) { ib = list_first_entry(&aq->incoming, struct ipu7_isys_buffer, head); vb = ipu7_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); vb2_buffer_done(vb, state); spin_lock_irqsave(&aq->lock, flags); } /* * Something went wrong (FW crash / HW hang / not all buffers * returned from isys) if there are still buffers queued in the active * queue. We have to clean up a bit.
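* Completing the leftover active buffers below also raises need_reset, so the next open of the video node will report that a power cycle is required.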
*/ while (!list_empty(&aq->active)) { ib = list_first_entry(&aq->active, struct ipu7_isys_buffer, head); vb = ipu7_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); vb2_buffer_done(vb, state); spin_lock_irqsave(&aq->lock, flags); need_reset = true; } spin_unlock_irqrestore(&aq->lock, flags); if (need_reset) { mutex_lock(&av->isys->mutex); av->isys->need_reset = true; mutex_unlock(&av->isys->mutex); } } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream; struct ipu7_isys_buffer_list __bl, *bl = NULL; const struct ipu7_isys_pixelformat *pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); bool first = false; int ret; dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n", av->vdev.name, av->pix_fmt.width, av->pix_fmt.height, pfmt->css_pixelformat); /* every ipu7_isys_stream is only enabled once */ av->stream = ipu7_isys_get_stream(av->isys); if (!av->stream) { dev_err(dev, "no available stream for firmware\n"); ret = -EBUSY; goto out_return_buffers; } stream = av->stream; mutex_lock(&stream->mutex); if (!stream->nr_streaming) { first = true; ret = ipu7_isys_video_prepare_streaming(av); if (ret) { mutex_unlock(&stream->mutex); goto out_isys_put_stream; } } mutex_unlock(&stream->mutex); ret = ipu7_isys_link_fmt_validate(aq); if (ret) { dev_dbg(dev, "%s: link format validation failed (%d)\n", av->vdev.name, ret); goto out_unprepare_streaming; } mutex_lock(&stream->mutex); /* TODO: move it from link_validate() temporarily */ stream->nr_queues++; stream->nr_streaming++; dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming, stream->nr_queues); list_add(&aq->node, &stream->queues); if (stream->nr_streaming != stream->nr_queues) goto out; if (list_empty(&av->isys->requests)) { bl = &__bl; ret = buffer_list_get(stream, bl); if (ret == -EINVAL) { goto out_stream_start; } else if (ret < 0) { dev_dbg(dev, "no request available, postponing streamon\n"); goto out; } } ret = ipu7_isys_fw_open(av->isys); if (ret) goto out_stream_start; ret = ipu7_isys_stream_start(av, bl, false); if (ret) goto out_isys_fw_close; out: mutex_unlock(&stream->mutex); return 0; out_isys_fw_close: ipu7_isys_fw_close(av->isys); out_stream_start: list_del(&aq->node); stream->nr_streaming--; mutex_unlock(&stream->mutex); out_unprepare_streaming: if (first) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) media_pipeline_stop(&av->vdev.entity); #else media_pipeline_stop(av->vdev.entity.pads); #endif out_isys_put_stream: ipu7_isys_put_stream(av->stream); av->stream = NULL; out_return_buffers: return_buffers(aq, VB2_BUF_STATE_QUEUED); return ret; } static void stop_streaming(struct vb2_queue *q) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct ipu7_isys_stream *stream = av->stream; mutex_lock(&stream->mutex); mutex_lock(&av->isys->stream_mutex); if (stream->nr_streaming == stream->nr_queues && stream->streaming) ipu7_isys_video_set_streaming(av, 0, NULL); mutex_unlock(&av->isys->stream_mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) media_pipeline_stop(&av->vdev.entity); #else media_pipeline_stop(av->vdev.entity.pads); #endif av->stream = NULL; stream->nr_streaming--; list_del(&aq->node); stream->streaming = 0; mutex_unlock(&stream->mutex); ipu7_isys_put_stream(stream); 
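/* Complete anything still queued as an error and close the firmware session opened in start_streaming(). */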
return_buffers(aq, VB2_BUF_STATE_ERROR); ipu7_isys_fw_close(av->isys); } static unsigned int get_sof_sequence_by_timestamp(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info) { u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0]; struct ipu7_isys *isys = stream->isys; struct device *dev = &isys->adev->auxdev.dev; unsigned int i; /* * The timestamp is invalid as there is no TSC on some FPGA platforms, * so get the sequence from the pipeline directly in this case. */ if (time == 0) return atomic_read(&stream->sequence) - 1; for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) if (time == stream->seq[i].timestamp) { dev_dbg(dev, "SOF: using seq nr %u for ts %llu\n", stream->seq[i].sequence, time); return stream->seq[i].sequence; } dev_dbg(dev, "SOF: looking for %llu\n", time); for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) dev_dbg(dev, "SOF: sequence %u, timestamp value %llu\n", stream->seq[i].sequence, stream->seq[i].timestamp); dev_dbg(dev, "SOF sequence number not found\n"); return 0; } static u64 get_sof_ns_delta(struct ipu7_isys_video *av, struct ipu7_insys_resp *info) { struct ipu7_bus_device *adev = av->isys->adev; struct ipu7_device *isp = adev->isp; u64 delta, tsc_now; ipu7_buttress_tsc_read(isp, &tsc_now); if (!tsc_now) return 0; delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]); return ipu7_buttress_tsc_ticks_to_ns(delta, isp); } void ipu7_isys_buf_calc_sequence_time(struct ipu7_isys_buffer *ib, struct ipu7_insys_resp *info) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; u64 ns; u32 sequence; ns = ktime_get_ns() - get_sof_ns_delta(av, info); sequence = get_sof_sequence_by_timestamp(stream, info); vbuf->vb2_buf.timestamp = ns; vbuf->sequence = sequence; dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", av->vdev.name, ktime_get_ns(), sequence); dev_dbg(dev, "index:%d, vbuf timestamp:%lld, endl\n", vb->index, vbuf->vb2_buf.timestamp); } void ipu7_isys_queue_buf_done(struct ipu7_isys_buffer *ib) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); if (atomic_read(&ib->str2mmio_flag)) { vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); /* * The operation on the buffer ended with an error and will be * reported to userspace when it is de-queued */ atomic_set(&ib->str2mmio_flag, 0); } else { vb2_buffer_done(vb, VB2_BUF_STATE_DONE); } } void ipu7_isys_queue_buf_ready(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info) { struct ipu7_isys_queue *aq = stream->output_pins[info->pin_id].aq; struct ipu7_isys *isys = stream->isys; struct device *dev = &isys->adev->auxdev.dev; struct ipu7_isys_buffer *ib; struct vb2_buffer *vb; unsigned long flags; bool first = true; struct vb2_v4l2_buffer *buf; dev_dbg(dev, "buffer: %s: received buffer %8.8x %d\n", ipu7_isys_queue_to_video(aq)->vdev.name, info->pin.addr, info->frame_id); spin_lock_irqsave(&aq->lock, flags); if (list_empty(&aq->active)) { spin_unlock_irqrestore(&aq->lock, flags); dev_err(dev, "active queue empty\n"); return; } list_for_each_entry_reverse(ib, &aq->active, head) { dma_addr_t addr; vb = ipu7_isys_buffer_to_vb2_buffer(ib); addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (info->pin.addr != addr) { if (first) dev_err(dev, "Unexpected buffer address %pad\n", &addr); first = false; continue; }
dev_dbg(dev, "buffer: found buffer %pad\n", &addr); buf = to_vb2_v4l2_buffer(vb); buf->field = V4L2_FIELD_NONE; list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); ipu7_isys_buf_calc_sequence_time(ib, info); ipu7_isys_queue_buf_done(ib); return; } dev_err(dev, "Failed to find a matching video buffer\n"); spin_unlock_irqrestore(&aq->lock, flags); } static const struct vb2_ops ipu7_isys_queue_ops = { .queue_setup = queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .buf_prepare = ipu7_isys_buf_prepare, .start_streaming = start_streaming, .stop_streaming = stop_streaming, .buf_queue = buf_queue, }; int ipu7_isys_queue_init(struct ipu7_isys_queue *aq) { struct ipu7_isys *isys = ipu7_isys_queue_to_video(aq)->isys; struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); int ret; if (!aq->vbq.io_modes) aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF; aq->vbq.drv_priv = aq; aq->vbq.ops = &ipu7_isys_queue_ops; aq->vbq.lock = &av->mutex; aq->vbq.mem_ops = &vb2_dma_contig_memops; aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 8, 0) aq->vbq.min_queued_buffers = 1; #else aq->vbq.min_buffers_needed = 1; #endif aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ret = vb2_queue_init(&aq->vbq); if (ret) return ret; aq->dev = &isys->adev->auxdev.dev; aq->vbq.dev = &isys->adev->auxdev.dev; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->active); INIT_LIST_HEAD(&aq->incoming); return 0; } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-queue.h000066400000000000000000000041141465530421300274200ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_QUEUE_H #define IPU7_ISYS_QUEUE_H #include #include #include #include #include struct device; struct ipu7_isys_stream; struct ipu7_insys_resp; struct ipu7_insys_buffset; struct ipu7_isys_queue { struct vb2_queue vbq; struct list_head node; struct device *dev; /* * @lock: serialise access to the active and incoming lists */ spinlock_t lock; struct list_head active; struct list_head incoming; unsigned int fw_output; }; struct ipu7_isys_buffer { struct list_head head; atomic_t str2mmio_flag; }; struct ipu7_isys_video_buffer { struct vb2_v4l2_buffer vb_v4l2; struct ipu7_isys_buffer ib; }; #define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) #define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) #define IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) struct ipu7_isys_buffer_list { struct list_head head; unsigned int nbufs; }; #define vb2_queue_to_isys_queue(__vb2) \ container_of(__vb2, struct ipu7_isys_queue, vbq) #define ipu7_isys_to_isys_video_buffer(__ib) \ container_of(__ib, struct ipu7_isys_video_buffer, ib) #define vb2_buffer_to_ipu_isys_video_buffer(__vvb) \ container_of(__vvb, struct ipu7_isys_video_buffer, vb_v4l2) #define ipu7_isys_buffer_to_vb2_buffer(__ib) \ (&ipu7_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) void ipu7_isys_buffer_list_queue(struct ipu7_isys_buffer_list *bl, unsigned long op_flags, enum vb2_buffer_state state); void ipu7_isys_buffer_to_fw_frame_buff(struct ipu7_insys_buffset *set, struct ipu7_isys_stream *stream, struct ipu7_isys_buffer_list *bl); void ipu7_isys_buf_calc_sequence_time(struct ipu7_isys_buffer *ib, struct ipu7_insys_resp *info); void ipu7_isys_queue_buf_done(struct ipu7_isys_buffer *ib); void ipu7_isys_queue_buf_ready(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info); int ipu7_isys_queue_init(struct ipu7_isys_queue *aq);
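/* Typical use (a sketch, not a prescription): embed the queue in a video node, optionally set vbq.io_modes before calling ipu7_isys_queue_init(), and let buffers flow from the incoming list to the active list and back to vb2 as they complete. */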
#endif /* IPU7_ISYS_QUEUE_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c000066400000000000000000000230141465530421300275570ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) #include #endif #include #include #include #include "ipu7-bus.h" #include "ipu7-isys.h" #include "ipu7-isys-subdev.h" unsigned int ipu7_isys_mbus_code_to_bpp(u32 code) { switch (code) { case MEDIA_BUS_FMT_RGB888_1X24: return 24; case MEDIA_BUS_FMT_YUYV10_1X20: return 20; case MEDIA_BUS_FMT_Y10_1X10: case MEDIA_BUS_FMT_RGB565_1X16: case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_YUYV8_1X16: return 16; case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12: return 12; case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10: return 10; case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SRGGB8_1X8: return 8; default: WARN_ON(1); return -EINVAL; } } unsigned int ipu7_isys_mbus_code_to_data_type(u32 code) { switch (code) { case MEDIA_BUS_FMT_RGB565_1X16: return MIPI_CSI2_TYPE_RGB565; case MEDIA_BUS_FMT_RGB888_1X24: return MIPI_CSI2_TYPE_RGB888; case MEDIA_BUS_FMT_YUYV10_1X20: return MIPI_CSI2_TYPE_YUV422_10; case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_YUYV8_1X16: return MIPI_CSI2_TYPE_YUV422_8; case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12: return MIPI_CSI2_TYPE_RAW12; case MEDIA_BUS_FMT_Y10_1X10: case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10: return MIPI_CSI2_TYPE_RAW10; case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SRGGB8_1X8: return MIPI_CSI2_TYPE_RAW8; default: WARN_ON(1); return -EINVAL; } } bool ipu7_isys_is_bayer_format(u32 code) { switch (ipu7_isys_mbus_code_to_data_type(code)) { case MIPI_CSI2_TYPE_RAW8: case MIPI_CSI2_TYPE_RAW10: case MIPI_CSI2_TYPE_RAW12: return true; } return false; } u32 ipu7_isys_convert_bayer_order(u32 code, int x, int y) { static const u32 code_map[] = { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SBGGR12_1X12, }; unsigned int i; for (i = 0; i < ARRAY_SIZE(code_map); i++) if (code_map[i] == code) break; if (i == ARRAY_SIZE(code_map)) { WARN_ON(1); return code; } return code_map[i ^ (((y & 1) << 1) | (x & 1))]; } struct v4l2_mbus_framefmt *ipu7_isys_get_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad, unsigned int which) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); if (which == V4L2_SUBDEV_FORMAT_ACTIVE) return &asd->ffmt[pad]; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 8, 0) return v4l2_subdev_state_get_format(state, pad); #else return v4l2_subdev_get_try_format(sd, state, pad); #endif } struct v4l2_rect *ipu7_isys_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad, unsigned int which) { 
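/* ACTIVE selections live in the subdev itself, TRY selections in the v4l2_subdev_state. */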
struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); if (which == V4L2_SUBDEV_FORMAT_ACTIVE) return &asd->crop; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 8, 0) return v4l2_subdev_state_get_crop(state, pad); #else return v4l2_subdev_get_try_crop(sd, state, pad); #endif } int ipu7_isys_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); struct v4l2_mbus_framefmt *fmt = ipu7_isys_get_ffmt(sd, state, format->pad, format->which); struct v4l2_mbus_framefmt *source_fmt; u32 code = asd->supported_codes[0]; unsigned int i; dev_dbg(&asd->isys->adev->auxdev.dev, "set format for %s pad %d.\n", sd->name, format->pad); mutex_lock(&asd->mutex); /* No transcoding, source and sink formats must match. */ if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) && sd->entity.num_pads > 1) { format->format = *fmt; mutex_unlock(&asd->mutex); return 0; } format->format.width = clamp(format->format.width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); format->format.height = clamp(format->format.height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); for (i = 0; asd->supported_codes[i]; i++) { if (asd->supported_codes[i] == format->format.code) { code = asd->supported_codes[i]; break; } } format->format.code = code; format->format.field = V4L2_FIELD_NONE; *fmt = format->format; if (sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK) { /* propagate format to following source pad */ struct v4l2_rect *crop = ipu7_isys_get_crop(sd, state, format->pad + 1, format->which); dev_dbg(&asd->isys->adev->auxdev.dev, "propagate format to source pad.\n"); source_fmt = ipu7_isys_get_ffmt(sd, state, format->pad + 1, format->which); *source_fmt = format->format; /* reset crop */ crop->left = 0; crop->top = 0; crop->width = fmt->width; crop->height = fmt->height; } mutex_unlock(&asd->mutex); return 0; } int ipu7_isys_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); mutex_lock(&asd->mutex); fmt->format = *ipu7_isys_get_ffmt(sd, state, fmt->pad, fmt->which); mutex_unlock(&asd->mutex); return 0; } int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); const u32 *supported_codes = asd->supported_codes; u32 index; for (index = 0; supported_codes[index]; index++) { if (index == code->index) { code->code = supported_codes[index]; return 0; } } return -EINVAL; } /* * Besides validating the link, figure out the external pad and the * ISYS FW ABI source. */ int ipu7_isys_subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct v4l2_subdev *source_sd = media_entity_to_v4l2_subdev(link->source->entity); struct ipu7_isys_stream *stream = to_ipu7_isys_pipeline(media_entity_pipeline(&sd->entity)); struct device *dev = &stream->isys->adev->auxdev.dev; if (!source_sd) return -ENODEV; if (strncmp(source_sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) { /* * source_sd isn't ours --- sd must be the external * sub-device. 
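* (All ISYS-internal sub-device names begin with IPU_ISYS_ENTITY_PREFIX, which is what the strncmp() above tests for.)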
*/ stream->external = link->source; stream->stream_source = to_ipu7_isys_subdev(sd)->source; dev_dbg(dev, "%s: using source %d\n", sd->entity.name, stream->stream_source); } else if (source_sd->entity.num_pads == 1) { /* All internal sources have a single pad. */ stream->external = link->source; stream->stream_source = to_ipu7_isys_subdev(source_sd)->source; dev_dbg(dev, "%s: using source %d\n", sd->entity.name, stream->stream_source); } return v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt); } int ipu7_isys_subdev_init(struct ipu7_isys_subdev *asd, const struct v4l2_subdev_ops *ops, unsigned int nr_ctrls, unsigned int num_sink_pads, unsigned int num_source_pads) { unsigned int num_pads = num_sink_pads + num_source_pads; unsigned int i; int ret; mutex_init(&asd->mutex); v4l2_subdev_init(&asd->sd, ops); asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; asd->sd.owner = THIS_MODULE; asd->sd.dev = &asd->isys->adev->auxdev.dev; asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, sizeof(*asd->pad), GFP_KERNEL); asd->ffmt = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, sizeof(*asd->ffmt), GFP_KERNEL); if (!asd->pad || !asd->ffmt) return -ENOMEM; for (i = 0; i < num_sink_pads; i++) asd->pad[i].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; for (i = num_sink_pads; i < num_pads; i++) asd->pad[i].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); if (ret) { pr_err("isys subdev init failed %d.\n", ret); goto out_mutex_destroy; } if (asd->ctrl_init) { ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); if (ret) goto out_media_entity_cleanup; asd->ctrl_init(&asd->sd); if (asd->ctrl_handler.error) { ret = asd->ctrl_handler.error; goto out_v4l2_ctrl_handler_free; } asd->sd.ctrl_handler = &asd->ctrl_handler; } return 0; out_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(&asd->ctrl_handler); out_media_entity_cleanup: media_entity_cleanup(&asd->sd.entity); out_mutex_destroy: mutex_destroy(&asd->mutex); return ret; } void ipu7_isys_subdev_cleanup(struct ipu7_isys_subdev *asd) { media_entity_cleanup(&asd->sd.entity); v4l2_ctrl_handler_free(&asd->ctrl_handler); mutex_destroy(&asd->mutex); } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.h000066400000000000000000000053351465530421300275720ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_SUBDEV_H #define IPU7_ISYS_SUBDEV_H #include #include #include #include #include struct ipu7_isys; #define MIPI_CSI2_TYPE_NULL 0x10 #define MIPI_CSI2_TYPE_BLANKING 0x11 #define MIPI_CSI2_TYPE_EMBEDDED8 0x12 #define MIPI_CSI2_TYPE_YUV422_8 0x1e #define MIPI_CSI2_TYPE_YUV422_10 0x1f #define MIPI_CSI2_TYPE_RGB565 0x22 #define MIPI_CSI2_TYPE_RGB888 0x24 #define MIPI_CSI2_TYPE_RAW6 0x28 #define MIPI_CSI2_TYPE_RAW7 0x29 #define MIPI_CSI2_TYPE_RAW8 0x2a #define MIPI_CSI2_TYPE_RAW10 0x2b #define MIPI_CSI2_TYPE_RAW12 0x2c #define MIPI_CSI2_TYPE_RAW14 0x2d struct ipu7_isys_subdev { /* Serialise access to any other field in the struct */ struct mutex mutex; struct v4l2_subdev sd; struct ipu7_isys *isys; u32 const *supported_codes; struct media_pad *pad; struct v4l2_mbus_framefmt *ffmt; struct v4l2_rect crop; struct v4l2_ctrl_handler ctrl_handler; void (*ctrl_init)(struct v4l2_subdev *sd); int source; /* SSI stream source; -1 if unset */ #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC bool is_tpg; 
#endif }; #define to_ipu7_isys_subdev(__sd) \ container_of(__sd, struct ipu7_isys_subdev, sd) struct v4l2_mbus_framefmt *ipu7_isys_get_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad, unsigned int which); unsigned int ipu7_isys_mbus_code_to_bpp(u32 code); unsigned int ipu7_isys_mbus_code_to_data_type(u32 code); bool ipu7_isys_is_bayer_format(u32 code); u32 ipu7_isys_convert_bayer_order(u32 code, int x, int y); int ipu7_isys_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt); int ipu7_isys_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt); struct v4l2_rect *ipu7_isys_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad, unsigned int which); int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code); int ipu7_isys_subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt); int ipu7_isys_subdev_init(struct ipu7_isys_subdev *asd, const struct v4l2_subdev_ops *ops, unsigned int nr_ctrls, unsigned int num_sink_pads, unsigned int num_source_pads); void ipu7_isys_subdev_cleanup(struct ipu7_isys_subdev *asd); #endif /* IPU7_ISYS_SUBDEV_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-tpg.c000066400000000000000000000422411465530421300270640ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-isys.h" #include "ipu7-isys-subdev.h" #include "ipu7-isys-tpg.h" #include "ipu7-isys-video.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-platform-regs.h" static const u32 tpg_supported_codes[] = { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, 0, }; static const struct v4l2_subdev_video_ops tpg_sd_video_ops = { .s_stream = tpg_set_stream, }; static int ipu7_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl) { struct ipu7_isys_tpg *tpg = container_of(container_of(ctrl->handler, struct ipu7_isys_subdev, ctrl_handler), struct ipu7_isys_tpg, asd); switch (ctrl->id) { case V4L2_CID_HBLANK: writel(ctrl->val, tpg->base + MGC_MG_HBLANK); break; case V4L2_CID_VBLANK: writel(ctrl->val, tpg->base + MGC_MG_VBLANK); break; case V4L2_CID_TEST_PATTERN: writel(ctrl->val, tpg->base + MGC_MG_TPG_MODE); break; } return 0; } static const struct v4l2_ctrl_ops ipu7_isys_tpg_ctrl_ops = { .s_ctrl = ipu7_isys_tpg_s_ctrl, }; static s64 ipu7_isys_tpg_rate(struct ipu7_isys_tpg *tpg, unsigned int bpp) { return MGC_PPC * IPU_ISYS_FREQ / bpp; } static const char *const tpg_mode_items[] = { "Ramp", "Checkerboard", "Monochrome per frame", "Color palette", }; static struct v4l2_ctrl_config tpg_mode = { .ops = &ipu7_isys_tpg_ctrl_ops, .id = V4L2_CID_TEST_PATTERN, .name = "Test Pattern", .type = V4L2_CTRL_TYPE_MENU, .min = TPG_MODE_RAMP, .max = ARRAY_SIZE(tpg_mode_items) - 1, .def = TPG_MODE_COLOR_PALETTE, .menu_skip_mask = 0x2, .qmenu = tpg_mode_items, }; static void ipu7_isys_tpg_init_controls(struct v4l2_subdev 
*sd) { struct ipu7_isys_tpg *tpg = to_ipu7_isys_tpg(sd); int hblank; u64 default_pixel_rate; hblank = 1024; tpg->hblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu7_isys_tpg_ctrl_ops, V4L2_CID_HBLANK, 8, 65535, 1, hblank); tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu7_isys_tpg_ctrl_ops, V4L2_CID_VBLANK, 8, 65535, 1, 1024); default_pixel_rate = ipu7_isys_tpg_rate(tpg, 8); tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu7_isys_tpg_ctrl_ops, V4L2_CID_PIXEL_RATE, default_pixel_rate, default_pixel_rate, 1, default_pixel_rate); if (tpg->pixel_rate) { tpg->pixel_rate->cur.val = default_pixel_rate; tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY; } v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL); } static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = { .get_fmt = ipu7_isys_subdev_get_fmt, .set_fmt = ipu7_isys_subdev_set_fmt, .enum_mbus_code = ipu7_isys_subdev_enum_mbus_code, }; static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_FRAME_SYNC: return v4l2_event_subscribe(fh, sub, 10, NULL); case V4L2_EVENT_CTRL: return v4l2_ctrl_subscribe_event(fh, sub); default: return -EINVAL; } }; /* V4L2 subdev core operations */ static const struct v4l2_subdev_core_ops tpg_sd_core_ops = { .subscribe_event = subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const struct v4l2_subdev_ops tpg_sd_ops = { .core = &tpg_sd_core_ops, .video = &tpg_sd_video_ops, .pad = &tpg_sd_pad_ops, }; static struct media_entity_operations tpg_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; void ipu7_isys_tpg_sof_event_by_stream(struct ipu7_isys_stream *stream) { struct ipu7_isys_tpg *tpg = ipu7_isys_subdev_to_tpg(stream->asd); struct video_device *vdev = tpg->asd.sd.devnode; struct v4l2_event ev = { .type = V4L2_EVENT_FRAME_SYNC, }; ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); v4l2_event_queue(vdev, &ev); dev_dbg(&stream->isys->adev->auxdev.dev, "sof_event::tpg-%i sequence: %i\n", tpg->index, ev.u.frame_sync.frame_sequence); } void ipu7_isys_tpg_eof_event_by_stream(struct ipu7_isys_stream *stream) { struct ipu7_isys_tpg *tpg = ipu7_isys_subdev_to_tpg(stream->asd); u32 frame_sequence = atomic_read(&stream->sequence); dev_dbg(&stream->isys->adev->auxdev.dev, "eof_event::tpg-%i sequence: %i\n", tpg->index, frame_sequence); } #define DEFAULT_VC_ID 0 static bool is_metadata_enabled(const struct ipu7_isys_tpg *tpg) { return false; } static void ipu7_mipigen_regdump(const struct ipu7_isys_tpg *tpg, void __iomem *mg_base) { struct device *dev = &tpg->isys->adev->auxdev.dev; dev_dbg(dev, "---------MGC REG DUMP START----------"); dev_dbg(dev, "MGC RX_TYPE_REG 0x%x = 0x%x", MGC_MG_CSI_ADAPT_LAYER_TYPE, readl(mg_base + MGC_MG_CSI_ADAPT_LAYER_TYPE)); dev_dbg(dev, "MGC MG_MODE_REG 0x%x = 0x%x", MGC_MG_MODE, readl(mg_base + MGC_MG_MODE)); dev_dbg(dev, "MGC MIPI_VC_REG 0x%x = 0x%x", MGC_MG_MIPI_VC, readl(mg_base + MGC_MG_MIPI_VC)); dev_dbg(dev, "MGC MIPI_DTYPES_REG 0x%x = 0x%x", MGC_MG_MIPI_DTYPES, readl(mg_base + MGC_MG_MIPI_DTYPES)); dev_dbg(dev, "MGC MULTI_DTYPES_REG 0x%x = 0x%x", MGC_MG_MULTI_DTYPES_MODE, readl(mg_base + MGC_MG_MULTI_DTYPES_MODE)); dev_dbg(dev, "MGC NOF_FRAMES_REG 0x%x = 0x%x", MGC_MG_NOF_FRAMES, readl(mg_base + MGC_MG_NOF_FRAMES)); dev_dbg(dev, "MGC FRAME_DIM_REG 0x%x = 0x%x", MGC_MG_FRAME_DIM, readl(mg_base + MGC_MG_FRAME_DIM)); dev_dbg(dev, "MGC HBLANK_REG 0x%x = 0x%x", MGC_MG_HBLANK, readl(mg_base + 
MGC_MG_HBLANK)); dev_dbg(dev, "MGC VBLANK_REG 0x%x = 0x%x", MGC_MG_VBLANK, readl(mg_base + MGC_MG_VBLANK)); dev_dbg(dev, "MGC TPG_MODE_REG 0x%x = 0x%x", MGC_MG_TPG_MODE, readl(mg_base + MGC_MG_TPG_MODE)); dev_dbg(dev, "MGC R0=0x%x G0=0x%x B0=0x%x", readl(mg_base + MGC_MG_TPG_R0), readl(mg_base + MGC_MG_TPG_G0), readl(mg_base + MGC_MG_TPG_B0)); dev_dbg(dev, "MGC R1=0x%x G1=0x%x B1=0x%x", readl(mg_base + MGC_MG_TPG_R1), readl(mg_base + MGC_MG_TPG_G1), readl(mg_base + MGC_MG_TPG_B1)); dev_dbg(dev, "MGC TPG_MASKS_REG 0x%x = 0x%x", MGC_MG_TPG_MASKS, readl(mg_base + MGC_MG_TPG_MASKS)); dev_dbg(dev, "MGC TPG_XY_MASK_REG 0x%x = 0x%x", MGC_MG_TPG_XY_MASK, readl(mg_base + MGC_MG_TPG_XY_MASK)); dev_dbg(dev, "MGC TPG_TILE_DIM_REG 0x%x = 0x%x", MGC_MG_TPG_TILE_DIM, readl(mg_base + MGC_MG_TPG_TILE_DIM)); dev_dbg(dev, "MGC DTO_SPEED_CTRL_EN_REG 0x%x = 0x%x", MGC_MG_DTO_SPEED_CTRL_EN, readl(mg_base + MGC_MG_DTO_SPEED_CTRL_EN)); dev_dbg(dev, "MGC DTO_SPEED_CTRL_INCR_VAL_REG 0x%x = 0x%x", MGC_MG_DTO_SPEED_CTRL_INCR_VAL, readl(mg_base + MGC_MG_DTO_SPEED_CTRL_INCR_VAL)); dev_dbg(dev, "MGC MG_FRAME_NUM_STTS 0x%x = 0x%x", MGC_MG_FRAME_NUM_STTS, readl(mg_base + MGC_MG_FRAME_NUM_STTS)); dev_dbg(dev, "---------MGC REG DUMP END----------"); } #define TPG_STOP_TIMEOUT 50000 static int tpg_stop_stream(const struct ipu7_isys_tpg *tpg) { struct device *dev = &tpg->isys->adev->auxdev.dev; int ret; unsigned int port; u32 status; void __iomem *reg; void __iomem *mgc_base = tpg->isys->pdata->base + IS_IO_MGC_BASE; void __iomem *mg_base = tpg->base; port = 1 << tpg->index; dev_dbg(dev, "MG%d generated %u frames", tpg->index, readl(mgc_base + MGC_MG_FRAME_NUM_STTS)); writel(port, mgc_base + MGC_ASYNC_STOP); dev_dbg(dev, "wait for MG%d stop", tpg->index); reg = mg_base + MGC_MG_STOPPED_STTS; ret = readl_poll_timeout(reg, status, status & 0x1, 200, TPG_STOP_TIMEOUT); if (ret < 0) { dev_err(dev, "mgc stop timeout"); return ret; } dev_dbg(dev, "MG%d STOPPED", tpg->index); return 0; } static u8 mapped_lane_config[] = { MGC_MAPPED_4_LANES, MGC_MAPPED_2_LANES, MGC_MAPPED_2_LANES, MGC_MAPPED_4_LANES, }; #define IS_IO_CLK (IPU7_IS_FREQ_CTL_DEFAULT_RATIO * 100 / 6) #define TPG_FRAME_RATE 30 /* * The line time including blanking is 4/3 of the active line time. Keep * the ratio as a numerator/denominator pair: a plain (4 / 3) truncates * to 1 in integer arithmetic, which disables the blanking entirely. */ #define TPG_BLANK_RATIO_NUM 4 #define TPG_BLANK_RATIO_DEN 3 static void tpg_get_timing(const struct ipu7_isys_tpg *tpg, u32 *dto, u32 *hblank_cycles, u32 *vblank_cycles) { u32 width, height; u32 code; u32 bpp; u32 bits_per_line; u64 line_time_ns, frame_time_us, cycles, ns_per_cycle, rate; u64 vblank_us, hblank_us; u32 ref_clk; struct device *dev = &tpg->isys->adev->auxdev.dev; u32 dto_incr_val = 0x100; u8 mgif_width = 64; width = tpg->asd.ffmt[TPG_PAD_SOURCE].width; height = tpg->asd.ffmt[TPG_PAD_SOURCE].height; code = tpg->asd.ffmt[TPG_PAD_SOURCE].code; bpp = ipu7_isys_mbus_code_to_bpp(code); dev_dbg(dev, "MG%d code = 0x%x bpp = %u\n", tpg->index, code, bpp); bits_per_line = width * bpp * TPG_BLANK_RATIO_NUM / TPG_BLANK_RATIO_DEN; /* MGIF width 32bits or 64bits */ if (mapped_lane_config[tpg->index] == MGC_MAPPED_2_LANES) mgif_width = 64; cycles = div_u64(bits_per_line, mgif_width); dev_dbg(dev, "MG%d bits_per_line = %u cycles = %llu\n", tpg->index, bits_per_line, cycles); do { dev_dbg(dev, "MG%d try dto_incr_val 0x%x\n", tpg->index, dto_incr_val); rate = div_u64(1 << 16, dto_incr_val); ns_per_cycle = div_u64(rate * 1000, IS_IO_CLK); dev_dbg(dev, "MG%d ns_per_cycles = %llu\n", tpg->index, ns_per_cycle); line_time_ns = cycles * ns_per_cycle; frame_time_us = line_time_ns * height / 1000; dev_dbg(dev, "MG%d line_time_ns = %llu frame_time_us = %llu\n", tpg->index, line_time_ns, frame_time_us); if
(frame_time_us * TPG_FRAME_RATE < USEC_PER_SEC) break; /* dto incr val step 0x100 */ dto_incr_val += 0x100; } while (dto_incr_val < (1 << 16)); if (dto_incr_val >= (1 << 16)) { dev_warn(dev, "No DTO_INCR_VAL found\n"); hblank_us = 10; /* 10us */ vblank_us = 10000; /* 10ms */ dto_incr_val = 0x1000; } else { hblank_us = line_time_ns * (TPG_BLANK_RATIO_NUM - TPG_BLANK_RATIO_DEN) / TPG_BLANK_RATIO_DEN / 1000; vblank_us = div_u64(1000000, TPG_FRAME_RATE) - frame_time_us; } dev_dbg(dev, "hblank_us = %llu, vblank_us = %llu dto_incr_val = %u\n", hblank_us, vblank_us, dto_incr_val); ref_clk = ipu7_buttress_get_ref_clk(tpg->isys->adev->isp); *dto = dto_incr_val; *hblank_cycles = hblank_us * ref_clk / 10; *vblank_cycles = vblank_us * ref_clk / 10; dev_dbg(dev, "hblank_cycles = %u, vblank_cycles = %u\n", *hblank_cycles, *vblank_cycles); } static int tpg_start_stream(const struct ipu7_isys_tpg *tpg) { u32 port_map; u32 csi_port; u32 code, bpp; u32 width, height; u32 dto, hblank, vblank; struct device *dev = &tpg->isys->adev->auxdev.dev; void __iomem *mgc_base = tpg->isys->pdata->base + IS_IO_MGC_BASE; void __iomem *mg_base = tpg->base; width = tpg->asd.ffmt[TPG_PAD_SOURCE].width; height = tpg->asd.ffmt[TPG_PAD_SOURCE].height; code = tpg->asd.ffmt[TPG_PAD_SOURCE].code; dev_dbg(dev, "MG%d code: 0x%x resolution: %ux%u\n", tpg->index, code, width, height); bpp = ipu7_isys_mbus_code_to_bpp(code); csi_port = tpg->index; if (csi_port >= ARRAY_SIZE(mapped_lane_config)) dev_err(dev, "invalid tpg index %u\n", tpg->index); dev_dbg(dev, "INSYS MG%d was mapped to CSI%d\n", tpg->index, csi_port); /* config port map * TODO: add VC support and TPG with multiple * source pads. Currently, for simplicity, only map 1 mg to 1 csi port */ port_map = 1 << tpg->index; writel(port_map, mgc_base + MGC_CSI_PORT_MAP(csi_port)); /* configure adapt layer type */ writel(mapped_lane_config[csi_port], mg_base + MGC_MG_CSI_ADAPT_LAYER_TYPE); /* configure MGC mode * 0 - Disable MGC * 1 - Enable PRBS * 2 - Enable TPG * 3 - Reserved [Write phase: SW/FW debug] */ writel(2, mg_base + MGC_MG_MODE); /* config mg init counter */ writel(0, mg_base + MGC_MG_INIT_COUNTER); /* * configure virtual channel * TODO: VC support if needed * currently each MGC just uses 1 virtual channel */ writel(DEFAULT_VC_ID, mg_base + MGC_MG_MIPI_VC); /* * configure data type and multi dtypes mode * TODO: it needs to add the metadata flow.
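* Until then both data type slots written to MGC_MG_MIPI_DTYPES below carry the same RAW type derived from the bits-per-pixel value.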
*/ if (is_metadata_enabled(tpg)) { writel(MGC_DTYPE_RAW(bpp) << 4 | MGC_DTYPE_RAW(bpp), mg_base + MGC_MG_MIPI_DTYPES); writel(2, mg_base + MGC_MG_MULTI_DTYPES_MODE); } else { writel(MGC_DTYPE_RAW(bpp) << 4 | MGC_DTYPE_RAW(bpp), mg_base + MGC_MG_MIPI_DTYPES); writel(0, mg_base + MGC_MG_MULTI_DTYPES_MODE); } /* * configure frame information */ writel(0, mg_base + MGC_MG_NOF_FRAMES); writel(width | height << 16, mg_base + MGC_MG_FRAME_DIM); tpg_get_timing(tpg, &dto, &hblank, &vblank); writel(hblank, mg_base + MGC_MG_HBLANK); writel(vblank, mg_base + MGC_MG_VBLANK); /* * configure tpg mode, colors, mask, tile dimension * Mode was set by user configuration * 0 - Ramp mode * 1 - Checkerboard * 2 - Monochrome per frame * 3 - Color palette */ writel(TPG_MODE_COLOR_PALETTE, mg_base + MGC_MG_TPG_MODE); /* red and green for checkerboard, n/a for other modes */ writel(58, mg_base + MGC_MG_TPG_R0); writel(122, mg_base + MGC_MG_TPG_G0); writel(46, mg_base + MGC_MG_TPG_B0); writel(123, mg_base + MGC_MG_TPG_R1); writel(85, mg_base + MGC_MG_TPG_G1); writel(67, mg_base + MGC_MG_TPG_B1); writel(0x0, mg_base + MGC_MG_TPG_FACTORS); /* hor_mask [15:0] ver_mask [31:16] */ writel(0xffffffff, mg_base + MGC_MG_TPG_MASKS); /* xy_mask [11:0] */ writel(0xfff, mg_base + MGC_MG_TPG_XY_MASK); writel(((MGC_TPG_TILE_WIDTH << 16) | MGC_TPG_TILE_HEIGHT), mg_base + MGC_MG_TPG_TILE_DIM); writel(dto, mg_base + MGC_MG_DTO_SPEED_CTRL_INCR_VAL); writel(1, mg_base + MGC_MG_DTO_SPEED_CTRL_EN); /* disable err_injection */ writel(0, mg_base + MGC_MG_ERR_INJECT); writel(0, mg_base + MGC_MG_ERR_LOCATION); ipu7_mipigen_regdump(tpg, mg_base); dev_dbg(dev, "starting MG%d streaming...\n", csi_port); /* kick and start */ writel(port_map, mgc_base + MGC_KICK); return 0; } static void ipu7_isys_ungate_mgc(struct ipu7_isys_tpg *tpg, int enable) { struct ipu7_isys_csi2 *csi2; u32 offset; struct ipu7_isys *isys = tpg->isys; csi2 = &isys->csi2[tpg->index]; offset = IS_IO_GPREGS_BASE; /* MGC is in use by SW or not */ if (enable) writel(1, csi2->base + offset + MGC_CLK_GATE); else writel(0, csi2->base + offset + MGC_CLK_GATE); } static void ipu7_isys_mgc_csi2_s_stream(struct ipu7_isys_tpg *tpg, int enable) { struct device *dev = &tpg->isys->adev->auxdev.dev; struct ipu7_isys *isys = tpg->isys; struct ipu7_isys_csi2 *csi2; u32 port, offset, val; void __iomem *isys_base = isys->pdata->base; port = tpg->index; csi2 = &isys->csi2[port]; offset = IS_IO_GPREGS_BASE; val = readl(isys_base + offset + CSI_PORT_CLK_GATE); dev_dbg(dev, "current CSI port %u clk gate 0x%x\n", port, val); if (!enable) { writel(~(1 << port) & val, isys_base + offset + CSI_PORT_CLK_GATE); return; } /* set csi port is using by SW */ writel(1 << port | val, isys_base + offset + CSI_PORT_CLK_GATE); /* input is coming from MGC */ offset = IS_IO_CSI2_ADPL_PORT_BASE(port); writel(CSI_MIPIGEN_INPUT, csi2->base + offset + CSI2_ADPL_INPUT_MODE); } /* TODO: add the processing of vc */ int tpg_set_stream(struct v4l2_subdev *sd, int enable) { struct ipu7_isys_tpg *tpg = to_ipu7_isys_tpg(sd); struct ipu7_isys_stream *stream = to_ipu7_isys_pipeline(media_entity_pipeline(&sd->entity)); struct device *dev = &tpg->isys->adev->auxdev.dev; int ret; if (tpg->index >= IPU7_ISYS_CSI_PORT_NUM) { dev_err(dev, "invalid MGC index %d\n", tpg->index); return -EINVAL; } if (!enable) { /* Stop MGC */ stream->asd->is_tpg = false; stream->asd = NULL; ipu7_isys_mgc_csi2_s_stream(tpg, enable); ret = tpg_stop_stream(tpg); ipu7_isys_ungate_mgc(tpg, enable); return ret; } stream->asd = &tpg->asd; stream->asd->is_tpg = 
true; /* ungate the MGC clock to program */ ipu7_isys_ungate_mgc(tpg, enable); /* Start MGC */ ret = tpg_start_stream(tpg); v4l2_ctrl_handler_setup(&tpg->asd.ctrl_handler); ipu7_isys_mgc_csi2_s_stream(tpg, enable); return ret; } void ipu7_isys_tpg_cleanup(struct ipu7_isys_tpg *tpg) { v4l2_device_unregister_subdev(&tpg->asd.sd); ipu7_isys_subdev_cleanup(&tpg->asd); } int ipu7_isys_tpg_init(struct ipu7_isys_tpg *tpg, struct ipu7_isys *isys, void __iomem *base, void __iomem *sel, unsigned int index) { struct device *dev = &isys->adev->auxdev.dev; int ret; tpg->isys = isys; tpg->base = base; tpg->sel = sel; tpg->index = index; tpg->asd.sd.entity.ops = &tpg_entity_ops; tpg->asd.ctrl_init = ipu7_isys_tpg_init_controls; tpg->asd.isys = isys; ret = ipu7_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5, NR_OF_TPG_SINK_PADS, NR_OF_TPG_SOURCE_PADS); if (ret) return ret; tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; tpg->asd.pad[TPG_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; tpg->asd.source = IPU_INSYS_MIPI_PORT_0 + index; tpg->asd.supported_codes = tpg_supported_codes; snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " TPG %u", index); v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) ret = v4l2_subdev_init_finalize(&tpg->asd.sd); if (ret) { dev_err(dev, "failed to finalize subdev (%d)\n", ret); goto fail; } #endif ret = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd); if (ret) { dev_info(dev, "can't register v4l2 subdev\n"); goto fail; } return 0; fail: ipu7_isys_tpg_cleanup(tpg); return ret; } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-tpg.h000066400000000000000000000032341465530421300270700ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_TPG_H #define IPU7_ISYS_TPG_H #include #include #include #include "ipu7-isys-subdev.h" #include "ipu7-isys-video.h" #include "ipu7-isys-queue.h" struct ipu7_isys_tpg_pdata; struct ipu7_isys; #define TPG_PAD_SOURCE 0 #define NR_OF_TPG_PADS 1 #define NR_OF_TPG_SOURCE_PADS 1 #define NR_OF_TPG_SINK_PADS 0 #define NR_OF_TPG_STREAMS 1 enum isys_tpg_mode { TPG_MODE_RAMP = 0, TPG_MODE_CHECKERBOARD = 1, TPG_MODE_MONO = 2, TPG_MODE_COLOR_PALETTE = 3, }; /* * struct ipu7_isys_tpg * * @nlanes: number of lanes in the receiver */ struct ipu7_isys_tpg { struct ipu7_isys_subdev asd; struct ipu7_isys_tpg_pdata *pdata; struct ipu7_isys *isys; struct ipu7_isys_video *av; /* MG base not MGC */ void __iomem *base; void __iomem *sel; unsigned int index; struct v4l2_ctrl *hblank; struct v4l2_ctrl *vblank; struct v4l2_ctrl *pixel_rate; }; #define ipu7_isys_subdev_to_tpg(__sd) \ container_of(__sd, struct ipu7_isys_tpg, asd) #define to_ipu7_isys_tpg(sd) \ container_of(to_ipu7_isys_subdev(sd), \ struct ipu7_isys_tpg, asd) void ipu7_isys_tpg_sof_event_by_stream(struct ipu7_isys_stream *stream); void ipu7_isys_tpg_eof_event_by_stream(struct ipu7_isys_stream *stream); int ipu7_isys_tpg_init(struct ipu7_isys_tpg *tpg, struct ipu7_isys *isys, void __iomem *base, void __iomem *sel, unsigned int index); void ipu7_isys_tpg_cleanup(struct ipu7_isys_tpg *tpg); int tpg_set_stream(struct v4l2_subdev *sd, int enable); #endif /* IPU7_ISYS_TPG_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-video.c000066400000000000000000000715261465530421300274100ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel 
Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_isys_abi.h" #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-fw-isys.h" #include "ipu7-isys.h" #include "ipu7-isys-video.h" #include "ipu7-platform-regs.h" const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = { {V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, IPU_INSYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, IPU_INSYS_FRAME_FORMAT_YUYV}, {V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, IPU_INSYS_FRAME_FORMAT_RGB565}, {V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, IPU_INSYS_FRAME_FORMAT_RGBA888}, }; static int video_open(struct file *file) { struct ipu7_isys_video *av = video_drvdata(file); struct ipu7_isys *isys = av->isys; struct ipu7_bus_device *adev = isys->adev; mutex_lock(&isys->mutex); if (isys->need_reset) { mutex_unlock(&isys->mutex); dev_warn(&adev->auxdev.dev, "isys power cycle required\n"); return -EIO; } mutex_unlock(&isys->mutex); return v4l2_fh_open(file); } const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { const struct ipu7_isys_pixelformat *pfmt = &ipu7_isys_pfmts[i]; if (pfmt->pixelformat == pixelformat) return pfmt; } return &ipu7_isys_pfmts[0]; } int ipu7_isys_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct ipu7_isys_video *av = video_drvdata(file); strscpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver)); strscpy(cap->card, av->isys->media_dev.model, 
sizeof(cap->card)); return 0; } int ipu7_isys_vidioc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) { unsigned int i, num_found; for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) continue; if (f->mbus_code && f->mbus_code != ipu7_isys_pfmts[i].code) continue; if (num_found < f->index) { num_found++; continue; } f->flags = 0; f->pixelformat = ipu7_isys_pfmts[i].pixelformat; return 0; } return -EINVAL; } static int ipu7_isys_vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { unsigned int i; if (fsize->index > 0) return -EINVAL; for (i = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { if (fsize->pixel_format != ipu7_isys_pfmts[i].pixelformat) continue; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = IPU_ISYS_MIN_WIDTH; fsize->stepwise.max_width = IPU_ISYS_MAX_WIDTH; fsize->stepwise.min_height = IPU_ISYS_MIN_HEIGHT; fsize->stepwise.max_height = IPU_ISYS_MAX_HEIGHT; fsize->stepwise.step_width = 2; fsize->stepwise.step_height = 2; return 0; } return -EINVAL; } static int ipu7_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct ipu7_isys_video *av = video_drvdata(file); f->fmt.pix = av->pix_fmt; return 0; } static void ipu7_isys_try_fmt_cap(struct ipu7_isys_video *av, u32 type, u32 *format, u32 *width, u32 *height, u32 *bytesperline, u32 *sizeimage) { const struct ipu7_isys_pixelformat *pfmt = ipu7_isys_get_isys_format(*format); *format = pfmt->pixelformat; *width = clamp(*width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); *height = clamp(*height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); if (pfmt->bpp != pfmt->bpp_packed) *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE); else *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE); *bytesperline = ALIGN(*bytesperline, av->isys->line_align); /* * (height + 1) * bytesperline due to a hardware issue: the DMA unit * is a power of two, and a line should be transferred as few units * as possible. The result is that up to line length more data than * the image size may be transferred to memory after the image. * Another limitation is the GDA allocation unit size. For low * resolution it gives a bigger number. Use larger one to avoid * memory corruption. 
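* Worked example (illustrative numbers only): a 1920x1080 SGRGB10P-style packed buffer has bytesperline = DIV_ROUND_UP(1920 * 10, 8) = 2400 before line alignment, so sizeimage = 2400 * 1080 + max(2400, isys_dma_overshoot).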
*/ *sizeimage = *bytesperline * *height + max(*bytesperline, av->isys->pdata->ipdata->isys_dma_overshoot); } static void __ipu_isys_vidioc_try_fmt_vid_cap(struct ipu7_isys_video *av, struct v4l2_format *f) { ipu7_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat, &f->fmt.pix.width, &f->fmt.pix.height, &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage); f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT; f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT; } static int ipu7_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct ipu7_isys_video *av = video_drvdata(file); if (vb2_is_busy(&av->aq.vbq)) return -EBUSY; __ipu_isys_vidioc_try_fmt_vid_cap(av, f); return 0; } static int ipu7_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct ipu7_isys_video *av = video_drvdata(file); ipu7_isys_vidioc_try_fmt_vid_cap(file, fh, f); av->pix_fmt = f->fmt.pix; return 0; } static int ipu7_isys_vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct ipu7_isys_video *av = video_drvdata(file); int ret; av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type); av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type); ret = vb2_queue_change_type(&av->aq.vbq, p->type); if (ret) return ret; return vb2_ioctl_reqbufs(file, priv, p); } static int ipu7_isys_vidioc_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *p) { struct ipu7_isys_video *av = video_drvdata(file); int ret; av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type); av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type); ret = vb2_queue_change_type(&av->aq.vbq, p->format.type); if (ret) return ret; return vb2_ioctl_create_bufs(file, priv, p); } /* * Return true if an entity directly connected to an Iunit entity is * an image source for the ISP. This can be any external directly * connected entity or any of the test pattern generators in the * Iunit. */ static bool is_external(struct ipu7_isys_video *av, struct media_entity *entity) { struct v4l2_subdev *sd; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC unsigned int i; #endif /* All video nodes are ours. */ if (!is_media_entity_v4l2_subdev(entity)) return false; sd = media_entity_to_v4l2_subdev(entity); if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) return true; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC for (i = 0; i < av->isys->pdata->ipdata->tpg.ntpgs && av->isys->tpg[i].isys; i++) if (entity == &av->isys->tpg[i].asd.sd.entity) return true; #endif return false; } static int link_validate(struct media_link *link) { struct ipu7_isys_video *av = container_of(link->sink, struct ipu7_isys_video, pad); struct ipu7_isys_stream *stream = av->stream; struct v4l2_mbus_framefmt *ffmt; struct ipu7_isys_subdev *asd; struct v4l2_subdev *sd; u32 code; if (!stream || !link->source->entity) return -EINVAL; sd = media_entity_to_v4l2_subdev(link->source->entity); asd = to_ipu7_isys_subdev(sd); ffmt = &asd->ffmt[link->source->index]; code = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat)->code; if (ffmt->code != code || ffmt->width != av->pix_fmt.width || ffmt->height != av->pix_fmt.height) { dev_err(&asd->isys->adev->auxdev.dev, "vdev link validation failed. 
%dx%d,%x != %dx%d,%x\n", ffmt->width, ffmt->height, ffmt->code, av->pix_fmt.width, av->pix_fmt.height, code); return -EINVAL; } if (is_external(av, link->source->entity)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) stream->external = media_entity_remote_pad(av->vdev.entity.pads); #else stream->external = media_pad_remote_pad_first(av->vdev.entity.pads); #endif stream->stream_source = to_ipu7_isys_subdev(sd)->source; } return 0; } static void get_stream_opened(struct ipu7_isys_video *av) { unsigned long flags; spin_lock_irqsave(&av->isys->streams_lock, flags); av->isys->stream_opened++; spin_unlock_irqrestore(&av->isys->streams_lock, flags); } static void put_stream_opened(struct ipu7_isys_video *av) { unsigned long flags; spin_lock_irqsave(&av->isys->streams_lock, flags); av->isys->stream_opened--; spin_unlock_irqrestore(&av->isys->streams_lock, flags); } static int get_external_facing_format(struct ipu7_isys_stream *stream, struct v4l2_subdev_format *format) { struct device *dev = &stream->isys->adev->auxdev.dev; struct v4l2_subdev *sd; struct media_pad *external_facing; if (!stream->external->entity) { WARN_ON(1); return -ENODEV; } sd = media_entity_to_v4l2_subdev(stream->external->entity); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ? stream->external : media_entity_remote_pad(stream->external); #else external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ? stream->external : media_pad_remote_pad_first(stream->external); #endif if (WARN_ON(!external_facing)) { dev_warn(dev, "no external facing pad --- driver bug?\n"); return -EINVAL; } format->which = V4L2_SUBDEV_FORMAT_ACTIVE; format->pad = 0; sd = media_entity_to_v4l2_subdev(external_facing->entity); return v4l2_subdev_call(sd, pad, get_fmt, NULL, format); } void ipu7_isys_prepare_fw_cfg_default(struct ipu7_isys_video *av, struct ipu7_insys_stream_cfg *cfg) { struct ipu7_isys_stream *stream = av->stream; const struct ipu7_isys_pixelformat *pfmt; struct ipu7_isys_queue *aq = &av->aq; struct ipu7_insys_output_pin *pin_info; int pin = cfg->nof_output_pins++; aq->fw_output = pin; stream->output_pins[pin].pin_ready = ipu7_isys_queue_buf_ready; stream->output_pins[pin].aq = aq; pin_info = &cfg->output_pins[pin]; /* output pin msg link */ pin_info->link.buffer_lines = 0; pin_info->link.foreign_key = IPU_MSG_LINK_FOREIGN_KEY_NONE; pin_info->link.granularity_pointer_update = 0; pin_info->link.msg_link_streaming_mode = IA_GOFO_MSG_LINK_STREAMING_MODE_SOFF; pin_info->link.pbk_id = IPU_MSG_LINK_PBK_ID_DONT_CARE; pin_info->link.pbk_slot_id = IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE; pin_info->link.dest = IPU_INSYS_OUTPUT_LINK_DEST_MEM; pin_info->link.use_sw_managed = 1; /* output pin crop */ pin_info->crop.line_top = 0; pin_info->crop.line_bottom = 0; /* output de-compression */ pin_info->dpcm.enable = 0; /* frame fomat type */ pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); pin_info->ft = (u16)pfmt->css_pixelformat; /* stride in bytes */ pin_info->stride = av->pix_fmt.bytesperline; pin_info->send_irq = 1; pin_info->early_ack_en = 0; /* input pin id */ pin_info->input_pin_id = 0; cfg->vc = 0; } /* Create stream and start it using the CSS FW ABI. 
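 *
 * As implemented in start_stream_firmware() below, the sequence is:
 *   1. send STREAM_OPEN with the stream configuration and wait for
 *      the open completion;
 *   2. queue the first buffer set and send
 *      STREAM_START_AND_CAPTURE, then wait for the start completion;
 *   3. on any failure after a successful open, send STREAM_CLOSE so
 *      the firmware side is torn down again.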
 */
static int start_stream_firmware(struct ipu7_isys_video *av,
				 struct ipu7_isys_buffer_list *bl)
{
	struct ipu7_isys_stream *stream = av->stream;
	struct device *dev = &av->isys->adev->auxdev.dev;
	struct ipu7_insys_stream_cfg *stream_cfg;
	struct isys_fw_msgs *msg = NULL;
	struct ipu7_insys_buffset *buf = NULL;
	struct ipu7_isys_queue *aq;
	struct v4l2_subdev_format source_fmt = { 0 };
	enum ipu7_insys_send_type send_type;
	const struct ipu7_isys_pixelformat *pfmt;
	int ret, retout, tout;

	ret = get_external_facing_format(stream, &source_fmt);
	if (ret)
		return ret;

	msg = ipu7_get_fw_msg_buf(stream);
	if (!msg)
		return -ENOMEM;

	stream_cfg = &msg->fw_msg.stream;
	stream_cfg->input_pins[0].input_res.width = source_fmt.format.width;
	stream_cfg->input_pins[0].input_res.height = source_fmt.format.height;
	stream_cfg->input_pins[0].dt =
		ipu7_isys_mbus_code_to_data_type(source_fmt.format.code);

	/* This is similar to mipi_store_mod. */
	stream_cfg->input_pins[0].disable_mipi_unpacking = 0;
	pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat);
	if (pfmt->bpp == pfmt->bpp_packed && pfmt->bpp % BITS_PER_BYTE)
		stream_cfg->input_pins[0].disable_mipi_unpacking = 1;
	stream_cfg->input_pins[0].mapped_dt = N_IPU_INSYS_MIPI_DATA_TYPE;
	stream_cfg->input_pins[0].dt_rename_mode = IPU_INSYS_MIPI_DT_NO_RENAME;
	stream_cfg->input_pins[0].sync_msg_map =
		(IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF |
		 IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED);
	stream_cfg->stream_msg_map = IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP;

	/*
	 * If polling of the isys interrupt is enabled, the following
	 * values may be set.
	 */
	stream_cfg->stream_msg_map |= IPU_INSYS_STREAM_ENABLE_MSG_SEND_IRQ;
	stream_cfg->input_pins[0].sync_msg_map |=
		(IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF |
		 IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED);

	/* TODO: the port_id needs proper handling; it isn't the source.
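	 *
	 * For now the value captured at link validation time
	 * (to_ipu7_isys_subdev(sd)->source, i.e. the CSI-2 source
	 * index) is written to port_id unchanged just below.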
*/ stream_cfg->port_id = stream->stream_source; stream_cfg->vc = 0; stream_cfg->nof_input_pins = 1; list_for_each_entry(aq, &stream->queues, node) { struct ipu7_isys_video *__av = ipu7_isys_queue_to_video(aq); ipu7_isys_prepare_fw_cfg_default(__av, stream_cfg); } ipu7_fw_isys_dump_stream_cfg(dev, stream_cfg); stream->nr_output_pins = stream_cfg->nof_output_pins; reinit_completion(&stream->stream_open_completion); ret = ipu7_fw_isys_complex_cmd(av->isys, stream->stream_handle, stream_cfg, msg->dma_addr, sizeof(*stream_cfg), IPU_INSYS_SEND_TYPE_STREAM_OPEN); if (ret < 0) { dev_err(dev, "can't open stream (%d)\n", ret); ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); return ret; } get_stream_opened(av); tout = wait_for_completion_timeout(&stream->stream_open_completion, FW_CALL_TIMEOUT_JIFFIES); ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); if (!tout) { dev_err(dev, "stream open time out\n"); ret = -ETIMEDOUT; goto out_put_stream_opened; } if (stream->error) { dev_err(dev, "stream open error: %d\n", stream->error); ret = -EIO; goto out_put_stream_opened; } dev_dbg(dev, "start stream: open complete\n"); if (WARN_ON(!bl)) return -EIO; msg = ipu7_get_fw_msg_buf(stream); if (!msg) { ret = -ENOMEM; goto out_put_stream_opened; } buf = &msg->fw_msg.frame; ipu7_isys_buffer_to_fw_frame_buff(buf, stream, bl); ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); reinit_completion(&stream->stream_start_completion); send_type = IPU_INSYS_SEND_TYPE_STREAM_START_AND_CAPTURE; ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); ret = ipu7_fw_isys_complex_cmd(av->isys, stream->stream_handle, buf, msg->dma_addr, sizeof(*buf), send_type); if (ret < 0) { dev_err(dev, "can't start streaming (%d)\n", ret); goto out_stream_close; } tout = wait_for_completion_timeout(&stream->stream_start_completion, FW_CALL_TIMEOUT_JIFFIES); if (!tout) { dev_err(dev, "stream start time out\n"); ret = -ETIMEDOUT; goto out_stream_close; } if (stream->error) { dev_err(dev, "stream start error: %d\n", stream->error); ret = -EIO; goto out_stream_close; } dev_dbg(dev, "start stream: complete\n"); return 0; out_stream_close: reinit_completion(&stream->stream_close_completion); retout = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle, IPU_INSYS_SEND_TYPE_STREAM_CLOSE); if (retout < 0) { dev_dbg(dev, "can't close stream (%d)\n", retout); goto out_put_stream_opened; } tout = wait_for_completion_timeout(&stream->stream_close_completion, FW_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream close time out\n"); else if (stream->error) dev_err(dev, "stream close error: %d\n", stream->error); else dev_dbg(dev, "stream close complete\n"); out_put_stream_opened: put_stream_opened(av); return ret; } static void stop_streaming_firmware(struct ipu7_isys_video *av) { struct ipu7_isys_stream *stream = av->stream; struct device *dev = &av->isys->adev->auxdev.dev; int ret, tout; enum ipu7_insys_send_type send_type = IPU_INSYS_SEND_TYPE_STREAM_FLUSH; reinit_completion(&stream->stream_stop_completion); ret = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle, send_type); if (ret < 0) { dev_err(dev, "can't stop stream (%d)\n", ret); return; } tout = wait_for_completion_timeout(&stream->stream_stop_completion, FW_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream stop time out\n"); else if (stream->error) dev_err(dev, "stream stop error: %d\n", stream->error); else dev_dbg(dev, "stop stream: complete\n"); } static void close_streaming_firmware(struct ipu7_isys_video *av) { struct 
ipu7_isys_stream *stream = av->stream; struct device *dev = &av->isys->adev->auxdev.dev; int ret, tout; reinit_completion(&stream->stream_close_completion); ret = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle, IPU_INSYS_SEND_TYPE_STREAM_CLOSE); if (ret < 0) { dev_err(dev, "can't close stream (%d)\n", ret); return; } tout = wait_for_completion_timeout(&stream->stream_close_completion, FW_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream close time out\n"); else if (stream->error) dev_err(dev, "stream close error: %d\n", stream->error); else dev_dbg(dev, "close stream: complete\n"); put_stream_opened(av); } int ipu7_isys_video_prepare_streaming(struct ipu7_isys_video *av) { struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; int ret; WARN_ON(stream->nr_streaming); stream->nr_queues = 0; stream->external = NULL; atomic_set(&stream->sequence, 0); atomic_set(&stream->buf_id, 0); stream->asd = NULL; stream->seq_index = 0; memset(stream->seq, 0, sizeof(stream->seq)); WARN_ON(!list_empty(&stream->queues)); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) ret = media_pipeline_start(&av->vdev.entity, &stream->pipe); #else ret = media_pipeline_start(av->vdev.entity.pads, &stream->pipe); #endif if (ret < 0) { dev_dbg(dev, "pipeline start failed\n"); return ret; } if (!stream->external) { dev_err(dev, "no external entity set! Driver bug?\n"); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) media_pipeline_stop(&av->vdev.entity); #else media_pipeline_stop(av->vdev.entity.pads); #endif return -EINVAL; } dev_dbg(dev, "prepare stream: external entity %s\n", stream->external->entity->name); return 0; } int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, unsigned int state, struct ipu7_isys_buffer_list *bl) { struct device *dev = &av->isys->adev->auxdev.dev; struct media_device *mdev = av->vdev.entity.graph_obj.mdev; struct media_entity_enum entities; struct media_graph graph; struct media_entity *entity, *entity2; struct ipu7_isys_stream *stream = av->stream; struct v4l2_subdev *sd, *esd; int ret = 0; dev_dbg(dev, "set stream: %d\n", state); if (!stream->external->entity) { WARN_ON(1); return -ENODEV; } esd = media_entity_to_v4l2_subdev(stream->external->entity); if (!state) { stop_streaming_firmware(av); /* stop external sub-device now. */ dev_info(dev, "stream off %s\n", stream->external->entity->name); v4l2_subdev_call(esd, video, s_stream, state); } ret = media_graph_walk_init(&graph, mdev); if (ret) return ret; ret = media_entity_enum_init(&entities, mdev); if (ret) goto out_media_entity_graph_init; mutex_lock(&mdev->graph_mutex); media_graph_walk_start(&graph, &av->vdev.entity); while ((entity = media_graph_walk_next(&graph))) { sd = media_entity_to_v4l2_subdev(entity); /* Non-subdev nodes can be safely ignored here. */ if (!is_media_entity_v4l2_subdev(entity)) continue; /* Don't start truly external devices quite yet. */ if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) != 0 || stream->external->entity == entity) continue; dev_dbg(dev, "s_stream %s entity %s\n", state ? 
"on" : "off", entity->name); ret = v4l2_subdev_call(sd, video, s_stream, state); if (!state) continue; if (ret && ret != -ENOIOCTLCMD) { mutex_unlock(&mdev->graph_mutex); goto out_media_entity_stop_streaming; } media_entity_enum_set(&entities, entity); } mutex_unlock(&mdev->graph_mutex); /* Oh crap */ if (state) { ret = start_stream_firmware(av, bl); if (ret) goto out_media_entity_stop_streaming; dev_dbg(dev, "set stream: source %d, stream_handle %d\n", stream->stream_source, stream->stream_handle); /* Start external sub-device now. */ dev_info(dev, "stream on %s\n", stream->external->entity->name); ret = v4l2_subdev_call(esd, video, s_stream, state); if (ret) goto out_media_entity_stop_streaming_firmware; } else { close_streaming_firmware(av); } media_entity_enum_cleanup(&entities); media_graph_walk_cleanup(&graph); av->streaming = state; return 0; out_media_entity_stop_streaming_firmware: stop_streaming_firmware(av); out_media_entity_stop_streaming: mutex_lock(&mdev->graph_mutex); media_graph_walk_start(&graph, &av->vdev.entity); while (state && (entity2 = media_graph_walk_next(&graph)) && entity2 != entity) { sd = media_entity_to_v4l2_subdev(entity2); if (!media_entity_enum_test(&entities, entity2)) continue; v4l2_subdev_call(sd, video, s_stream, 0); } mutex_unlock(&mdev->graph_mutex); media_entity_enum_cleanup(&entities); out_media_entity_graph_init: media_graph_walk_cleanup(&graph); return ret; } void ipu7_isys_put_stream(struct ipu7_isys_stream *stream) { unsigned int i; unsigned long flags; if (WARN_ON_ONCE(!stream)) return; spin_lock_irqsave(&stream->isys->streams_lock, flags); for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (&stream->isys->streams[i] == stream) { if (stream->isys->streams_ref_count[i] > 0) { stream->isys->streams_ref_count[i]--; } else { dev_warn(&stream->isys->adev->auxdev.dev, "stream %d isn't used\n", i); } break; } } spin_unlock_irqrestore(&stream->isys->streams_lock, flags); } struct ipu7_isys_stream *ipu7_isys_get_stream(struct ipu7_isys *isys) { unsigned int i; unsigned long flags; struct ipu7_isys_stream *stream = NULL; if (!isys) return NULL; spin_lock_irqsave(&isys->streams_lock, flags); for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (!isys->streams_ref_count[i]) { isys->streams_ref_count[i]++; stream = &isys->streams[i]; break; } } spin_unlock_irqrestore(&isys->streams_lock, flags); return stream; } struct ipu7_isys_stream * ipu7_isys_query_stream_by_handle(struct ipu7_isys *isys, u8 stream_handle) { unsigned long flags; struct ipu7_isys_stream *stream = NULL; if (!isys) return NULL; if (stream_handle >= IPU_ISYS_MAX_STREAMS) { dev_err(&isys->adev->auxdev.dev, "stream_handle %d is invalid\n", stream_handle); return NULL; } spin_lock_irqsave(&isys->streams_lock, flags); if (isys->streams_ref_count[stream_handle] > 0) { isys->streams_ref_count[stream_handle]++; stream = &isys->streams[stream_handle]; } spin_unlock_irqrestore(&isys->streams_lock, flags); return stream; } static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { .vidioc_querycap = ipu7_isys_vidioc_querycap, .vidioc_enum_fmt_vid_cap = ipu7_isys_vidioc_enum_fmt, .vidioc_enum_framesizes = ipu7_isys_vidioc_enum_framesizes, .vidioc_g_fmt_vid_cap = ipu7_isys_vidioc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = ipu7_isys_vidioc_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = ipu7_isys_vidioc_try_fmt_vid_cap, .vidioc_reqbufs = ipu7_isys_vidioc_reqbufs, .vidioc_create_bufs = ipu7_isys_vidioc_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = 
vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_expbuf = vb2_ioctl_expbuf, }; static const struct media_entity_operations entity_ops = { .link_validate = link_validate, }; static const struct v4l2_file_operations isys_fops = { .owner = THIS_MODULE, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .open = video_open, .release = vb2_fop_release, }; int ipu7_isys_fw_open(struct ipu7_isys *isys) { struct ipu7_bus_device *adev = isys->adev; int ret; ret = pm_runtime_resume_and_get(&adev->auxdev.dev); if (ret < 0) return ret; mutex_lock(&isys->mutex); if (isys->ref_count++) goto unlock; /* * Buffers could have been left to wrong queue at last closure. * Move them now back to empty buffer queue. */ ipu7_cleanup_fw_msg_bufs(isys); ret = ipu7_fw_isys_open(isys); if (ret < 0) goto out; unlock: mutex_unlock(&isys->mutex); return 0; out: isys->ref_count--; mutex_unlock(&isys->mutex); pm_runtime_put(&adev->auxdev.dev); return ret; } void ipu7_isys_fw_close(struct ipu7_isys *isys) { mutex_lock(&isys->mutex); isys->ref_count--; if (!isys->ref_count) ipu7_fw_isys_close(isys); mutex_unlock(&isys->mutex); if (isys->need_reset) pm_runtime_put_sync(&isys->adev->auxdev.dev); else pm_runtime_put(&isys->adev->auxdev.dev); } /* * Do everything that's needed to initialise things related to video * buffer queue, video node, and the related media entity. The caller * is expected to assign isys field and set the name of the video * device. */ int ipu7_isys_video_init(struct ipu7_isys_video *av) { struct v4l2_format format = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = 1920, .height = 1080, }, }; int ret; mutex_init(&av->mutex); av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | V4L2_CAP_VIDEO_CAPTURE; av->vdev.vfl_dir = VFL_DIR_RX; ret = ipu7_isys_queue_init(&av->aq); if (ret) goto out_mutex_destroy; av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); if (ret) goto out_vb2_queue_cleanup; av->vdev.entity.ops = &entity_ops; av->vdev.release = video_device_release_empty; av->vdev.fops = &isys_fops; av->vdev.v4l2_dev = &av->isys->v4l2_dev; av->vdev.ioctl_ops = &ipu7_v4l2_ioctl_ops; av->vdev.queue = &av->aq.vbq; av->vdev.lock = &av->mutex; __ipu_isys_vidioc_try_fmt_vid_cap(av, &format); av->pix_fmt = format.fmt.pix; set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); video_set_drvdata(&av->vdev, av); ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); if (ret) goto out_media_entity_cleanup; return ret; out_media_entity_cleanup: media_entity_cleanup(&av->vdev.entity); out_vb2_queue_cleanup: vb2_queue_release(&av->aq.vbq); out_mutex_destroy: mutex_destroy(&av->mutex); return ret; } void ipu7_isys_video_cleanup(struct ipu7_isys_video *av) { vb2_video_unregister_device(&av->vdev); media_entity_cleanup(&av->vdev.entity); mutex_destroy(&av->mutex); } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys-video.h000066400000000000000000000062061465530421300274060ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_VIDEO_H #define IPU7_ISYS_VIDEO_H #include #include #include #include #include #include #include #include "ipu7-isys-queue.h" #define IPU_INSYS_OUTPUT_PINS 11 #define IPU_ISYS_MAX_PARALLEL_SOF 2 struct file; struct ipu7_isys; struct ipu7_isys_csi2; struct ipu7_insys_stream_cfg; struct 
ipu7_isys_subdev; struct ipu7_isys_pixelformat { u32 pixelformat; u32 bpp; u32 bpp_packed; u32 code; u32 css_pixelformat; }; struct sequence_info { unsigned int sequence; u64 timestamp; }; struct output_pin_data { void (*pin_ready)(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info); struct ipu7_isys_queue *aq; }; struct ipu7_isys_stream { struct mutex mutex; struct media_pipeline pipe; struct media_pad *external; atomic_t sequence; atomic_t buf_id; unsigned int seq_index; struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF]; int stream_source; int stream_handle; unsigned int nr_output_pins; struct ipu7_isys_subdev *asd; int nr_queues; /* Number of capture queues */ int nr_streaming; int streaming; struct list_head queues; struct completion stream_open_completion; struct completion stream_close_completion; struct completion stream_start_completion; struct completion stream_stop_completion; struct ipu7_isys *isys; struct output_pin_data output_pins[IPU_INSYS_OUTPUT_PINS]; int error; }; #define to_ipu7_isys_pipeline(__pipe) \ container_of((__pipe), struct ipu7_isys_stream, pipe) struct ipu7_isys_video { struct ipu7_isys_queue aq; /* Serialise access to other fields in the struct. */ struct mutex mutex; struct media_pad pad; struct video_device vdev; struct v4l2_pix_format pix_fmt; struct ipu7_isys *isys; struct ipu7_isys_stream *stream; unsigned int streaming; }; #define ipu7_isys_queue_to_video(__aq) \ container_of(__aq, struct ipu7_isys_video, aq) extern const struct ipu7_isys_pixelformat ipu7_isys_pfmts[]; const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat); int ipu7_isys_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap); int ipu7_isys_vidioc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f); void ipu7_isys_prepare_fw_cfg_default(struct ipu7_isys_video *av, struct ipu7_insys_stream_cfg *cfg); int ipu7_isys_video_prepare_streaming(struct ipu7_isys_video *av); int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, unsigned int state, struct ipu7_isys_buffer_list *bl); int ipu7_isys_fw_open(struct ipu7_isys *isys); void ipu7_isys_fw_close(struct ipu7_isys *isys); int ipu7_isys_video_init(struct ipu7_isys_video *av); void ipu7_isys_video_cleanup(struct ipu7_isys_video *av); void ipu7_isys_put_stream(struct ipu7_isys_stream *stream); struct ipu7_isys_stream *ipu7_isys_get_stream(struct ipu7_isys *isys); struct ipu7_isys_stream * ipu7_isys_query_stream_by_handle(struct ipu7_isys *isys, u8 stream_handle); #endif /* IPU7_ISYS_VIDEO_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys.c000066400000000000000000001127001465530421300262720ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #ifdef CONFIG_DEBUG_FS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(6, 6, 0) #include #endif #include #include #include #include #include #include #include "abi/ipu7_fw_isys_abi.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-cpd.h" #include "ipu7-fw-isys.h" #include "ipu7-mmu.h" #include "ipu7-isys.h" #include "ipu7-isys-csi2.h" #include "ipu7-isys-csi2-regs.h" #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC #include "ipu7-isys-tpg.h" #endif #include "ipu7-isys-video.h" #include "ipu7-platform-regs.h" #define 
ISYS_PM_QOS_VALUE 300 static int isys_complete_ext_device_registration(struct ipu7_isys *isys, struct v4l2_subdev *sd, struct ipu7_isys_csi2_config *csi2) { struct device *dev = &isys->adev->auxdev.dev; unsigned int i; int ret; v4l2_set_subdev_hostdata(sd, csi2); for (i = 0; i < sd->entity.num_pads; i++) { if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) break; } if (i == sd->entity.num_pads) { dev_warn(dev, "no source pad in external entity\n"); ret = -ENOENT; goto skip_unregister_subdev; } ret = media_create_pad_link(&sd->entity, i, &isys->csi2[csi2->port].asd.sd.entity, 0, 0); if (ret) { dev_warn(dev, "can't create link\n"); goto skip_unregister_subdev; } isys->csi2[csi2->port].nlanes = csi2->nlanes; return 0; skip_unregister_subdev: v4l2_device_unregister_subdev(sd); return ret; } static void isys_stream_init(struct ipu7_isys *isys) { for (int i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { mutex_init(&isys->streams[i].mutex); init_completion(&isys->streams[i].stream_open_completion); init_completion(&isys->streams[i].stream_close_completion); init_completion(&isys->streams[i].stream_start_completion); init_completion(&isys->streams[i].stream_stop_completion); INIT_LIST_HEAD(&isys->streams[i].queues); isys->streams[i].isys = isys; isys->streams[i].stream_handle = i; } } #define FW_LOG_BUF_SIZE (2 * 1024 * 1024) static int isys_fw_log_init(struct ipu7_isys *isys) { struct device *dev = &isys->adev->auxdev.dev; struct isys_fw_log *fw_log; void *log_buf; if (isys->fw_log) return 0; fw_log = devm_kzalloc(dev, sizeof(*fw_log), GFP_KERNEL); if (!fw_log) return -ENOMEM; mutex_init(&fw_log->mutex); log_buf = devm_kzalloc(dev, FW_LOG_BUF_SIZE, GFP_KERNEL); if (!log_buf) return -ENOMEM; fw_log->head = log_buf; fw_log->addr = log_buf; fw_log->count = 0; fw_log->size = 0; isys->fw_log = fw_log; return 0; } /* The .bound() notifier callback when a match is found */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) static int isys_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) #else static int isys_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_connection *asc) #endif { struct ipu7_isys *isys = container_of(notifier, struct ipu7_isys, notifier); struct device *dev = &isys->adev->auxdev.dev; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) struct sensor_async_subdev *s_asd = container_of(asd, struct sensor_async_subdev, asd); #else struct sensor_async_sd *s_asd = container_of(asc, struct sensor_async_sd, asc); int ret; ret = ipu_bridge_instantiate_vcm(sd->dev); if (ret) { dev_err(dev, "instantiate vcm failed\n"); return ret; } #endif dev_info(dev, "bind %s nlanes is %d port is %d\n", sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static int isys_notifier_complete(struct v4l2_async_notifier *notifier) { struct ipu7_isys *isys = container_of(notifier, struct ipu7_isys, notifier); dev_info(&isys->adev->auxdev.dev, "All sensor registration completed.\n"); return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static const struct v4l2_async_notifier_operations isys_async_ops = { .bound = isys_notifier_bound, .complete = isys_notifier_complete, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) static int isys_fwnode_parse(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct v4l2_async_subdev *asd) { struct sensor_async_subdev *s_asd = container_of(asd, struct 
sensor_async_subdev, asd); s_asd->csi2.port = vep->base.port; s_asd->csi2.nlanes = vep->bus.mipi_csi2.num_data_lanes; return 0; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) && \ LINUX_VERSION_CODE != KERNEL_VERSION(5, 15, 71) static int isys_notifier_init(struct ipu7_isys *isys) { struct ipu7_device *isp = isys->adev->isp; size_t asd_struct_size = sizeof(struct sensor_async_subdev); struct device *dev = &isys->adev->auxdev.dev; int ret; v4l2_async_notifier_init(&isys->notifier); ret = v4l2_async_notifier_parse_fwnode_endpoints(&isp->pdev->dev, &isys->notifier, asd_struct_size, isys_fwnode_parse); if (ret < 0) { dev_err(dev, "v4l2 parse_fwnode_endpoints() failed: %d\n", ret); return ret; } if (list_empty(&isys->notifier.asd_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(dev, "no subdev found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_notifier_register(&isys->v4l2_dev, &isys->notifier); if (ret) { dev_err(dev, "failed to register async notifier : %d\n", ret); v4l2_async_notifier_cleanup(&isys->notifier); } return ret; } #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) static int isys_notifier_init(struct ipu7_isys *isys) { struct ipu7_device *isp = isys->adev->isp; size_t asd_struct_size = sizeof(struct sensor_async_subdev); struct device *dev = &isys->adev->auxdev.dev; int ret; v4l2_async_nf_init(&isys->notifier); ret = v4l2_async_nf_parse_fwnode_endpoints(&isp->pdev->dev, &isys->notifier, asd_struct_size, isys_fwnode_parse); if (ret < 0) { dev_err(dev, "v4l2 parse_fwnode_endpoints() failed: %d\n", ret); return ret; } if (list_empty(&isys->notifier.asd_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(dev, "no subdev found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_nf_register(&isys->v4l2_dev, &isys->notifier); if (ret) { dev_err(dev, "failed to register async notifier : %d\n", ret); v4l2_async_nf_cleanup(&isys->notifier); } return ret; } #else static int isys_notifier_init(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; struct ipu7_device *isp = isys->adev->isp; struct device *dev = &isp->pdev->dev; unsigned int i; int ret; v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); for (i = 0; i < csi2->nports; i++) { struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct sensor_async_sd *s_asd; struct fwnode_handle *ep; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (!ep) continue; ret = v4l2_fwnode_endpoint_parse(ep, &vep); if (ret) goto err_parse; s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, struct sensor_async_sd); if (IS_ERR(s_asd)) { ret = PTR_ERR(s_asd); goto err_parse; } s_asd->csi2.port = vep.base.port; s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; fwnode_handle_put(ep); continue; err_parse: fwnode_handle_put(ep); return ret; } if (list_empty(&isys->notifier.waiting_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(dev, "no subdev found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_nf_register(&isys->notifier); if (ret) { dev_err(dev, "failed to register async notifier : %d\n", ret); v4l2_async_nf_cleanup(&isys->notifier); } return ret; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) && \ LINUX_VERSION_CODE != KERNEL_VERSION(5, 15, 71) static void isys_notifier_cleanup(struct ipu7_isys *isys) { 
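	/*
	 * Unregister first so that no further .bound()/.complete()
	 * callbacks can run, then free the notifier's subdev list.
	 */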
v4l2_async_notifier_unregister(&isys->notifier); v4l2_async_notifier_cleanup(&isys->notifier); } #else static void isys_notifier_cleanup(struct ipu7_isys *isys) { v4l2_async_nf_unregister(&isys->notifier); v4l2_async_nf_cleanup(&isys->notifier); } #endif static void isys_unregister_video_devices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; unsigned int i, j; for (i = 0; i < csi2_pdata->nports; i++) for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) ipu7_isys_video_cleanup(&isys->csi2[i].av[j]); } static int isys_register_video_devices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; unsigned int i, j; int ret; for (i = 0; i < csi2_pdata->nports; i++) { for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { struct ipu7_isys_video *av = &isys->csi2[i].av[j]; snprintf(av->vdev.name, sizeof(av->vdev.name), IPU_ISYS_ENTITY_PREFIX " ISYS Capture %u", i * NR_OF_CSI2_SRC_PADS + j); av->isys = isys; av->aq.vbq.buf_struct_size = sizeof(struct ipu7_isys_video_buffer); ret = ipu7_isys_video_init(av); if (ret) goto fail; } } return 0; fail: i = i + 1; while (i--) { while (j--) ipu7_isys_video_cleanup(&isys->csi2[i].av[j]); j = NR_OF_CSI2_SRC_PADS; } return ret; } static void isys_csi2_unregister_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; unsigned int i; for (i = 0; i < csi2->nports; i++) ipu7_isys_csi2_cleanup(&isys->csi2[i]); } static int isys_csi2_register_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; unsigned int i; int ret; for (i = 0; i < csi2_pdata->nports; i++) { ret = ipu7_isys_csi2_init(&isys->csi2[i], isys, isys->pdata->base + csi2_pdata->offsets[i], i); if (ret) goto fail; } isys->isr_csi2_mask = IPU7_CSI_RX_LEGACY_IRQ_MASK; return 0; fail: while (i--) ipu7_isys_csi2_cleanup(&isys->csi2[i]); return ret; } static int isys_csi2_create_media_links(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; struct device *dev = &isys->adev->auxdev.dev; struct media_entity *sd; unsigned int i, j; int ret; for (i = 0; i < csi2_pdata->nports; i++) { sd = &isys->csi2[i].asd.sd.entity; for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { struct ipu7_isys_video *av = &isys->csi2[i].av[j]; ret = media_create_pad_link(sd, CSI2_PAD_SRC + j, &av->vdev.entity, 0, 0); if (ret) { dev_err(dev, "CSI2 can't create link\n"); return ret; } } } return 0; } #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC static void isys_tpg_unregister_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_tpg_pdata *tpg_pdata = &isys->pdata->ipdata->tpg; unsigned int i; if (!isys->tpg) return; for (i = 0; i < tpg_pdata->ntpgs; i++) ipu7_isys_tpg_cleanup(&isys->tpg[i]); kfree(isys->tpg); isys->tpg = NULL; } static int isys_tpg_register_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_tpg_pdata *tpg_pdata = &isys->pdata->ipdata->tpg; unsigned int i; int ret; isys->tpg = kcalloc(tpg_pdata->ntpgs, sizeof(*isys->tpg), GFP_KERNEL); if (!isys->tpg) return -ENOMEM; for (i = 0; i < tpg_pdata->ntpgs; i++) { ret = ipu7_isys_tpg_init(&isys->tpg[i], isys, isys->pdata->base + tpg_pdata->offsets[i], tpg_pdata->sels ? 
(isys->pdata->base + tpg_pdata->sels[i]) : NULL, i); if (ret) goto fail; } return 0; fail: while (i--) ipu7_isys_tpg_cleanup(&isys->tpg[i]); kfree(isys->tpg); isys->tpg = NULL; return ret; } static int isys_tpg_create_media_links(struct ipu7_isys *isys) { const struct ipu7_isys_internal_tpg_pdata *tpg_pdata = &isys->pdata->ipdata->tpg; struct device *dev = &isys->adev->auxdev.dev; struct ipu7_isys_tpg *tpg; struct media_entity *sd; unsigned int i; int ret; for (i = 0; i < tpg_pdata->ntpgs; i++) { tpg = &isys->tpg[i]; sd = &tpg->asd.sd.entity; tpg->av = &isys->csi2[tpg->index].av[0]; ret = media_create_pad_link(sd, TPG_PAD_SOURCE, &tpg->av->vdev.entity, TPG_PAD_SOURCE, 0); if (ret) { dev_err(dev, "TPG can't create link\n"); return ret; } } return 0; } #endif static int isys_register_devices(struct ipu7_isys *isys) { struct device *dev = &isys->adev->auxdev.dev; struct pci_dev *pdev = isys->adev->isp->pdev; int ret; media_device_pci_init(&isys->media_dev, pdev, IPU_MEDIA_DEV_MODEL_NAME); strscpy(isys->v4l2_dev.name, isys->media_dev.model, sizeof(isys->v4l2_dev.name)); ret = media_device_register(&isys->media_dev); if (ret < 0) goto out_media_device_unregister; isys->v4l2_dev.mdev = &isys->media_dev; isys->v4l2_dev.ctrl_handler = NULL; ret = v4l2_device_register(dev, &isys->v4l2_dev); if (ret < 0) goto out_media_device_unregister; ret = isys_register_video_devices(isys); if (ret) goto out_v4l2_device_unregister; ret = isys_csi2_register_subdevices(isys); if (ret) goto out_video_unregister_device; ret = isys_csi2_create_media_links(isys); if (ret) goto out_csi2_unregister_subdevices; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC ret = isys_tpg_register_subdevices(isys); if (!ret) ret = isys_tpg_create_media_links(isys); if (ret) goto out_tpg_unregister_subdevices; #endif ret = isys_notifier_init(isys); if (ret) #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC goto out_tpg_unregister_subdevices; #else goto out_csi2_unregister_subdevices; #endif return 0; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC out_tpg_unregister_subdevices: isys_tpg_unregister_subdevices(isys); #endif out_csi2_unregister_subdevices: isys_csi2_unregister_subdevices(isys); out_video_unregister_device: isys_unregister_video_devices(isys); out_v4l2_device_unregister: v4l2_device_unregister(&isys->v4l2_dev); out_media_device_unregister: media_device_unregister(&isys->media_dev); media_device_cleanup(&isys->media_dev); dev_err(dev, "failed to register isys devices\n"); return ret; } static void isys_unregister_devices(struct ipu7_isys *isys) { isys_unregister_video_devices(isys); isys_csi2_unregister_subdevices(isys); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC isys_tpg_unregister_subdevices(isys); #endif v4l2_device_unregister(&isys->v4l2_dev); media_device_unregister(&isys->media_dev); media_device_cleanup(&isys->media_dev); } #ifdef CONFIG_PM static void enable_csi2_legacy_irq(struct ipu7_isys *isys, bool enable) { u32 offset, mask; void __iomem *base = isys->pdata->base; offset = IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE; mask = isys->isr_csi2_mask; if (!enable) { writel(mask, base + offset + IRQ_CTL_CLEAR); writel(0, base + offset + IRQ_CTL_ENABLE); return; } writel(mask, base + offset + IRQ_CTL_EDGE); writel(mask, base + offset + IRQ_CTL_CLEAR); writel(mask, base + offset + IRQ_CTL_MASK); writel(mask, base + offset + IRQ_CTL_ENABLE); } static void enable_to_sw_irq(struct ipu7_isys *isys, bool enable) { u32 offset, mask; void __iomem *base = isys->pdata->base; offset = IS_UC_CTRL_BASE; mask = IS_UC_TO_SW_IRQ_MASK; if (!enable) { writel(0, base + offset + 
TO_SW_IRQ_CNTL_ENABLE); return; } writel(mask, base + offset + TO_SW_IRQ_CNTL_CLEAR); writel(mask, base + offset + TO_SW_IRQ_CNTL_MASK_N); writel(mask, base + offset + TO_SW_IRQ_CNTL_ENABLE); } static void isys_setup_hw(struct ipu7_isys *isys) { u32 offset; void __iomem *base = isys->pdata->base; /* soft reset */ offset = IS_IO_GPREGS_BASE; writel(0x0, base + offset + CLK_EN_TXCLKESC); /* Update if ISYS freq updated (0: 400/1, 1:400/2, 63:400/64) */ writel(0x0, base + offset + CLK_DIV_FACTOR_IS_CLK); /* correct the initial printf configuration */ writel(0x200, base + IS_UC_CTRL_BASE + PRINTF_AXI_CNTL); enable_to_sw_irq(isys, 1); enable_csi2_legacy_irq(isys, 1); } static void isys_cleanup_hw(struct ipu7_isys *isys) { enable_csi2_legacy_irq(isys, 0); enable_to_sw_irq(isys, 0); } static int isys_runtime_pm_resume(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_device *isp = adev->isp; struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); unsigned long flags; int ret; if (!isys) return 0; ret = ipu7_mmu_hw_init(adev->mmu); if (ret) return ret; cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); ret = ipu7_buttress_start_tsc_sync(isp); if (ret) return ret; spin_lock_irqsave(&isys->power_lock, flags); isys->power = 1; spin_unlock_irqrestore(&isys->power_lock, flags); isys_setup_hw(isys); return 0; } static int isys_runtime_pm_suspend(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); unsigned long flags; if (!isys) return 0; isys_cleanup_hw(isys); spin_lock_irqsave(&isys->power_lock, flags); isys->power = 0; spin_unlock_irqrestore(&isys->power_lock, flags); mutex_lock(&isys->mutex); isys->need_reset = false; mutex_unlock(&isys->mutex); cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); ipu7_mmu_hw_cleanup(adev->mmu); return 0; } static int isys_suspend(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); /* If stream is open, refuse to suspend */ if (isys->stream_opened) return -EBUSY; return 0; } static int isys_resume(struct device *dev) { return 0; } static const struct dev_pm_ops isys_pm_ops = { .runtime_suspend = isys_runtime_pm_suspend, .runtime_resume = isys_runtime_pm_resume, .suspend = isys_suspend, .resume = isys_resume, }; #define ISYS_PM_OPS (&isys_pm_ops) #else #define ISYS_PM_OPS NULL #endif static void isys_remove(struct auxiliary_device *auxdev) { struct ipu7_bus_device *adev = auxdev_to_adev(auxdev); struct ipu7_isys *isys = dev_get_drvdata(&auxdev->dev); struct isys_fw_msgs *fwmsg, *safe; #ifdef CONFIG_DEBUG_FS if (adev->isp->ipu7_dir) debugfs_remove_recursive(isys->debugfsdir); #endif for (int i = 0; i < IPU_ISYS_MAX_STREAMS; i++) mutex_destroy(&isys->streams[i].mutex); list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), fwmsg, fwmsg->dma_addr, 0); list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), fwmsg, fwmsg->dma_addr, 0); isys_notifier_cleanup(isys); isys_unregister_devices(isys); cpu_latency_qos_remove_request(&isys->pm_qos); mutex_destroy(&isys->stream_mutex); mutex_destroy(&isys->mutex); } #ifdef CONFIG_DEBUG_FS static int ipu7_isys_icache_prefetch_get(void *data, u64 *val) { struct ipu7_isys *isys = data; *val = isys->icache_prefetch; return 0; } static int ipu7_isys_icache_prefetch_set(void *data, 
u64 val) { struct ipu7_isys *isys = data; if (val != !!val) return -EINVAL; isys->icache_prefetch = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops, ipu7_isys_icache_prefetch_get, ipu7_isys_icache_prefetch_set, "%llu\n"); static ssize_t fwlog_read(struct file *file, char __user *userbuf, size_t size, loff_t *pos) { struct ipu7_isys *isys = file->private_data; struct isys_fw_log *fw_log = isys->fw_log; struct device *dev = &isys->adev->auxdev.dev; void *buf; int ret = 0; if (!fw_log) return 0; buf = kvzalloc(FW_LOG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&fw_log->mutex); if (!isys->fw_log->size) { dev_warn(dev, "no available fw log"); mutex_unlock(&fw_log->mutex); goto free_and_return; } memcpy(buf, isys->fw_log->addr, isys->fw_log->size); dev_info(dev, "copy %d bytes fw log to user...", isys->fw_log->size); mutex_unlock(&fw_log->mutex); ret = simple_read_from_buffer(userbuf, size, pos, buf, isys->fw_log->size); free_and_return: kvfree(buf); return ret; } static const struct file_operations isys_fw_log_fops = { .open = simple_open, .owner = THIS_MODULE, .read = fwlog_read, .llseek = default_llseek, }; static int ipu7_isys_init_debugfs(struct ipu7_isys *isys) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir("isys", isys->adev->isp->ipu7_dir); if (IS_ERR(dir)) return -ENOMEM; file = debugfs_create_file("icache_prefetch", 0600, dir, isys, &isys_icache_prefetch_fops); if (IS_ERR(file)) goto err; file = debugfs_create_file("fwlog", 0400, dir, isys, &isys_fw_log_fops); if (IS_ERR(file)) goto err; isys->debugfsdir = dir; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif static int alloc_fw_msg_bufs(struct ipu7_isys *isys, int amount) { struct device *dev = &isys->adev->auxdev.dev; dma_addr_t dma_addr; struct isys_fw_msgs *addr; unsigned int i; unsigned long flags; for (i = 0; i < amount; i++) { addr = dma_alloc_attrs(dev, sizeof(struct isys_fw_msgs), &dma_addr, GFP_KERNEL, 0); if (!addr) break; addr->dma_addr = dma_addr; spin_lock_irqsave(&isys->listlock, flags); list_add(&addr->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } if (i == amount) return 0; spin_lock_irqsave(&isys->listlock, flags); while (!list_empty(&isys->framebuflist)) { addr = list_first_entry(&isys->framebuflist, struct isys_fw_msgs, head); list_del(&addr->head); spin_unlock_irqrestore(&isys->listlock, flags); dma_free_attrs(dev, sizeof(struct isys_fw_msgs), addr, addr->dma_addr, 0); spin_lock_irqsave(&isys->listlock, flags); } spin_unlock_irqrestore(&isys->listlock, flags); return -ENOMEM; } struct isys_fw_msgs *ipu7_get_fw_msg_buf(struct ipu7_isys_stream *stream) { struct device *dev = &stream->isys->adev->auxdev.dev; struct ipu7_isys *isys = stream->isys; struct isys_fw_msgs *msg; unsigned long flags; spin_lock_irqsave(&isys->listlock, flags); if (list_empty(&isys->framebuflist)) { spin_unlock_irqrestore(&isys->listlock, flags); dev_warn(dev, "Frame list empty - Allocate more"); alloc_fw_msg_bufs(isys, 5); spin_lock_irqsave(&isys->listlock, flags); if (list_empty(&isys->framebuflist)) { spin_unlock_irqrestore(&isys->listlock, flags); dev_err(dev, "Frame list empty"); return NULL; } } msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); list_move(&msg->head, &isys->framebuflist_fw); spin_unlock_irqrestore(&isys->listlock, flags); memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); return msg; } void ipu7_cleanup_fw_msg_bufs(struct ipu7_isys *isys) { struct isys_fw_msgs *fwmsg, *fwmsg0; unsigned long flags; 
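	/*
	 * Walk the in-flight firmware list and move every message back
	 * to the free list; ipu7_isys_fw_open() calls this to reclaim
	 * buffers left on the wrong queue by an earlier unclean close.
	 */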
spin_lock_irqsave(&isys->listlock, flags); list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) list_move(&fwmsg->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } void ipu7_put_fw_msg_buf(struct ipu7_isys *isys, u64 data) { struct isys_fw_msgs *msg; unsigned long flags; u64 *ptr = (u64 *)(unsigned long)data; if (WARN_ON_ONCE(!ptr)) return; spin_lock_irqsave(&isys->listlock, flags); msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); list_move(&msg->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } static int isys_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata; struct ipu7_bus_device *adev = auxdev_to_adev(auxdev); struct ipu7_device *isp = adev->isp; struct ipu7_isys *isys; int ret = 0; if (!isp->ipu7_bus_ready_to_probe) return -EPROBE_DEFER; isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL); if (!isys) return -ENOMEM; ret = pm_runtime_resume_and_get(&auxdev->dev); if (ret < 0) return ret; adev->auxdrv_data = (const struct ipu7_auxdrv_data *)auxdev_id->driver_data; adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver); isys->adev = adev; isys->pdata = adev->pdata; INIT_LIST_HEAD(&isys->requests); csi2_pdata = &isys->pdata->ipdata->csi2; isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports, sizeof(*isys->csi2), GFP_KERNEL); if (!isys->csi2) return -ENOMEM; ret = ipu7_mmu_hw_init(adev->mmu); if (ret) { pm_runtime_put(&auxdev->dev); return ret; } spin_lock_init(&isys->streams_lock); spin_lock_init(&isys->power_lock); isys->power = 0; mutex_init(&isys->mutex); mutex_init(&isys->stream_mutex); spin_lock_init(&isys->listlock); INIT_LIST_HEAD(&isys->framebuflist); INIT_LIST_HEAD(&isys->framebuflist_fw); dev_set_drvdata(&auxdev->dev, isys); isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN; isys->icache_prefetch = 0; isys->phy_rext_cal = 0; isys_stream_init(isys); #ifndef CONFIG_PM isys_setup_hw(isys); #endif #ifdef CONFIG_DEBUG_FS /* Debug fs failure is not fatal. */ ipu7_isys_init_debugfs(isys); #endif cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); alloc_fw_msg_bufs(isys, 20); ret = ipu7_fw_isys_init(isys); if (ret) goto out_cleanup; ret = isys_register_devices(isys); if (ret) goto out_cleanup; ret = isys_fw_log_init(isys); if (ret) goto out_cleanup; ipu7_mmu_hw_cleanup(adev->mmu); pm_runtime_put(&auxdev->dev); return 0; out_cleanup: isys_unregister_devices(isys); ipu7_fw_isys_release(isys); for (int i = 0; i < IPU_ISYS_MAX_STREAMS; i++) mutex_destroy(&isys->streams[i].mutex); mutex_destroy(&isys->mutex); mutex_destroy(&isys->stream_mutex); ipu7_mmu_hw_cleanup(adev->mmu); pm_runtime_put(&auxdev->dev); return ret; } struct ipu7_csi2_error { const char *error_string; bool is_info_only; }; /* * Strings corresponding to CSI-2 receiver errors are here. * Corresponding macros are defined in the header file. 
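 *
 * The table is indexed by bit position: if bit i is set in the CSI-2
 * error IRQ status word, dphy_rx_errors[i].error_string is printed
 * (see ipu7_isys_csi2_error() below).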
 */
static struct ipu7_csi2_error dphy_rx_errors[] = {
	{ "Error handler FIFO full", false },
	{ "Reserved Short Packet encoding detected", true },
	{ "Reserved Long Packet encoding detected", true },
	{ "Received packet is too short", false },
	{ "Received packet is too long", false },
	{ "Short packet discarded due to errors", false },
	{ "Long packet discarded due to errors", false },
	{ "CSI Combo Rx interrupt", false },
	{ "IDI CDC FIFO overflow (remaining bits are reserved as 0)", false },
	{ "Received NULL packet", true },
	{ "Received blanking packet", true },
	{ "Tie to 0", true },
	{ }
};

static void ipu7_isys_register_errors(struct ipu7_isys_csi2 *csi2)
{
	int mask = IPU7_CSI_RX_ERROR_IRQ_MASK;
	u32 offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port);
	u32 status = readl(csi2->base + offset + IRQ_CTL_STATUS);

	if (!status)
		return;

	dev_dbg(&csi2->isys->adev->auxdev.dev, "csi2-%u error status 0x%08x",
		csi2->port, status);
	writel(status & mask, csi2->base + offset + IRQ_CTL_CLEAR);
	csi2->receiver_errors |= status & mask;
}

static void ipu7_isys_csi2_error(struct ipu7_isys_csi2 *csi2)
{
	struct ipu7_csi2_error *errors;
	u32 status;
	unsigned int i;

	/* Register errors once more in case the error interrupts are disabled */
	ipu7_isys_register_errors(csi2);
	status = csi2->receiver_errors;
	csi2->receiver_errors = 0;
	errors = dphy_rx_errors;

	for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) {
		if (status & BIT(i))
			dev_err_ratelimited(&csi2->isys->adev->auxdev.dev,
					    "csi2-%i error: %s\n",
					    csi2->port,
					    errors[i].error_string);
	}
}

struct resp_to_msg {
	enum ipu7_insys_resp_type type;
	const char *msg;
};

static const struct resp_to_msg is_fw_msg[] = {
	{IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE,
	 "IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE"},
	{IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
	 "IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK"},
	{IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK,
	 "IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK"},
	{IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK,
	 "IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK"},
	{IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK,
	 "IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK"},
	{IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK,
	 "IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK"},
	{IPU_INSYS_RESP_TYPE_PIN_DATA_READY,
	 "IPU_INSYS_RESP_TYPE_PIN_DATA_READY"},
	{IPU_INSYS_RESP_TYPE_FRAME_SOF, "IPU_INSYS_RESP_TYPE_FRAME_SOF"},
	{IPU_INSYS_RESP_TYPE_FRAME_EOF, "IPU_INSYS_RESP_TYPE_FRAME_EOF"},
	{IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
	 "IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE"},
	{IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE,
	 "IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE"},
	{N_IPU_INSYS_RESP_TYPE, "N_IPU_INSYS_RESP_TYPE"},
};

int isys_isr_one(struct ipu7_bus_device *adev)
{
	struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev);
	struct ipu7_insys_resp *resp;
	struct ipu7_isys_stream *stream = NULL;
	struct ia_gofo_msg_err err_info;
	struct device *dev = &adev->auxdev.dev;
	struct ipu7_isys_csi2 *csi2 = NULL;
#ifdef CONFIG_VIDEO_INTEL_IPU7_MGC
	struct ipu7_isys_tpg *tpg = NULL;
#endif
	u64 ts;

	if (!isys->adev->syscom)
		return 1;

#ifdef ENABLE_FW_OFFLINE_LOGGER
	ipu7_fw_isys_get_log(isys);
#endif
	resp = ipu7_fw_isys_get_resp(isys);
	if (!resp)
		return 1;

	if (resp->type >= N_IPU_INSYS_RESP_TYPE) {
		dev_err(dev, "Unknown response type %u stream %u\n",
			resp->type, resp->stream_id);
		ipu7_fw_isys_put_resp(isys);
		return 1;
	}

	err_info = resp->error_info;
	ts = ((u64)resp->timestamp[1] << 32) | resp->timestamp[0];
	if (err_info.err_group == INSYS_MSG_ERR_GROUP_CAPTURE &&
	    err_info.err_code == INSYS_MSG_ERR_CAPTURE_SYNC_FRAME_DROP) {
		/*
		 * A short packet (sp) was received without a queued
		 * command, so the firmware dropped it.
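		 * Such drops are logged at debug level below rather
		 * than as errors; they typically just mean no capture
		 * command was queued in time for this frame.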
*/ dev_dbg(dev, "FRAME DROP: %02u %s stream %u\n", resp->type, is_fw_msg[resp->type].msg, resp->stream_id); dev_dbg(dev, "\tpin %u buf_id %llx frame %u\n", resp->pin_id, resp->buf_id, resp->frame_id); dev_dbg(dev, "\terror group %u code %u details [%u %u]\n", err_info.err_group, err_info.err_code, err_info.err_detail[0], err_info.err_detail[1]); } else if (!IA_GOFO_MSG_ERR_IS_OK(err_info)) { dev_err(dev, "%02u %s stream %u pin %u buf_id %llx frame %u\n", resp->type, is_fw_msg[resp->type].msg, resp->stream_id, resp->pin_id, resp->buf_id, resp->frame_id); dev_err(dev, "\terror group %u code %u details [%u %u]\n", err_info.err_group, err_info.err_code, err_info.err_detail[0], err_info.err_detail[1]); } else { dev_dbg(dev, "%02u %s stream %u pin %u buf_id %llx frame %u\n", resp->type, is_fw_msg[resp->type].msg, resp->stream_id, resp->pin_id, resp->buf_id, resp->frame_id); dev_dbg(dev, "\tts %llu\n", ts); } if (resp->stream_id >= IPU_ISYS_MAX_STREAMS) { dev_err(dev, "bad stream handle %u\n", resp->stream_id); goto leave; } stream = ipu7_isys_query_stream_by_handle(isys, resp->stream_id); if (!stream) { dev_err(dev, "stream of stream_handle %u is unused\n", resp->stream_id); goto leave; } /* TODO: stream->error should be modified */ stream->error = err_info.err_code; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (stream->asd) { if (stream->asd->is_tpg) tpg = ipu7_isys_subdev_to_tpg(stream->asd); else csi2 = ipu7_isys_subdev_to_csi2(stream->asd); } #else if (stream->asd) csi2 = ipu7_isys_subdev_to_csi2(stream->asd); #endif switch (resp->type) { case IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE: complete(&stream->stream_open_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK: complete(&stream->stream_close_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: complete(&stream->stream_start_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK: complete(&stream->stream_stop_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK: complete(&stream->stream_stop_completion); break; case IPU_INSYS_RESP_TYPE_PIN_DATA_READY: /* * firmware only release the capture msg until software * get pin_data_ready event */ ipu7_put_fw_msg_buf(ipu7_bus_get_drvdata(adev), resp->buf_id); if (resp->pin_id < IPU_INSYS_OUTPUT_PINS && stream->output_pins[resp->pin_id].pin_ready) stream->output_pins[resp->pin_id].pin_ready(stream, resp); else dev_err(dev, "No handler for pin %u ready\n", resp->pin_id); if (csi2) ipu7_isys_csi2_error(csi2); break; case IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK: break; case IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: case IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE: break; case IPU_INSYS_RESP_TYPE_FRAME_SOF: if (csi2) ipu7_isys_csi2_sof_event_by_stream(stream); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (tpg) ipu7_isys_tpg_sof_event_by_stream(stream); #endif stream->seq[stream->seq_index].sequence = atomic_read(&stream->sequence) - 1; stream->seq[stream->seq_index].timestamp = ts; dev_dbg(dev, "SOF: stream %u frame %u (index %u), ts 0x%16.16llx\n", resp->stream_id, resp->frame_id, stream->seq[stream->seq_index].sequence, ts); stream->seq_index = (stream->seq_index + 1) % IPU_ISYS_MAX_PARALLEL_SOF; break; case IPU_INSYS_RESP_TYPE_FRAME_EOF: if (csi2) ipu7_isys_csi2_eof_event_by_stream(stream); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (tpg) ipu7_isys_tpg_eof_event_by_stream(stream); #endif dev_dbg(dev, "eof: stream %d(index %u) ts 0x%16.16llx\n", resp->stream_id, stream->seq[stream->seq_index].sequence, ts); break; default: dev_err(dev, "Unknown response type %u stream 
%u\n", resp->type, resp->stream_id); break; } ipu7_isys_put_stream(stream); leave: ipu7_fw_isys_put_resp(isys); return 0; } /* * It has only one stream every pipeline of CSI2 now, * so get stream which is not NULL in av. */ static struct ipu7_isys_stream * ipu7_isys_get_stream_by_av(struct ipu7_isys_csi2 *csi2) { unsigned int i; for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { if (csi2->av[i].stream) return csi2->av[i].stream; } return NULL; } static void ipu7_isys_csi2_isr(struct ipu7_isys_csi2 *csi2) { u32 status, offset; struct ipu7_device *isp = csi2->isys->adev->isp; struct ipu7_isys_stream *stream; ipu7_isys_register_errors(csi2); offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port); status = readl(csi2->base + offset + IRQ_CTL_STATUS); dev_dbg(&csi2->isys->adev->auxdev.dev, "csi2-%u sync status 0x%08x", csi2->port, status); writel(status, csi2->base + offset + IRQ_CTL_CLEAR); stream = ipu7_isys_get_stream_by_av(csi2); if (!stream) { dev_err(&csi2->isys->adev->auxdev.dev, "no stream for csi2-%u\n", csi2->port); return; } if (status & IPU_CSI_RX_SYNC_FS_VC) ipu7_isys_csi2_sof_event_by_stream(stream); if (is_ipu7p5(isp->hw_ver)) { status = readl(csi2->base + offset + IRQ1_CTL_STATUS); writel(status, csi2->base + offset + IRQ1_CTL_CLEAR); if (status & IPU7P5_CSI_RX_SYNC_FE_VC) ipu7_isys_csi2_sof_event_by_stream(stream); return; } if (status & IPU_CSI_RX_SYNC_FE_VC) ipu7_isys_csi2_eof_event_by_stream(stream); } irqreturn_t isys_isr(struct ipu7_bus_device *adev) { struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); struct device *dev = &isys->adev->auxdev.dev; void __iomem *base = isys->pdata->base; u32 status_csi, status_sw, csi_offset, sw_offset; spin_lock(&isys->power_lock); if (!isys->power) { spin_unlock(&isys->power_lock); return IRQ_NONE; } csi_offset = IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE; sw_offset = IS_BASE; status_csi = readl(base + csi_offset + IRQ_CTL_STATUS); status_sw = readl(base + sw_offset + TO_SW_IRQ_CNTL_STATUS); if (!status_csi && !status_sw) { spin_unlock(&isys->power_lock); return IRQ_NONE; } if (status_csi) dev_dbg(dev, "status csi 0x%08x\n", status_csi); if (status_sw) dev_dbg(dev, "status to_sw 0x%08x\n", status_sw); do { writel(status_sw, base + sw_offset + TO_SW_IRQ_CNTL_CLEAR); writel(status_csi, base + csi_offset + IRQ_CTL_CLEAR); if (isys->isr_csi2_mask & status_csi) { unsigned int i; for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { /* irq from not enabled port */ if (!isys->csi2[i].base) continue; if (status_csi & isys->csi2[i].legacy_irq_mask) ipu7_isys_csi2_isr(&isys->csi2[i]); } } if (!isys_isr_one(adev)) status_sw = TO_SW_IRQ_FW; else status_sw = 0; status_csi = readl(base + csi_offset + IRQ_CTL_STATUS); status_sw |= readl(base + sw_offset + TO_SW_IRQ_CNTL_STATUS); } while ((status_csi & isys->isr_csi2_mask) || (status_sw & TO_SW_IRQ_FW)); writel(TO_SW_IRQ_MASK, base + sw_offset + TO_SW_IRQ_CNTL_MASK_N); spin_unlock(&isys->power_lock); return IRQ_HANDLED; } static const struct ipu7_auxdrv_data ipu7_isys_auxdrv_data = { .isr = isys_isr, .isr_threaded = NULL, .wake_isr_thread = false, }; static const struct auxiliary_device_id ipu7_isys_id_table[] = { { .name = "intel_ipu7.isys", .driver_data = (kernel_ulong_t)&ipu7_isys_auxdrv_data, }, { } }; MODULE_DEVICE_TABLE(auxiliary, ipu7_isys_id_table); static struct auxiliary_driver isys_driver = { .name = IPU_ISYS_NAME, .probe = isys_probe, .remove = isys_remove, .id_table = ipu7_isys_id_table, .driver = { .pm = ISYS_PM_OPS, }, }; module_auxiliary_driver(isys_driver); MODULE_AUTHOR("Bingbu Cao "); 
MODULE_AUTHOR("Tianshu Qiu "); MODULE_AUTHOR("Qingwu Zhang "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 input system driver"); MODULE_IMPORT_NS(INTEL_IPU7); MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-isys.h000066400000000000000000000101071465530421300262750ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_ISYS_H #define IPU7_ISYS_H #include #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_msg_abi.h" #include "abi/ipu7_fw_isys_abi.h" #include "ipu7.h" #include "ipu7-isys-csi2.h" #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC #include "ipu7-isys-tpg.h" #endif #include "ipu7-isys-video.h" #ifdef CONFIG_DEBUG_FS struct dentry; #endif #define IPU_ISYS_ENTITY_PREFIX "Intel IPU7" /* FW support max 8 streams */ #define IPU_ISYS_MAX_STREAMS 8 #define IPU_ISYS_2600_MEM_LINE_ALIGN 64 /* for TPG */ #define IPU_ISYS_FREQ 533000000UL /* * Current message queue configuration. These must be big enough * so that they never gets full. Queues are located in system memory */ #define IPU_ISYS_SIZE_RECV_QUEUE 40 #define IPU_ISYS_SIZE_LOG_QUEUE 256 #define IPU_ISYS_SIZE_SEND_QUEUE 40 #define IPU_ISYS_NUM_RECV_QUEUE 1 #define IPU_ISYS_MIN_WIDTH 2U #define IPU_ISYS_MIN_HEIGHT 2U #define IPU_ISYS_MAX_WIDTH 8160U #define IPU_ISYS_MAX_HEIGHT 8190U #define FW_CALL_TIMEOUT_JIFFIES \ msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) struct isys_fw_log { struct mutex mutex; /* protect whole struct */ void *head; void *addr; u32 count; /* running counter of log */ u32 size; /* actual size of log content, in bits */ }; /* * struct ipu7_isys * * @media_dev: Media device * @v4l2_dev: V4L2 device * @adev: ISYS bus device * @power: Is ISYS powered on or not? * @isr_bits: Which bits does the ISR handle? 
* @power_lock: Serialise access to power (power state in general) * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers * @streams_lock: serialise access to streams * @streams: streams per firmware stream ID * @syscom: fw communication layer context * @line_align: line alignment in memory * @need_reset: Isys requires d0i0->i3 transition * @ref_count: total number of callers fw open * @mutex: serialise access isys video open/release related operations * @stream_mutex: serialise stream start and stop, queueing requests * @pdata: platform data pointer * @csi2: CSI-2 receivers #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC * @tpg: test pattern generators #endif */ struct ipu7_isys { struct media_device media_dev; struct v4l2_device v4l2_dev; struct ipu7_bus_device *adev; int power; spinlock_t power_lock; /* Serialise access to power */ u32 isr_csi2_mask; u32 csi2_rx_ctrl_cached; spinlock_t streams_lock; struct ipu7_isys_stream streams[IPU_ISYS_MAX_STREAMS]; int streams_ref_count[IPU_ISYS_MAX_STREAMS]; unsigned int line_align; u32 phy_rext_cal; bool need_reset; bool icache_prefetch; bool csi2_cse_ipc_not_supported; unsigned int ref_count; unsigned int stream_opened; #ifdef CONFIG_DEBUG_FS struct dentry *debugfsdir; #endif struct mutex mutex; /* Serialise isys video open/release related */ struct mutex stream_mutex; /* Stream start, stop, queueing reqs */ struct ipu7_isys_pdata *pdata; struct ipu7_isys_csi2 *csi2; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_tpg *tpg; #endif struct isys_fw_log *fw_log; struct list_head requests; struct pm_qos_request pm_qos; spinlock_t listlock; /* Protect framebuflist */ struct list_head framebuflist; struct list_head framebuflist_fw; struct v4l2_async_notifier notifier; struct ipu7_insys_config *subsys_config; dma_addr_t subsys_config_dma_addr; }; struct isys_fw_msgs { union { u64 dummy; struct ipu7_insys_buffset frame; struct ipu7_insys_stream_cfg stream; } fw_msg; struct list_head head; dma_addr_t dma_addr; }; struct isys_fw_msgs *ipu7_get_fw_msg_buf(struct ipu7_isys_stream *stream); void ipu7_put_fw_msg_buf(struct ipu7_isys *isys, u64 data); void ipu7_cleanup_fw_msg_bufs(struct ipu7_isys *isys); extern const struct v4l2_ioctl_ops ipu7_isys_ioctl_ops; int isys_isr_one(struct ipu7_bus_device *adev); irqreturn_t isys_isr(struct ipu7_bus_device *adev); #endif /* IPU7_ISYS_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-mmu.c000066400000000000000000000534331465530421300261100ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-dma.h" #include "ipu7-mmu.h" #include "ipu7-platform-regs.h" #define ISP_PAGE_SHIFT 12 #define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) #define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) #define ISP_L1PT_SHIFT 22 #define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) #define ISP_L2PT_SHIFT 12 #define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) #define ISP_L1PT_PTES 1024 #define ISP_L2PT_PTES 1024 #define ISP_PADDR_SHIFT 12 #define REG_L1_PHYS 0x0004 /* 27-bit pfn */ #define REG_INFO 0x0008 #define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) #define MMU_TLB_INVALIDATE_TIMEOUT 2000 static __maybe_unused void mmu_irq_handler(struct ipu7_mmu *mmu) { u32 irq_cause; unsigned int i; for (i = 0; i < mmu->nr_mmus; 
i++) { irq_cause = readl(mmu->mmu_hw[i].base + MMU_REG_IRQ_CAUSE); pr_info("mmu %s irq_cause = 0x%x", mmu->mmu_hw[i].name, irq_cause); writel(0x1ffff, mmu->mmu_hw[i].base + MMU_REG_IRQ_CLEAR); } } static void tlb_invalidate(struct ipu7_mmu *mmu) { unsigned long flags; unsigned int i; int ret; u32 val; spin_lock_irqsave(&mmu->ready_lock, flags); if (!mmu->ready) { spin_unlock_irqrestore(&mmu->ready_lock, flags); return; } for (i = 0; i < mmu->nr_mmus; i++) { writel(0xffffffff, mmu->mmu_hw[i].base + MMU_REG_INVALIDATE_0); /* Need check with HW, use l1streams or l2streams */ if (mmu->mmu_hw[i].nr_l2streams > 32) writel(0xffffffff, mmu->mmu_hw[i].base + MMU_REG_INVALIDATE_1); /* * The TLB invalidation is a "single cycle" (IOMMU clock cycles) * When the actual MMIO write reaches the IPU TLB Invalidate * register, wmb() will force the TLB invalidate out if the CPU * attempts to update the IOMMU page table (or sooner). */ wmb(); /* wait invalidation done */ ret = readl_poll_timeout_atomic(mmu->mmu_hw[i].base + MMU_REG_INVALIDATION_STATUS, val, !(val & 0x1), 500, MMU_TLB_INVALIDATE_TIMEOUT); if (ret) dev_err(mmu->dev, "MMU[%u] TLB invalidate failed\n", i); } spin_unlock_irqrestore(&mmu->ready_lock, flags); } #ifdef DEBUG static void page_table_dump(struct ipu7_mmu_info *mmu_info) { u32 l1_idx; dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n"); for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { u32 l2_idx; u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT; if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) continue; dev_dbg(mmu_info->dev, "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %pa\n", l1_idx, iova, iova + ISP_PAGE_SIZE, TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx])); for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { u32 *l2_pt = mmu_info->l2_pts[l1_idx]; u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval) continue; dev_dbg(mmu_info->dev, "\tl2 entry %u; iova 0x%8.8x, phys %pa\n", l2_idx, iova2, TBL_PHYS_ADDR(l2_pt[l2_idx])); } } dev_dbg(mmu_info->dev, "end IOMMU page table dump\n"); } #endif /* DEBUG */ static dma_addr_t map_single(struct ipu7_mmu_info *mmu_info, void *ptr) { dma_addr_t dma; dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(mmu_info->dev, dma)) return 0; return dma; } static int get_dummy_page(struct ipu7_mmu_info *mmu_info) { void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); dma_addr_t dma; if (!pt) return -ENOMEM; dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt); dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map dummy page\n"); goto err_free_page; } mmu_info->dummy_page = pt; mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; return 0; err_free_page: free_page((unsigned long)pt); return -ENOMEM; } static void free_dummy_page(struct ipu7_mmu_info *mmu_info) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_page); } static int alloc_dummy_l2_pt(struct ipu7_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); dma_addr_t dma; unsigned int i; if (!pt) return -ENOMEM; dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt); dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map l2pt page\n"); goto err_free_page; } for (i = 0; i < ISP_L2PT_PTES; i++) pt[i] = mmu_info->dummy_page_pteval; mmu_info->dummy_l2_pt = pt; mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; 
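	/*
	 * Note (descriptive, no behavioural change): every PTE of the dummy
	 * L2 table now points at the dummy page, so any IOVA that was never
	 * explicitly mapped resolves to a harmless scratch page instead of
	 * an uninitialised translation. alloc_l1_pt() seeds all L1 entries
	 * with this dummy_l2_pteval for the same reason.
	 */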
return 0; err_free_page: free_page((unsigned long)pt); return -ENOMEM; } static void free_dummy_l2_pt(struct ipu7_mmu_info *mmu_info) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_l2_pt); } static u32 *alloc_l1_pt(struct ipu7_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); dma_addr_t dma; unsigned int i; if (!pt) return NULL; dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt); for (i = 0; i < ISP_L1PT_PTES; i++) pt[i] = mmu_info->dummy_l2_pteval; dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map l1pt page\n"); goto err_free_page; } mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma); return pt; err_free_page: free_page((unsigned long)pt); return NULL; } static u32 *alloc_l2_pt(struct ipu7_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); unsigned int i; if (!pt) return NULL; dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt); for (i = 0; i < ISP_L1PT_PTES; i++) pt[i] = mmu_info->dummy_page_pteval; return pt; } static size_t l2_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova, phys_addr_t dummy, size_t size) { unsigned int l2_idx; unsigned int l2_entries; size_t unmapped = 0; unsigned long flags; u32 l1_idx; u32 *l2_pt; spin_lock_irqsave(&mmu_info->lock, flags); for (l1_idx = iova >> ISP_L1PT_SHIFT; size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n", l1_idx, iova); if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { spin_unlock_irqrestore(&mmu_info->lock, flags); dev_err(mmu_info->dev, "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n", iova, l1_idx); return unmapped << ISP_PAGE_SHIFT; } l2_pt = mmu_info->l2_pts[l1_idx]; l2_entries = 0; for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { dev_dbg(mmu_info->dev, "unmap l2 index %u with pteval 0x%10.10llx\n", l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx])); l2_pt[l2_idx] = mmu_info->dummy_page_pteval; iova += ISP_PAGE_SIZE; size -= ISP_PAGE_SIZE; l2_entries++; } if (l2_entries) { clflush_cache_range(&l2_pt[l2_idx - l2_entries], sizeof(l2_pt[0]) * l2_entries); unmapped += l2_entries; } } spin_unlock_irqrestore(&mmu_info->lock, flags); return unmapped << ISP_PAGE_SHIFT; } static int l2_map(struct ipu7_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size) { struct device *dev = mmu_info->dev; u32 *l2_pt, *l2_virt; unsigned int l2_entries; unsigned int l2_idx; unsigned long flags; size_t mapped_size = 0; dma_addr_t dma; u32 l1_idx; u32 l1_entry; int err = 0; spin_lock_irqsave(&mmu_info->lock, flags); paddr = ALIGN(paddr, ISP_PAGE_SIZE); for (l1_idx = iova >> ISP_L1PT_SHIFT; size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { dev_dbg(dev, "mapping l2 page table for l1 index %u (iova %8.8x)\n", l1_idx, (u32)iova); l1_entry = mmu_info->l1_pt[l1_idx]; if (l1_entry == mmu_info->dummy_l2_pteval) { l2_virt = mmu_info->l2_pts[l1_idx]; if (likely(!l2_virt)) { l2_virt = alloc_l2_pt(mmu_info); if (!l2_virt) { spin_unlock_irqrestore(&mmu_info->lock, flags); err = -ENOMEM; goto error; } } dma = map_single(mmu_info, l2_virt); if (!dma) { dev_err(dev, "Failed to map l2pt page\n"); free_page((unsigned long)l2_virt); spin_unlock_irqrestore(&mmu_info->lock, flags); err = -EINVAL; goto error; } l1_entry = dma >> ISP_PADDR_SHIFT; 
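	/*
	 * Index math used in this function (a recap of the ISP_* constants
	 * at the top of this file), for a given iova:
	 *
	 *	l1_idx = iova >> ISP_L1PT_SHIFT;                    bits 31..22
	 *	l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;  bits 21..12
	 *	pteval = paddr >> ISP_PADDR_SHIFT;                  4 KiB frame
	 *
	 * so a 32-bit PTE can address up to a 44-bit physical range.
	 */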
dev_dbg(dev, "page for l1_idx %u %p allocated\n", l1_idx, l2_virt); mmu_info->l1_pt[l1_idx] = l1_entry; mmu_info->l2_pts[l1_idx] = l2_virt; clflush_cache_range(&mmu_info->l1_pt[l1_idx], sizeof(mmu_info->l1_pt[l1_idx])); } l2_pt = mmu_info->l2_pts[l1_idx]; l2_entries = 0; for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]); iova += ISP_PAGE_SIZE; paddr += ISP_PAGE_SIZE; mapped_size += ISP_PAGE_SIZE; size -= ISP_PAGE_SIZE; l2_entries++; } if (l2_entries) clflush_cache_range(&l2_pt[l2_idx - l2_entries], sizeof(l2_pt[0]) * l2_entries); } spin_unlock_irqrestore(&mmu_info->lock, flags); return 0; error: /* unroll mapping in case something went wrong */ if (size && mapped_size) l2_unmap(mmu_info, iova - mapped_size, paddr - mapped_size, mapped_size); return err; } static int __ipu_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size) { u32 iova_start = round_down(iova, ISP_PAGE_SIZE); u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); dev_dbg(mmu_info->dev, "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n", iova_start, iova_end, size, paddr); return l2_map(mmu_info, iova_start, paddr, size); } static size_t __ipu_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova, size_t size) { return l2_unmap(mmu_info, iova, 0, size); } static int allocate_trash_buffer(struct ipu7_mmu *mmu) { unsigned int n_pages = PHYS_PFN(PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE)); unsigned long iova_addr; struct iova *iova; unsigned int i; dma_addr_t dma; int ret; /* Allocate 8MB in iova range */ iova = alloc_iova(&mmu->dmap->iovad, n_pages, PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); if (!iova) { dev_err(mmu->dev, "cannot allocate iova range for trash\n"); return -ENOMEM; } dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) { dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n"); ret = -ENOMEM; goto out_free_iova; } mmu->pci_trash_page = dma; /* * Map the 8MB iova address range to the same physical trash page * mmu->trash_page which is already reserved at the probe */ iova_addr = iova->pfn_lo; for (i = 0; i < n_pages; i++) { ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), mmu->pci_trash_page, PAGE_SIZE); if (ret) { dev_err(mmu->dev, "mapping trash buffer range failed\n"); goto out_unmap; } iova_addr++; } mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo); dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n", mmu->mmid, (unsigned int)mmu->iova_trash_page); return 0; out_unmap: ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), PFN_PHYS(iova_size(iova))); dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page, PAGE_SIZE, DMA_BIDIRECTIONAL); out_free_iova: __free_iova(&mmu->dmap->iovad, iova); return ret; } static void __mmu_at_init(struct ipu7_mmu *mmu) { struct ipu7_mmu_info *mmu_info; unsigned int i; mmu_info = mmu->dmap->mmu_info; for (i = 0; i < mmu->nr_mmus; i++) { struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; unsigned int j; /* Write page table address per MMU */ writel((phys_addr_t)mmu_info->l1_pt_dma, mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR); dev_dbg(mmu->dev, "mmu %s base was set as %x\n", mmu_hw->name, readl(mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR)); /* Set info bits and axi_refill per MMU */ writel(mmu_hw->info_bits, mmu_hw->base + 
MMU_REG_USER_INFO_BITS); writel(mmu_hw->refill, mmu_hw->base + MMU_REG_AXI_REFILL_IF_ID); writel(mmu_hw->collapse_en_bitmap, mmu_hw->base + MMU_REG_COLLAPSE_ENABLE_BITMAP); if (mmu_hw->at_sp_arb_cfg) writel(mmu_hw->at_sp_arb_cfg, mmu_hw->base + MMU_REG_AT_SP_ARB_CFG); /* default irq configuration */ writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_MASK); writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_ENABLE); /* Configure MMU TLB stream configuration for L1/L2 */ for (j = 0; j < mmu_hw->nr_l1streams; j++) { writel(mmu_hw->l1_block_sz[j], mmu_hw->base + mmu_hw->l1_block + 4 * j); } for (j = 0; j < mmu_hw->nr_l2streams; j++) { writel(mmu_hw->l2_block_sz[j], mmu_hw->base + mmu_hw->l2_block + 4 * j); } for (j = 0; j < mmu_hw->uao_p_num; j++) { if (!mmu_hw->uao_p2tlb[j]) continue; writel(mmu_hw->uao_p2tlb[j], mmu_hw->uao_base + 4 * j); } } } static void __mmu_zlx_init(struct ipu7_mmu *mmu) { unsigned int i; dev_dbg(mmu->dev, "mmu zlx init\n"); for (i = 0; i < mmu->nr_mmus; i++) { struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; unsigned int j; dev_dbg(mmu->dev, "mmu %s zlx init\n", mmu_hw->name); for (j = 0; j < IPU_ZLX_POOL_NUM; j++) { if (!mmu_hw->zlx_axi_pool[j]) continue; writel(mmu_hw->zlx_axi_pool[j], mmu_hw->zlx_base + ZLX_REG_AXI_POOL + j * 0x4); } for (j = 0; j < mmu_hw->zlx_nr; j++) { if (!mmu_hw->zlx_conf[j]) continue; writel(mmu_hw->zlx_conf[j], mmu_hw->zlx_base + ZLX_REG_CONF + j * 0x8); } for (j = 0; j < mmu_hw->zlx_nr; j++) { if (!mmu_hw->zlx_en[j]) continue; writel(mmu_hw->zlx_en[j], mmu_hw->zlx_base + ZLX_REG_EN + j * 0x8); } } } int ipu7_mmu_hw_init(struct ipu7_mmu *mmu) { unsigned long flags; dev_dbg(mmu->dev, "IPU mmu hardware init\n"); /* Initialise the each MMU and ZLX */ __mmu_at_init(mmu); __mmu_zlx_init(mmu); if (!mmu->trash_page) { int ret; mmu->trash_page = alloc_page(GFP_KERNEL); if (!mmu->trash_page) { dev_err(mmu->dev, "insufficient memory for trash buffer\n"); return -ENOMEM; } ret = allocate_trash_buffer(mmu); if (ret) { __free_page(mmu->trash_page); mmu->trash_page = NULL; dev_err(mmu->dev, "trash buffer allocation failed\n"); return ret; } } spin_lock_irqsave(&mmu->ready_lock, flags); mmu->ready = true; spin_unlock_irqrestore(&mmu->ready_lock, flags); return 0; } EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_init, INTEL_IPU7); static struct ipu7_mmu_info *ipu7_mmu_alloc(struct ipu7_device *isp) { struct ipu7_mmu_info *mmu_info; int ret; mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL); if (!mmu_info) return NULL; mmu_info->aperture_start = IPU_FW_CODE_REGION_START; if (isp->secure_mode) mmu_info->aperture_end = DMA_BIT_MASK(IPU_MMU_ADDR_BITS); else mmu_info->aperture_end = DMA_BIT_MASK(IPU_MMU_ADDR_BITS_NON_SECURE); mmu_info->pgsize_bitmap = SZ_4K; mmu_info->dev = &isp->pdev->dev; ret = get_dummy_page(mmu_info); if (ret) goto err_free_info; ret = alloc_dummy_l2_pt(mmu_info); if (ret) goto err_free_dummy_page; mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts)); if (!mmu_info->l2_pts) goto err_free_dummy_l2_pt; /* * We always map the L1 page table (a single page as well as * the L2 page tables). 
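 *
 * Layout recap (follows from the ISP_L1PT/ISP_L2PT constants above):
 * each table is one 4 KiB page holding 1024 32-bit entries, so a 32-bit
 * IOVA splits into a 10-bit L1 index, a 10-bit L2 index and a 12-bit
 * page offset, covering 4 GiB with at most 1 + 1024 table pages.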
*/
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}

void ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_cleanup, INTEL_IPU7);

static struct ipu7_dma_mapping *alloc_dma_mapping(struct ipu7_device *isp)
{
	struct ipu7_dma_mapping *dmap;
	unsigned long base_pfn;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu7_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	/* 0~64M is forbidden for uctile controller */
	base_pfn = max_t(unsigned long, 1,
			 PFN_DOWN(dmap->mmu_info->aperture_start));
	init_iova_domain(&dmap->iovad, SZ_4K, base_pfn);
	dmap->mmu_info->dmap = dmap;

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	iova_cache_get();

	return dmap;
}

phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info,
				  dma_addr_t iova)
{
	phys_addr_t phy_addr;
	unsigned long flags;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}

size_t ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		      size_t size)
{
	unsigned int min_pagesz;

	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n",
		iova, size);

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address and the size of the mapping must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return -EINVAL;
	}

	return __ipu_mmu_unmap(mmu_info, iova, size);
}

int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	unsigned int min_pagesz;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	return __ipu_mmu_map(mmu_info, iova, paddr, size);
}

static void ipu7_mmu_destroy(struct ipu7_mmu *mmu)
{
	struct ipu7_dma_mapping *dmap = mmu->dmap;
	struct ipu7_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu7_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
				       PFN_PHYS(iova_size(iova)));
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev,
mmu->pci_trash_page, PAGE_SIZE, DMA_BIDIRECTIONAL); mmu->pci_trash_page = 0; __free_page(mmu->trash_page); } for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->l2_pts[l1_idx]); } } free_dummy_page(mmu_info); dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_l2_pt); free_page((unsigned long)mmu_info->l1_pt); kfree(mmu_info); } struct ipu7_mmu *ipu7_mmu_init(struct device *dev, void __iomem *base, int mmid, const struct ipu7_hw_variants *hw) { struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev)); struct ipu7_mmu_pdata *pdata; struct ipu7_mmu *mmu; unsigned int i; if (hw->nr_mmus > IPU_MMU_MAX_NUM) return ERR_PTR(-EINVAL); pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); for (i = 0; i < hw->nr_mmus; i++) { struct ipu7_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; const struct ipu7_mmu_hw *src_mmu = &hw->mmu_hw[i]; if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS || src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS) return ERR_PTR(-EINVAL); *pdata_mmu = *src_mmu; pdata_mmu->base = base + src_mmu->offset; pdata_mmu->zlx_base = base + src_mmu->zlx_offset; pdata_mmu->uao_base = base + src_mmu->uao_offset; } mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL); if (!mmu) return ERR_PTR(-ENOMEM); mmu->mmid = mmid; mmu->mmu_hw = pdata->mmu_hw; mmu->nr_mmus = hw->nr_mmus; mmu->tlb_invalidate = tlb_invalidate; mmu->ready = false; INIT_LIST_HEAD(&mmu->vma_list); spin_lock_init(&mmu->ready_lock); mmu->dmap = alloc_dma_mapping(isp); if (!mmu->dmap) { dev_err(dev, "can't alloc dma mapping\n"); return ERR_PTR(-ENOMEM); } return mmu; } void ipu7_mmu_cleanup(struct ipu7_mmu *mmu) { struct ipu7_dma_mapping *dmap = mmu->dmap; ipu7_mmu_destroy(mmu); mmu->dmap = NULL; iova_cache_put(); put_iova_domain(&dmap->iovad); kfree(dmap); } MODULE_AUTHOR("Bingbu Cao "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu mmu driver"); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-mmu.h000066400000000000000000000214651465530421300261150ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_MMU_H #define IPU7_MMU_H #include #include #include #include struct device; struct page; struct ipu7_hw_variants; struct ipu7_mmu; struct ipu7_mmu_info; #define ISYS_MMID 1 #define PSYS_MMID 0 /* IPU7 for LNL */ /* IS MMU Cmd RD */ #define IPU7_IS_MMU_FW_RD_OFFSET 0x274000 #define IPU7_IS_MMU_FW_RD_STREAM_NUM 3 #define IPU7_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60 /* IS MMU Cmd WR */ #define IPU7_IS_MMU_FW_WR_OFFSET 0x275000 #define IPU7_IS_MMU_FW_WR_STREAM_NUM 3 #define IPU7_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60 /* IS MMU Data WR Snoop */ #define IPU7_IS_MMU_M0_OFFSET 0x276000 #define IPU7_IS_MMU_M0_STREAM_NUM 8 #define IPU7_IS_MMU_M0_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_M0_L2_BLOCKNR_REG 0x74 /* IS MMU Data WR ISOC */ #define IPU7_IS_MMU_M1_OFFSET 0x277000 #define IPU7_IS_MMU_M1_STREAM_NUM 16 #define IPU7_IS_MMU_M1_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_M1_L2_BLOCKNR_REG 0x94 /* PS MMU FW RD */ #define IPU7_PS_MMU_FW_RD_OFFSET 0x148000 #define IPU7_PS_MMU_FW_RD_STREAM_NUM 20 #define IPU7_PS_MMU_FW_RD_L1_BLOCKNR_REG 
0x54 #define IPU7_PS_MMU_FW_RD_L2_BLOCKNR_REG 0xa4 /* PS MMU FW WR */ #define IPU7_PS_MMU_FW_WR_OFFSET 0x149000 #define IPU7_PS_MMU_FW_WR_STREAM_NUM 10 #define IPU7_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x7c /* PS MMU FW Data RD VC0 */ #define IPU7_PS_MMU_SRT_RD_OFFSET 0x14a000 #define IPU7_PS_MMU_SRT_RD_STREAM_NUM 40 #define IPU7_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54 #define IPU7_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xf4 /* PS MMU FW Data WR VC0 */ #define IPU7_PS_MMU_SRT_WR_OFFSET 0x14b000 #define IPU7_PS_MMU_SRT_WR_STREAM_NUM 40 #define IPU7_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54 #define IPU7_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xf4 /* IS UAO UC RD */ #define IPU7_IS_UAO_UC_RD_OFFSET 0x27c000 #define IPU7_IS_UAO_UC_RD_PLANENUM 4 /* IS UAO UC WR */ #define IPU7_IS_UAO_UC_WR_OFFSET 0x27d000 #define IPU7_IS_UAO_UC_WR_PLANENUM 4 /* IS UAO M0 WR */ #define IPU7_IS_UAO_M0_WR_OFFSET 0x27e000 #define IPU7_IS_UAO_M0_WR_PLANENUM 8 /* IS UAO M1 WR */ #define IPU7_IS_UAO_M1_WR_OFFSET 0x27f000 #define IPU7_IS_UAO_M1_WR_PLANENUM 16 /* PS UAO FW RD */ #define IPU7_PS_UAO_FW_RD_OFFSET 0x156000 #define IPU7_PS_UAO_FW_RD_PLANENUM 20 /* PS UAO FW WR */ #define IPU7_PS_UAO_FW_WR_OFFSET 0x157000 #define IPU7_PS_UAO_FW_WR_PLANENUM 16 /* PS UAO SRT RD */ #define IPU7_PS_UAO_SRT_RD_OFFSET 0x154000 #define IPU7_PS_UAO_SRT_RD_PLANENUM 40 /* PS UAO SRT WR */ #define IPU7_PS_UAO_SRT_WR_OFFSET 0x155000 #define IPU7_PS_UAO_SRT_WR_PLANENUM 40 #define IPU7_IS_ZLX_UC_RD_OFFSET 0x278000 #define IPU7_IS_ZLX_UC_WR_OFFSET 0x279000 #define IPU7_IS_ZLX_M0_OFFSET 0x27a000 #define IPU7_IS_ZLX_M1_OFFSET 0x27b000 #define IPU7_IS_ZLX_UC_RD_NUM 4 #define IPU7_IS_ZLX_UC_WR_NUM 4 #define IPU7_IS_ZLX_M0_NUM 8 #define IPU7_IS_ZLX_M1_NUM 16 #define IPU7_PS_ZLX_DATA_RD_OFFSET 0x14e000 #define IPU7_PS_ZLX_DATA_WR_OFFSET 0x14f000 #define IPU7_PS_ZLX_FW_RD_OFFSET 0x150000 #define IPU7_PS_ZLX_FW_WR_OFFSET 0x151000 #define IPU7_PS_ZLX_DATA_RD_NUM 32 #define IPU7_PS_ZLX_DATA_WR_NUM 32 #define IPU7_PS_ZLX_FW_RD_NUM 16 #define IPU7_PS_ZLX_FW_WR_NUM 10 /* IPU7P5 for PTL */ /* IS MMU Cmd RD */ #define IPU7P5_IS_MMU_FW_RD_OFFSET 0x274000 #define IPU7P5_IS_MMU_FW_RD_STREAM_NUM 3 #define IPU7P5_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60 /* IS MMU Cmd WR */ #define IPU7P5_IS_MMU_FW_WR_OFFSET 0x275000 #define IPU7P5_IS_MMU_FW_WR_STREAM_NUM 3 #define IPU7P5_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60 /* IS MMU Data WR Snoop */ #define IPU7P5_IS_MMU_M0_OFFSET 0x276000 #define IPU7P5_IS_MMU_M0_STREAM_NUM 16 #define IPU7P5_IS_MMU_M0_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_M0_L2_BLOCKNR_REG 0x94 /* IS MMU Data WR ISOC */ #define IPU7P5_IS_MMU_M1_OFFSET 0x277000 #define IPU7P5_IS_MMU_M1_STREAM_NUM 16 #define IPU7P5_IS_MMU_M1_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_M1_L2_BLOCKNR_REG 0x94 /* PS MMU FW RD */ #define IPU7P5_PS_MMU_FW_RD_OFFSET 0x148000 #define IPU7P5_PS_MMU_FW_RD_STREAM_NUM 16 #define IPU7P5_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_FW_RD_L2_BLOCKNR_REG 0x94 /* PS MMU FW WR */ #define IPU7P5_PS_MMU_FW_WR_OFFSET 0x149000 #define IPU7P5_PS_MMU_FW_WR_STREAM_NUM 10 #define IPU7P5_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x7c /* PS MMU FW Data RD VC0 */ #define IPU7P5_PS_MMU_SRT_RD_OFFSET 0x14a000 #define IPU7P5_PS_MMU_SRT_RD_STREAM_NUM 22 #define IPU7P5_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xac /* PS MMU FW Data WR VC0 */ #define IPU7P5_PS_MMU_SRT_WR_OFFSET 0x14b000 
#define IPU7P5_PS_MMU_SRT_WR_STREAM_NUM 32 #define IPU7P5_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xd4 /* IS UAO UC RD */ #define IPU7P5_IS_UAO_UC_RD_OFFSET 0x27c000 #define IPU7P5_IS_UAO_UC_RD_PLANENUM 4 /* IS UAO UC WR */ #define IPU7P5_IS_UAO_UC_WR_OFFSET 0x27d000 #define IPU7P5_IS_UAO_UC_WR_PLANENUM 4 /* IS UAO M0 WR */ #define IPU7P5_IS_UAO_M0_WR_OFFSET 0x27e000 #define IPU7P5_IS_UAO_M0_WR_PLANENUM 16 /* IS UAO M1 WR */ #define IPU7P5_IS_UAO_M1_WR_OFFSET 0x27f000 #define IPU7P5_IS_UAO_M1_WR_PLANENUM 16 /* PS UAO FW RD */ #define IPU7P5_PS_UAO_FW_RD_OFFSET 0x156000 #define IPU7P5_PS_UAO_FW_RD_PLANENUM 16 /* PS UAO FW WR */ #define IPU7P5_PS_UAO_FW_WR_OFFSET 0x157000 #define IPU7P5_PS_UAO_FW_WR_PLANENUM 10 /* PS UAO SRT RD */ #define IPU7P5_PS_UAO_SRT_RD_OFFSET 0x154000 #define IPU7P5_PS_UAO_SRT_RD_PLANENUM 22 /* PS UAO SRT WR */ #define IPU7P5_PS_UAO_SRT_WR_OFFSET 0x155000 #define IPU7P5_PS_UAO_SRT_WR_PLANENUM 32 #define IPU7P5_IS_ZLX_UC_RD_OFFSET 0x278000 #define IPU7P5_IS_ZLX_UC_WR_OFFSET 0x279000 #define IPU7P5_IS_ZLX_M0_OFFSET 0x27a000 #define IPU7P5_IS_ZLX_M1_OFFSET 0x27b000 #define IPU7P5_IS_ZLX_UC_RD_NUM 4 #define IPU7P5_IS_ZLX_UC_WR_NUM 4 #define IPU7P5_IS_ZLX_M0_NUM 16 #define IPU7P5_IS_ZLX_M1_NUM 16 #define IPU7P5_PS_ZLX_DATA_RD_OFFSET 0x14e000 #define IPU7P5_PS_ZLX_DATA_WR_OFFSET 0x14f000 #define IPU7P5_PS_ZLX_FW_RD_OFFSET 0x150000 #define IPU7P5_PS_ZLX_FW_WR_OFFSET 0x151000 #define IPU7P5_PS_ZLX_DATA_RD_NUM 22 #define IPU7P5_PS_ZLX_DATA_WR_NUM 32 #define IPU7P5_PS_ZLX_FW_RD_NUM 16 #define IPU7P5_PS_ZLX_FW_WR_NUM 10 #define MMU_REG_INVALIDATE_0 0x00 #define MMU_REG_INVALIDATE_1 0x04 #define MMU_REG_PAGE_TABLE_BASE_ADDR 0x08 #define MMU_REG_USER_INFO_BITS 0x0c #define MMU_REG_AXI_REFILL_IF_ID 0x10 #define MMU_REG_PW_EN_BITMAP 0x14 #define MMU_REG_COLLAPSE_ENABLE_BITMAP 0x18 #define MMU_REG_GENERAL_REG 0x1c #define MMU_REG_AT_SP_ARB_CFG 0x20 #define MMU_REG_INVALIDATION_STATUS 0x24 #define MMU_REG_IRQ_LEVEL_NO_PULSE 0x28 #define MMU_REG_IRQ_MASK 0x2c #define MMU_REG_IRQ_ENABLE 0x30 #define MMU_REG_IRQ_EDGE 0x34 #define MMU_REG_IRQ_CLEAR 0x38 #define MMU_REG_IRQ_CAUSE 0x3c #define MMU_REG_CG_CTRL_BITS 0x40 #define MMU_REG_RD_FIFOS_STATUS 0x44 #define MMU_REG_WR_FIFOS_STATUS 0x48 #define MMU_REG_COMMON_FIFOS_STATUS 0x4c #define MMU_REG_FSM_STATUS 0x50 #define ZLX_REG_AXI_POOL 0x0 #define ZLX_REG_EN 0x20 #define ZLX_REG_CONF 0x24 #define ZLX_REG_CG_CTRL 0x900 #define ZLX_REG_FORCE_BYPASS 0x904 struct ipu7_mmu_info { struct device *dev; u32 *l1_pt; u32 l1_pt_dma; u32 **l2_pts; u32 *dummy_l2_pt; u32 dummy_l2_pteval; void *dummy_page; u32 dummy_page_pteval; dma_addr_t aperture_start; dma_addr_t aperture_end; unsigned long pgsize_bitmap; spinlock_t lock; /* Serialize access to users */ struct ipu7_dma_mapping *dmap; bool fw_code_region_mapped; }; struct ipu7_mmu { struct list_head node; struct ipu7_mmu_hw *mmu_hw; unsigned int nr_mmus; unsigned int mmid; phys_addr_t pgtbl; struct device *dev; struct ipu7_dma_mapping *dmap; struct list_head vma_list; struct page *trash_page; dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ dma_addr_t iova_trash_page; /* IOVA for IPU child nodes to use */ bool ready; spinlock_t ready_lock; /* Serialize access to bool ready */ void (*tlb_invalidate)(struct ipu7_mmu *mmu); }; struct ipu7_mmu *ipu7_mmu_init(struct device *dev, void __iomem *base, int mmid, const struct ipu7_hw_variants *hw); void ipu7_mmu_cleanup(struct ipu7_mmu *mmu); int ipu7_mmu_hw_init(struct ipu7_mmu *mmu); void 
ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu); int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size); size_t ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova, size_t size); phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info, dma_addr_t iova); #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-platform-regs.h000066400000000000000000000111421465530421300300700ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2018 - 2024 Intel Corporation */ #ifndef IPU7_PLATFORM_REGS_H #define IPU7_PLATFORM_REGS_H #define IS_BASE 0x230000 #define IS_UC_CTRL_BASE (IS_BASE + 0x0) #define PS_BASE 0x130000 #define PS_UC_CTRL_BASE (PS_BASE + 0x0) /* * bit 0: IRQ from FW, * bit 1, 2 and 3: IRQ from HW */ #define TO_SW_IRQ_MASK 0xf #define TO_SW_IRQ_FW BIT(0) #define FW_CODE_BASE 0x0 #define FW_DATA_BASE 0x4 #define CPU_AXI_CNTL 0x8 #define CPU_QOS_CNTL 0xc #define IDMA_AXI_CNTL 0x10 #define IDMA_QOS_CNTL 0x14 #define MEF_SPLIT_SIZE 0x18 #define FW_MSG_CONTROL 0x1c #define FW_MSG_CREDITS_STATUS 0x20 #define FW_MSG_CREDIT_TAKEN 0x24 #define FW_MSG_CREDIT_RETURNED 0x28 #define TRIG_IDMA_IN 0x2c #define IDMA_DONE 0x30 #define IDMA_DONE_CLEAR 0x34 #define DMEM_CAPACITY 0x38 #define NON_SECURE_CODE_OFFSET 0x3c #define UC_CG_CTRL_BITS 0x40 #define ALT_RESET_VEC 0x44 #define WDT_NMI_DURATION 0x104 #define WDT_RST_REQ_DURATION 0x108 #define WDT_CNTL 0x10c #define WDT_NMI_CURRENT_COUNT 0x110 #define WDT_RST_CURRENT_COUNT 0x114 #define WDT_HALT 0x118 #define WDT_STATUS 0x11c #define SPARE_REG_RW 0x120 #define SPARE_REG_RO 0x124 #define FW_TO_FW_IRQ_CNTL_EDGE 0x200 #define FW_TO_FW_IRQ_CNTL_MASK_N 0x204 #define FW_TO_FW_IRQ_CNTL_STATUS 0x208 #define FW_TO_FW_IRQ_CNTL_CLEAR 0x20c #define FW_TO_FW_IRQ_CNTL_ENABLE 0x210 #define FW_TO_FW_IRQ_CNTL_LEVEL_NOT_PULSE 0x214 #define CLK_GATE_DIS 0x218 #define DEBUG_STATUS 0x1000 #define DEBUG_EXCPETION 0x1004 #define TIE_GENERAL_INPUT 0x1008 #define ERR_STATUS 0x100c #define UC_ERR_INFO 0x1010 #define SPARE_CNTL 0x1014 #define MEF_TRC_CNTL 0x1100 #define DBG_MEF_LAST_PUSH 0x1104 #define DBG_MEF_LAST_POP 0x1108 #define DBG_MEF_COUNT_CNTL 0x110c #define DBG_MEF_COUNT1 0x1110 #define DBG_MEF_COUNT2 0x1114 #define DBG_MEF_ACC_OCCUPANCY 0x1118 #define DBG_MEF_MAX_IRQ_TO_POP 0x111c #define DBG_IRQ_CNTL 0x1120 #define DBG_IRQ_COUNT 0x1124 #define DBG_CYC_COUNT 0x1128 #define DBG_CNTL 0x1130 #define DBG_RST_REG 0x1134 #define DBG_MEF_STATUS0 0x1138 #define DBG_MEF_STATUS1 0x113c #define PDEBUG_CTL 0x1140 #define PDEBUG_DATA 0x1144 #define PDEBUG_INST 0x1148 #define PDEBUG_LS0ADDR 0x114c #define PDEBUG_LS0DATA 0x1150 #define PDEBUG_LS0STAT 0x1154 #define PDEBUG_PC 0x1158 #define PDEBUG_MISC 0x115c #define PDEBUG_PREF_STS 0x1160 #define MEF0_ADDR 0x2000 #define MEF1_ADDR 0x2020 #define PRINTF_EN_THROUGH_TRACE 0x3004 #define PRINTF_EN_DIRECTLY_TO_DDR 0x3008 #define PRINTF_DDR_BASE_ADDR 0x300c #define PRINTF_DDR_SIZE 0x3010 #define PRINTF_DDR_NEXT_ADDR 0x3014 #define PRINTF_STATUS 0x3018 #define PRINTF_AXI_CNTL 0x301c #define PRINTF_MSG_LENGTH 0x3020 #define TO_SW_IRQ_CNTL_EDGE 0x4000 #define TO_SW_IRQ_CNTL_MASK_N 0x4004 #define TO_SW_IRQ_CNTL_STATUS 0x4008 #define TO_SW_IRQ_CNTL_CLEAR 0x400c #define TO_SW_IRQ_CNTL_ENABLE 0x4010 #define TO_SW_IRQ_CNTL_LEVEL_NOT_PULSE 0x4014 #define ERR_IRQ_CNTL_EDGE 0x4018 #define ERR_IRQ_CNTL_MASK_N 0x401c #define ERR_IRQ_CNTL_STATUS 0x4020 #define ERR_IRQ_CNTL_CLEAR 0x4024 #define ERR_IRQ_CNTL_ENABLE 0x4028 #define 
ERR_IRQ_CNTL_LEVEL_NOT_PULSE 0x402c

#define LOCAL_DMEM_BASE_ADDR 0x1300000

/*
 * IS_UC_TO_SW irqs
 * bit 0: IRQ from local FW
 * bit 1~3: IRQ from HW
 */
#define IS_UC_TO_SW_IRQ_MASK 0xf

/*
 * The IPU uses a uniform address space internally, so all subsystem
 * registers live in one single space starting from 0, just in different
 * sections at different addresses. The subsystem offsets are defined as 0
 * because each register definition already carries its full address
 * offset.
 */
#define IPU_ISYS_SPC_OFFSET 0x210000
#define IPU7_PSYS_SPC_OFFSET 0x118000
#define IPU_ISYS_DMEM_OFFSET 0x200000
#define IPU_PSYS_DMEM_OFFSET 0x100000

#define IPU7_ISYS_CSI_PORT_NUM 4

/* IRQ-related registers in PSYS */
#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_EDGE 0x134000
#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_MASK 0x134004
#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_STATUS 0x134008
#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR 0x13400c
#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_ENABLE 0x134010
#define IPU_REG_PSYS_TO_SW_IRQ_CNTL_LEVEL_NOT_PULSE 0x134014
#define IRQ_FROM_LOCAL_FW BIT(0)

/*
 * psys subdomains power request regs
 */
enum ipu7_device_buttress_psys_domain_pos {
	IPU_PSYS_SUBDOMAIN_LB = 0,
	IPU_PSYS_SUBDOMAIN_BB = 1,
};

#define IPU_PSYS_DOMAIN_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_LB) | \
				    BIT(IPU_PSYS_SUBDOMAIN_BB))
#define IPU_PSYS_DOMAIN_POWER_IN_PROGRESS BIT(31)

#endif /* IPU7_PLATFORM_REGS_H */
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-syscom.c000066400000000000000000000042401465530421300266170ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - 2024 Intel Corporation
 */

#include #include

#include "abi/ipu7_fw_syscom_abi.h"

#include "ipu7.h"
#include "ipu7-syscom.h"

static void __iomem *ipu7_syscom_get_indices(struct ipu7_syscom_context *ctx,
					     u32 q)
{
	return ctx->queue_indices + (q * sizeof(struct syscom_queue_indices_s));
}

void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q)
{
	struct syscom_queue_config *queue_params = &ctx->queue_configs[q];
	void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q);
	u32 write_index = readl(queue_indices +
				offsetof(struct syscom_queue_indices_s,
					 write_index));
	u32 read_index = readl(queue_indices +
			       offsetof(struct syscom_queue_indices_s,
					read_index));
	void *token = NULL;

	if (q < ctx->num_output_queues) {
		/* Output queue */
		bool empty = (write_index == read_index);

		if (!empty)
			token = queue_params->token_array_mem +
				read_index * queue_params->token_size_in_bytes;
	} else {
		/* Input queue */
		bool full = (read_index == ((write_index + 1U) %
					    (u32)queue_params->max_capacity));

		if (!full)
			token = queue_params->token_array_mem +
				write_index *
				queue_params->token_size_in_bytes;
	}

	return token;
}
EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_token, INTEL_IPU7);

void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q)
{
	struct syscom_queue_config *queue_params = &ctx->queue_configs[q];
	void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q);
	u32 offset, index;

	if (q < ctx->num_output_queues)
		/* Output queue */
		offset = offsetof(struct syscom_queue_indices_s, read_index);
	else
		/* Input queue */
		offset = offsetof(struct syscom_queue_indices_s, write_index);

	index = readl(queue_indices + offset);
	writel((index + 1U) % queue_params->max_capacity,
	       queue_indices + offset);
}
EXPORT_SYMBOL_NS_GPL(ipu7_syscom_put_token, INTEL_IPU7);

struct syscom_queue_params_config *
ipu7_syscom_get_queue_config(struct syscom_config_s *config)
{
	return (struct syscom_queue_params_config *)(&config[1]);
}
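/*
 * Usage sketch (illustrative only; my_token and fill_token() are
 * hypothetical caller-side names): the syscom queues above are
 * single-producer/single-consumer rings shared with firmware. A queue is
 * empty when write_index == read_index and full when
 * (write_index + 1) % max_capacity == read_index, so one slot always
 * stays unused to tell the two states apart. Sending on an input queue
 * therefore looks like:
 *
 *	struct my_token *t = ipu7_syscom_get_token(ctx, q);
 *
 *	if (t) {				// NULL means the ring is full
 *		fill_token(t);			// hypothetical helper
 *		ipu7_syscom_put_token(ctx, q);	// publishes t, bumps write_index
 *	}
 *
 * ipu7_syscom_get_queue_config() relies on the queue parameter array
 * being laid out immediately after struct syscom_config_s in memory,
 * which is what the &config[1] pointer arithmetic expresses.
 */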
EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_queue_config, INTEL_IPU7); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7-syscom.h000066400000000000000000000014611465530421300266260ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_SYSCOM_H #define IPU7_SYSCOM_H #include struct syscom_config_s; struct syscom_queue_params_config; struct syscom_queue_config { void *token_array_mem; u32 queue_size; u16 token_size_in_bytes; u16 max_capacity; }; struct ipu7_syscom_context { u16 num_input_queues; u16 num_output_queues; struct syscom_queue_config *queue_configs; void __iomem *queue_indices; dma_addr_t queue_mem_dma_addr; void *queue_mem; u32 queue_mem_size; }; void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q); void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q); struct syscom_queue_params_config * ipu7_syscom_get_queue_config(struct syscom_config_s *config); #endif ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7.c000066400000000000000000001374121465530421300253140ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #ifdef CONFIG_DEBUG_FS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_INTEL_IPU7_ACPI) #include #endif #if LINUX_VERSION_CODE > KERNEL_VERSION(6, 6, 0) #include #endif #include "abi/ipu7_fw_common_abi.h" #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-buttress-regs.h" #include "ipu7-cpd.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-mmu.h" #include "ipu7-platform-regs.h" #define IPU_PCI_BAR 0 #define IPU_PCI_PBBAR 4 #if defined(CONFIG_INTEL_IPU7_ACPI) static int isys_init_acpi_add_device(struct device *dev, void *priv, struct ipu7_isys_csi2_config *csi2, bool reprobe) { return 0; } #endif #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC static unsigned int ipu7_tpg_offsets[] = { MGC_MG_PORT(0), MGC_MG_PORT(1), MGC_MG_PORT(2), }; #endif static unsigned int ipu7_csi_offsets[] = { IPU_CSI_PORT_A_ADDR_OFFSET, IPU_CSI_PORT_B_ADDR_OFFSET, IPU_CSI_PORT_C_ADDR_OFFSET, IPU_CSI_PORT_D_ADDR_OFFSET, }; static struct ipu7_isys_internal_pdata ipu7p5_isys_ipdata = { .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7P5_IS_MMU_NUM, .mmu_hw = { { .name = "IS_FW_RD", .offset = IPU7P5_IS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_UC_RD_OFFSET, .uao_offset = IPU7P5_IS_UAO_UC_RD_OFFSET, .info_bits = 0x20005101, .refill = 0x00002726, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7P5_IS_ZLX_UC_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 1, 0, 0 }, .zlx_conf = { 0x0, }, .uao_p_num = IPU7P5_IS_UAO_UC_RD_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004c, 0x0000004d, 0x00000000, }, }, { .name = "IS_FW_WR", .offset = IPU7P5_IS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_UC_WR_OFFSET, .uao_offset = IPU7P5_IS_UAO_UC_WR_OFFSET, .info_bits = 0x20005001, .refill = 0x00002524, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = 
IPU7P5_IS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7P5_IS_ZLX_UC_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, }, .zlx_conf = { 0x0, 0x00010101, 0x00010101, 0x0, }, .uao_p_num = IPU7P5_IS_UAO_UC_WR_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004a, 0x0000004b, 0x00000000, }, }, { .name = "IS_DATA_WR_ISOC", .offset = IPU7P5_IS_MMU_M0_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_M0_OFFSET, .uao_offset = IPU7P5_IS_UAO_M0_WR_OFFSET, .info_bits = 0x20004e01, .refill = 0x00002120, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_M0_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_M0_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_M0_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_M0_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU7P5_IS_ZLX_M0_NUM, .zlx_axi_pool = { 0x00000f10, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, }, .uao_p_num = IPU7P5_IS_UAO_M0_WR_PLANENUM, .uao_p2tlb = { 0x00000041, 0x00000042, 0x00000043, 0x00000044, 0x00000041, 0x00000042, 0x00000043, 0x00000044, 0x00000041, 0x00000042, 0x00000043, 0x00000044, 0x00000041, 0x00000042, 0x00000043, 0x00000044, }, }, { .name = "IS_DATA_WR_SNOOP", .offset = IPU7P5_IS_MMU_M1_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_M1_OFFSET, .uao_offset = IPU7P5_IS_UAO_M1_WR_OFFSET, .info_bits = 0x20004f01, .refill = 0x00002322, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_M1_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_M1_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_M1_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_M1_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU7P5_IS_ZLX_M1_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, }, .uao_p_num = IPU7P5_IS_UAO_M1_WR_PLANENUM, .uao_p2tlb = { 0x00000045, 0x00000046, 0x00000047, 0x00000048, 0x00000045, 0x00000046, 0x00000047, 0x00000048, 0x00000045, 0x00000046, 0x00000047, 0x00000048, 0x00000045, 0x00000046, 0x00000047, 0x00000048, }, }, }, .cdc_fifos = 3, .cdc_fifo_threshold = {6, 8, 2}, .dmem_offset = IPU_ISYS_DMEM_OFFSET, .spc_offset = IPU_ISYS_SPC_OFFSET, }, .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, }; static struct ipu7_psys_internal_pdata 
ipu7p5_psys_ipdata = { .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7P5_PS_MMU_NUM, .mmu_hw = { { .name = "PS_FW_RD", .offset = IPU7P5_PS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_FW_RD_OFFSET, .uao_offset = IPU7P5_PS_UAO_FW_RD_OFFSET, .info_bits = 0x20004001, .refill = 0x00002726, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000d, 0x0000000f, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000016, 0x00000018, 0x00000019, 0x0000001a, 0x0000001a, 0x0000001a, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU7P5_PS_ZLX_FW_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x00000000, 0x00010101, 0x00000000, 0x00000000, 0x00010101, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00010101, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_FW_RD_PLANENUM, .uao_p2tlb = { 0x0000002e, 0x00000035, 0x00000036, 0x00000031, 0x00000037, 0x00000038, 0x00000039, 0x00000032, 0x00000033, 0x0000003a, 0x0000003b, 0x0000003c, 0x00000034, 0x0, 0x0, 0x0, }, }, { .name = "PS_FW_WR", .offset = IPU7P5_PS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_FW_WR_OFFSET, .uao_offset = IPU7P5_PS_UAO_FW_WR_OFFSET, .info_bits = 0x20003e01, .refill = 0x00002322, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000010, 0x00000010, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, }, .zlx_nr = IPU7P5_PS_ZLX_FW_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x00000000, 0x00010101, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_FW_WR_PLANENUM, .uao_p2tlb = { 0x0000002e, 0x0000002f, 0x00000030, 0x00000031, 0x00000032, 0x00000033, 0x00000034, 0x0, 0x0, 0x0, }, }, { .name = "PS_DATA_RD", .offset = IPU7P5_PS_MMU_SRT_RD_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_DATA_RD_OFFSET, .uao_offset = IPU7P5_PS_UAO_SRT_RD_OFFSET, .info_bits = 0x20003f01, .refill = 0x00002524, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_SRT_RD_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_SRT_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_SRT_RD_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_SRT_RD_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000004, 0x00000006, 0x00000008, 0x0000000b, 0x0000000d, 0x0000000f, 0x00000013, 0x00000017, 0x00000019, 0x0000001b, 0x0000001d, 0x0000001f, 0x0000002b, 0x00000033, 0x0000003f, 0x00000047, 0x00000049, 0x0000004b, 0x0000004c, 0x0000004d, 0x0000004e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 
0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000026, 0x00000028, 0x0000002a, }, .zlx_nr = IPU7P5_PS_ZLX_DATA_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, }, .zlx_conf = { 0x00030303, 0x00010101, 0x00010101, 0x00030202, 0x00010101, 0x00010101, 0x00030303, 0x00030303, 0x00010101, 0x00030800, 0x00030500, 0x00020101, 0x00042000, 0x00031000, 0x00042000, 0x00031000, 0x00020400, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_SRT_RD_PLANENUM, .uao_p2tlb = { 0x0000001c, 0x0000001d, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, 0x00000022, 0x00000023, 0x00000024, 0x00000025, 0x00000026, 0x00000027, 0x00000028, 0x00000029, 0x0000002a, 0x0000002b, 0x0000002c, 0x0000002d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, }, { .name = "PS_DATA_WR", .offset = IPU7P5_PS_MMU_SRT_WR_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_DATA_WR_OFFSET, .uao_offset = IPU7P5_PS_UAO_SRT_WR_OFFSET, .info_bits = 0x20003d01, .refill = 0x00002120, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_SRT_WR_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_SRT_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_SRT_WR_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_SRT_WR_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000006, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000028, 0x0000002a, 0x00000036, 0x0000003e, 0x00000040, 0x00000042, 0x0000004e, 0x00000056, 0x0000005c, 0x00000068, 0x00000070, 0x00000076, 0x00000077, 0x00000078, 0x00000079, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000006, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000028, 0x0000002a, 0x00000036, 0x0000003e, 0x00000040, 0x00000042, 0x0000004e, 0x00000056, 0x0000005c, 0x00000068, 0x00000070, 0x00000076, 0x00000077, 0x00000078, 0x00000079, }, .zlx_nr = IPU7P5_PS_ZLX_DATA_WR_NUM, .zlx_axi_pool = { 0x00000f50, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, }, .zlx_conf = { 0x00010102, 0x00030103, 0x00030103, 0x00010101, 0x00010101, 0x00030101, 0x00010101, 0x38010101, 0x00000000, 0x00000000, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x00030303, 0x00010101, 0x00042000, 0x00031000, 0x00010101, 0x00010101, 0x00042000, 0x00031000, 0x00031000, 0x00042000, 0x00031000, 0x00031000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_SRT_WR_PLANENUM, .uao_p2tlb = { 0x00000000, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015, 0x00000016, 0x00000017, 0x00000018, 0x00000019, 0x0000001a, 0x0000001b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, }, }, .dmem_offset = IPU_PSYS_DMEM_OFFSET, }, }; static struct ipu7_isys_internal_pdata ipu7_isys_ipdata = { .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7_IS_MMU_NUM, .mmu_hw = { { .name = "IS_FW_RD", .offset = IPU7_IS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7_IS_ZLX_UC_RD_OFFSET, .uao_offset = IPU7_IS_UAO_UC_RD_OFFSET, .info_bits = 0x20006701, .refill = 0x00002726, 
.collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7_IS_ZLX_UC_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 0, 0, 0 }, .zlx_conf = { 0x0, 0x0, 0x0, 0x0, }, .uao_p_num = IPU7_IS_UAO_UC_RD_PLANENUM, .uao_p2tlb = { 0x00000061, 0x00000064, 0x00000065, }, }, { .name = "IS_FW_WR", .offset = IPU7_IS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7_IS_ZLX_UC_WR_OFFSET, .uao_offset = IPU7_IS_UAO_UC_WR_OFFSET, .info_bits = 0x20006801, .refill = 0x00002524, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7_IS_ZLX_UC_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, }, .zlx_conf = { 0x0, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_IS_UAO_UC_WR_PLANENUM, .uao_p2tlb = { 0x00000061, 0x00000062, 0x00000063, }, }, { .name = "IS_DATA_WR_ISOC", .offset = IPU7_IS_MMU_M0_OFFSET, .zlx_offset = IPU7_IS_ZLX_M0_OFFSET, .uao_offset = IPU7_IS_UAO_M0_WR_OFFSET, .info_bits = 0x20006601, .refill = 0x00002120, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_M0_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_M0_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_M0_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_M0_STREAM_NUM, .l1_block_sz = { 0x0, 0x3, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, }, .zlx_nr = IPU7_IS_ZLX_M0_NUM, .zlx_axi_pool = { 0x00000f10, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_IS_UAO_M0_WR_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004a, 0x0000004b, 0x0000004c, 0x0000004d, 0x0000004e, 0x0000004f, 0x00000050, }, }, { .name = "IS_DATA_WR_SNOOP", .offset = IPU7_IS_MMU_M1_OFFSET, .zlx_offset = IPU7_IS_ZLX_M1_OFFSET, .uao_offset = IPU7_IS_UAO_M1_WR_OFFSET, .info_bits = 0x20006901, .refill = 0x00002322, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_M1_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_M1_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_M1_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_M1_STREAM_NUM, .l1_block_sz = { 0x0, 0x3, 0x6, 0x9, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }, .zlx_nr = IPU7_IS_ZLX_M1_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_IS_UAO_M1_WR_PLANENUM, .uao_p2tlb = { 0x00000051, 0x00000052, 0x00000053, 0x00000054, 0x00000055, 0x00000056, 0x00000057, 0x00000058, 0x00000059, 0x0000005a, 0x0000005b, 0x0000005c, 0x0000005d, 0x0000005e, 0x0000005f, 0x00000060, }, }, }, .cdc_fifos = 3, .cdc_fifo_threshold = {6, 8, 2}, .dmem_offset = IPU_ISYS_DMEM_OFFSET, .spc_offset = IPU_ISYS_SPC_OFFSET, }, .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, }; static struct ipu7_psys_internal_pdata ipu7_psys_ipdata = { .hw_variant = { .offset = 
IPU_UNIFIED_OFFSET, .nr_mmus = IPU7_PS_MMU_NUM, .mmu_hw = { { .name = "PS_FW_RD", .offset = IPU7_PS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7_PS_ZLX_FW_RD_OFFSET, .uao_offset = IPU7_PS_UAO_FW_RD_OFFSET, .info_bits = 0x20004801, .refill = 0x00002726, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0, 0x8, 0xa, 0xc, 0xd, 0xf, 0x11, 0x12, 0x13, 0x14, 0x16, 0x18, 0x19, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, }, .zlx_nr = IPU7_PS_ZLX_FW_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x0, }, .uao_p_num = IPU7_PS_UAO_FW_RD_PLANENUM, .uao_p2tlb = { 0x00000036, 0x0000003d, 0x0000003e, 0x00000039, 0x0000003f, 0x00000040, 0x00000041, 0x0000003a, 0x0000003b, 0x00000042, 0x00000043, 0x00000044, 0x0000003c, }, }, { .name = "PS_FW_WR", .offset = IPU7_PS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7_PS_ZLX_FW_WR_OFFSET, .uao_offset = IPU7_PS_UAO_FW_WR_OFFSET, .info_bits = 0x20004601, .refill = 0x00002322, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0, 0x8, 0xa, 0xc, 0xd, 0xe, 0xf, 0x10, 0x10, 0x10, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, }, .zlx_nr = IPU7_PS_ZLX_FW_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x0, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_PS_UAO_FW_WR_PLANENUM, .uao_p2tlb = { 0x00000036, 0x00000037, 0x00000038, 0x00000039, 0x0000003a, 0x0000003b, 0x0000003c, }, }, { .name = "PS_DATA_RD", .offset = IPU7_PS_MMU_SRT_RD_OFFSET, .zlx_offset = IPU7_PS_ZLX_DATA_RD_OFFSET, .uao_offset = IPU7_PS_UAO_SRT_RD_OFFSET, .info_bits = 0x20004701, .refill = 0x00002120, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_SRT_RD_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_SRT_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_SRT_RD_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_SRT_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x4, 0x6, 0x8, 0xb, 0xd, 0xf, 0x11, 0x13, 0x15, 0x17, 0x23, 0x2b, 0x37, 0x3f, 0x41, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x55, 0x57, 0x59, 0x5b, 0x5d, 0x5f, 0x61, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, }, .zlx_nr = IPU7_PS_ZLX_DATA_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x00030303, 0x00010101, 0x00010101, 0x00030202, 0x00010101, 0x00010101, 0x00010101, 0x00030800, 0x00030500, 0x00020101, 0x00042000, 0x00031000, 0x00042000, 0x00031000, 0x00020400, 0x00010101, }, .uao_p_num = IPU7_PS_UAO_SRT_RD_PLANENUM, .uao_p2tlb = { 0x00000022, 0x00000023, 0x00000024, 0x00000025, 0x00000026, 0x00000027, 0x00000028, 0x00000029, 0x0000002a, 0x0000002b, 0x0000002c, 0x0000002d, 0x0000002e, 0x0000002f, 0x00000030, 0x00000031, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, 0x00000032, 0x00000033, 0x00000034, 0x00000035, }, }, { .name = "PS_DATA_WR", .offset = IPU7_PS_MMU_SRT_WR_OFFSET, .zlx_offset = IPU7_PS_ZLX_DATA_WR_OFFSET, .uao_offset = IPU7_PS_UAO_SRT_WR_OFFSET, .info_bits = 0x20004501, .refill = 0x00002120, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_SRT_WR_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_SRT_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_SRT_WR_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_SRT_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x2, 0x6, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x32, 0x3a, 0x3c, 0x3e, 0x4a, 0x52, 0x58, 0x64, 0x6c, 0x72, 0x7e, 0x86, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x94, 0x96, 0x98, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, }, .zlx_nr = IPU7_PS_ZLX_DATA_WR_NUM, .zlx_axi_pool = { 0x00000f50, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, }, .zlx_conf = { 0x00010102, 0x00030103, 0x00030103, 0x00010101, 0x00010101, 0x00030101, 0x00010101, 0x38010101, 0x0, 0x0, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x00010101, 0x00042000, 0x00031000, 0x00010101, 0x00010101, 0x00042000, 0x00031000, 0x00031000, 0x00042000, 0x00031000, 0x00031000, 0x00042000, 0x00031000, 0x00031000, 0x0, 0x0, }, .uao_p_num = IPU7_PS_UAO_SRT_WR_PLANENUM, .uao_p2tlb = { 0x00000000, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015, 0x00000016, 0x00000017, 0x00000018, 0x00000019, 0x0000001a, 0x0000001b, 0x0000001c, 0x0000001d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, }, }, }, .dmem_offset = IPU_PSYS_DMEM_OFFSET, }, }; static const struct ipu7_buttress_ctrl isys_buttress_ctrl = { .subsys_id = IPU_IS, .ratio = IPU7_IS_FREQ_CTL_DEFAULT_RATIO, .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT, .cdyn = IPU_FREQ_CTL_CDYN, .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT, .freq_ctl = BUTTRESS_REG_IS_WORKPOINT_REQ, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, .ovrd_clk = BUTTRESS_OVERRIDE_IS_CLK, .own_clk_ack = BUTTRESS_OWN_ACK_IS_CLK, }; static const struct ipu7_buttress_ctrl psys_buttress_ctrl = { .subsys_id = IPU_PS, .ratio = IPU7_PS_FREQ_CTL_DEFAULT_RATIO, .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT, .cdyn = IPU_FREQ_CTL_CDYN, .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT, .freq_ctl = BUTTRESS_REG_PS_WORKPOINT_REQ, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, .ovrd_clk = BUTTRESS_OVERRIDE_PS_CLK, .own_clk_ack = BUTTRESS_OWN_ACK_PS_CLK, }; int ipu7_buttress_psys_freq_get(void *data, u64 *val) { struct ipu7_device *isp = data; u32 reg_val; int ret; ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { pm_runtime_put(&isp->psys->auxdev.dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } reg_val = readl(isp->base + 
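/* * The PS frequency request is encoded as a ratio field in the workpoint register read below; *val is ratio * BUTTRESS_PS_FREQ_RATIO_STEP. Illustrative example only (the step value is hardware-specific): with a 25 MHz step, a ratio of 0x10 would report 400 MHz. */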
BUTTRESS_REG_PS_WORKPOINT_REQ); pm_runtime_put(&isp->psys->auxdev.dev); *val = BUTTRESS_PS_FREQ_RATIO_STEP * (reg_val & BUTTRESS_PS_FREQ_CTL_RATIO_MASK); return 0; } void ipu7_internal_pdata_init(struct ipu7_isys_internal_pdata *isys_ipdata, struct ipu7_psys_internal_pdata *psys_ipdata) { isys_ipdata->csi2.nports = ARRAY_SIZE(ipu7_csi_offsets); isys_ipdata->csi2.offsets = ipu7_csi_offsets; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC isys_ipdata->tpg.ntpgs = ARRAY_SIZE(ipu7_tpg_offsets); isys_ipdata->tpg.offsets = ipu7_tpg_offsets; isys_ipdata->tpg.sels = NULL; #endif isys_ipdata->num_parallel_streams = IPU7_ISYS_NUM_STREAMS; psys_ipdata->hw_variant.spc_offset = IPU7_PSYS_SPC_OFFSET; } static int ipu7_isys_check_fwnode_graph(struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint; if (IS_ERR_OR_NULL(fwnode)) return -EINVAL; endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL); if (endpoint) { fwnode_handle_put(endpoint); return 0; } return ipu7_isys_check_fwnode_graph(fwnode->secondary); } static struct ipu7_bus_device * ipu7_isys_init(struct pci_dev *pdev, struct device *parent, struct ipu7_buttress_ctrl *ctrl, void __iomem *base, const struct ipu7_isys_internal_pdata *ipdata, unsigned int nr) { struct ipu7_bus_device *isys_adev; struct ipu7_isys_pdata *pdata; #if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) struct ipu7_isys_subdev_pdata *acpi_pdata; #endif struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) ret = ipu7_isys_check_fwnode_graph(fwnode); if (ret) { if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) { dev_err(&pdev->dev, "fwnode graph has no endpoints connection\n"); return ERR_PTR(-EINVAL); } } #else ret = ipu7_isys_check_fwnode_graph(fwnode); if (ret) { if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) { dev_err(&pdev->dev, "fwnode graph has no endpoints connection\n"); return ERR_PTR(-EINVAL); } ret = ipu_bridge_init(&pdev->dev, ipu_bridge_parse_ssdb); if (ret) { dev_err_probe(&pdev->dev, ret, "IPU bridge init failed\n"); return ERR_PTR(ret); } } #endif pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = base; pdata->ipdata = ipdata; isys_adev = ipu7_bus_initialize_device(pdev, parent, pdata, ctrl, IPU_ISYS_NAME); if (IS_ERR(isys_adev)) { dev_err_probe(&pdev->dev, PTR_ERR(isys_adev), "ipu7_bus_initialize_device isys failed\n"); kfree(pdata); return ERR_CAST(isys_adev); } #if IS_ENABLED(CONFIG_INTEL_IPU7_ACPI) if (!spdata) { dev_err(&pdev->dev, "No subdevice info provided\n"); ipu7_get_acpi_devices(isys, &isys->dev, &acpi_pdata, NULL, isys_init_acpi_add_device); pdata->spdata = acpi_pdata; } else { dev_info(&pdev->dev, "Subdevice info found\n"); ipu7_get_acpi_devices(isys, &isys->dev, &acpi_pdata, &spdata, isys_init_acpi_add_device); } #endif isys_adev->mmu = ipu7_mmu_init(&pdev->dev, base, ISYS_MMID, &ipdata->hw_variant); if (IS_ERR(isys_adev->mmu)) { dev_err_probe(&pdev->dev, PTR_ERR(isys_adev->mmu), "ipu7_mmu_init(isys_adev->mmu) failed\n"); put_device(&isys_adev->auxdev.dev); kfree(pdata); return ERR_CAST(isys_adev->mmu); } isys_adev->mmu->dev = &isys_adev->auxdev.dev; isys_adev->subsys = IPU_IS; ret = ipu7_bus_add_device(isys_adev); if (ret) { kfree(pdata); return ERR_PTR(ret); } return isys_adev; } static struct ipu7_bus_device * ipu7_psys_init(struct pci_dev *pdev, struct device *parent, struct ipu7_buttress_ctrl *ctrl, void __iomem *base, const struct ipu7_psys_internal_pdata *ipdata, unsigned int nr) { struct ipu7_bus_device *psys_adev; struct ipu7_psys_pdata *pdata; int ret; pdata
= kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = base; pdata->ipdata = ipdata; psys_adev = ipu7_bus_initialize_device(pdev, parent, pdata, ctrl, IPU_PSYS_NAME); if (IS_ERR(psys_adev)) { dev_err_probe(&pdev->dev, PTR_ERR(psys_adev), "ipu7_bus_initialize_device psys failed\n"); kfree(pdata); return ERR_CAST(psys_adev); } psys_adev->mmu = ipu7_mmu_init(&pdev->dev, base, PSYS_MMID, &ipdata->hw_variant); if (IS_ERR(psys_adev->mmu)) { dev_err_probe(&pdev->dev, PTR_ERR(psys_adev->mmu), "ipu7_mmu_init(psys_adev->mmu) failed\n"); put_device(&psys_adev->auxdev.dev); kfree(pdata); return ERR_CAST(psys_adev->mmu); } psys_adev->mmu->dev = &psys_adev->auxdev.dev; psys_adev->subsys = IPU_PS; ret = ipu7_bus_add_device(psys_adev); if (ret) { kfree(pdata); return ERR_PTR(ret); } return psys_adev; } static struct ia_gofo_msg_log_info_ts fw_error_log[IPU_SUBSYS_NUM]; void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev) { void __iomem *reg = adev->isp->base + ((adev->subsys == IPU_IS) ? BUTTRESS_REG_FW_GP24 : BUTTRESS_REG_FW_GP8); memcpy_fromio(&fw_error_log[adev->subsys], reg, sizeof(fw_error_log[adev->subsys])); } EXPORT_SYMBOL_NS_GPL(ipu7_dump_fw_error_log, INTEL_IPU7); #ifdef CONFIG_DEBUG_FS static struct debugfs_blob_wrapper isys_fw_error; static struct debugfs_blob_wrapper psys_fw_error; static int ipu7_init_debugfs(struct ipu7_device *isp) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir(pci_name(isp->pdev), NULL); if (IS_ERR(dir)) return -ENOMEM; isys_fw_error.data = &fw_error_log[IPU_IS]; isys_fw_error.size = sizeof(fw_error_log[IPU_IS]); file = debugfs_create_blob("is_fw_error", 0400, dir, &isys_fw_error); if (IS_ERR(file)) goto err; psys_fw_error.data = &fw_error_log[IPU_PS]; psys_fw_error.size = sizeof(fw_error_log[IPU_PS]); file = debugfs_create_blob("ps_fw_error", 0400, dir, &psys_fw_error); if (IS_ERR(file)) goto err; isp->ipu7_dir = dir; if (ipu7_buttress_debugfs_init(isp)) goto err; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } static void ipu7_remove_debugfs(struct ipu7_device *isp) { /* * Since the isys and psys debugfs dirs are created under the IPU root * dir, set the root dentry to NULL here to avoid duplicate removal.
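 * The psys auxiliary driver, for instance, checks isp->ipu7_dir before removing its own debugfs subdirectory in ipu7_psys_remove().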
*/ debugfs_remove_recursive(isp->ipu7_dir); isp->ipu7_dir = NULL; } #endif /* CONFIG_DEBUG_FS */ static int ipu7_pci_config_setup(struct pci_dev *dev) { u16 pci_command; int ret; pci_read_config_word(dev, PCI_COMMAND, &pci_command); pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, pci_command); ret = pci_enable_msi(dev); if (ret) dev_err(&dev->dev, "Failed to enable msi (%d)\n", ret); return ret; } static int ipu7_map_fw_code_region(struct ipu7_bus_device *sys, void *data, size_t size) { struct sg_table *sgt = &sys->fw_sgt; struct device *dev = &sys->auxdev.dev; struct page **pages; unsigned long n_pages, i; int ret; n_pages = PHYS_PFN(PAGE_ALIGN(size)); pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; for (i = 0; i < n_pages; i++) { struct page *p = vmalloc_to_page(data); if (!p) { ret = -ENODEV; goto out; } pages[i] = p; data += PAGE_SIZE; } ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size, GFP_KERNEL); if (ret) { ret = -ENOMEM; goto out; } ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0); if (ret < 0) { dev_err(dev, "map fw code[%lu pages %u nents] failed\n", n_pages, sgt->nents); ret = -ENOMEM; sg_free_table(sgt); goto out; } dev_dbg(dev, "fw code region mapped at 0x%llx entries %d\n", sgt->sgl->dma_address, sgt->nents); dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE); out: kfree(pages); return ret; } static void ipu7_unmap_fw_code_region(struct ipu7_bus_device *sys) { dma_unmap_sgtable(&sys->auxdev.dev, &sys->fw_sgt, DMA_TO_DEVICE, 0); sg_free_table(&sys->fw_sgt); } static int ipu7_init_fw_code_region_by_sys(struct ipu7_bus_device *sys, char *sys_name) { struct device *dev = &sys->auxdev.dev; struct ipu7_device *isp = sys->isp; int ret; /* Copy FW binaries to a specific location. */ ret = ipu7_cpd_copy_binary(isp->cpd_fw->data, sys_name, isp->fw_code_region, &sys->fw_entry); if (ret) { dev_err(dev, "%s binary not found.\n", sys_name); return ret; } ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to get runtime PM\n"); return ret; } ret = ipu7_mmu_hw_init(sys->mmu); if (ret) { dev_err(dev, "Failed to init MMU hw\n"); pm_runtime_put(dev); return ret; } /* Map code region. */ ret = ipu7_map_fw_code_region(sys, isp->fw_code_region, IPU_FW_CODE_REGION_SIZE); if (ret) dev_err(dev, "Failed to map fw code region for %s.\n", sys_name); ipu7_mmu_hw_cleanup(sys->mmu); pm_runtime_put(dev); return ret; } static int ipu7_init_fw_code_region(struct ipu7_device *isp) { int ret; /* * Allocate and map memory for FW execution. * Not required in secure mode, in which FW runs in IMR.
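 * The region is IPU_FW_CODE_REGION_SIZE (16 MB) of vmalloc()ed memory, mapped below through both the isys and psys MMUs so that each subsystem can fetch its firmware binary from it.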
*/ isp->fw_code_region = vmalloc(IPU_FW_CODE_REGION_SIZE); if (!isp->fw_code_region) return -ENOMEM; ret = ipu7_init_fw_code_region_by_sys(isp->isys, "isys"); if (ret) goto fail_init; ret = ipu7_init_fw_code_region_by_sys(isp->psys, "psys"); if (ret) goto fail_init; return 0; fail_init: vfree(isp->fw_code_region); return ret; } static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ipu7_isys_internal_pdata *isys_ipdata; struct ipu7_psys_internal_pdata *psys_ipdata; struct device *dev = &pdev->dev; struct ipu7_device *isp; phys_addr_t phys, pb_phys; void __iomem *const *iomap; void __iomem *isys_base = NULL; void __iomem *psys_base = NULL; struct ipu7_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; unsigned int dma_mask = IPU_DMA_MASK; struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); u32 is_es; int ret; if (!fwnode || fwnode_property_read_u32(fwnode, "is_es", &is_es)) is_es = 0; isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL); if (!isp) return -ENOMEM; dev_set_name(dev, "intel-ipu7"); isp->pdev = pdev; INIT_LIST_HEAD(&isp->devices); ret = pcim_enable_device(pdev); if (ret) return dev_err_probe(dev, ret, "Enable PCI device failed\n"); dev_info(dev, "Device 0x%x (rev: 0x%x)\n", pdev->device, pdev->revision); phys = pci_resource_start(pdev, IPU_PCI_BAR); pb_phys = pci_resource_start(pdev, IPU_PCI_PBBAR); dev_info(dev, "BAR0 base %llx BAR2 base %llx\n", phys, pb_phys); ret = pcim_iomap_regions(pdev, BIT(IPU_PCI_BAR) | BIT(IPU_PCI_PBBAR), pci_name(pdev)); if (ret) return dev_err_probe(dev, ret, "Failed to remap I/O memory (%d)\n", ret); iomap = pcim_iomap_table(pdev); if (!iomap) return dev_err_probe(dev, -ENODEV, "Failed to iomap table\n"); isp->base = iomap[IPU_PCI_BAR]; isp->pb_base = iomap[IPU_PCI_PBBAR]; dev_info(dev, "BAR0 mapped at %p BAR2 mapped at %p\n", isp->base, isp->pb_base); pci_set_drvdata(pdev, isp); pci_set_master(pdev); switch (id->device) { case IPU7_PCI_ID: isp->hw_ver = IPU7_VER_7; isp->cpd_fw_name = IPU7_FIRMWARE_NAME; isys_ipdata = &ipu7_isys_ipdata; psys_ipdata = &ipu7_psys_ipdata; break; case IPU7P5_PCI_ID: isp->hw_ver = IPU7_VER_7P5; isp->cpd_fw_name = IPU7P5_FIRMWARE_NAME; isys_ipdata = &ipu7p5_isys_ipdata; psys_ipdata = &ipu7p5_psys_ipdata; break; default: WARN(1, "Unsupported IPU7 device"); return -ENODEV; } ipu7_internal_pdata_init(isys_ipdata, psys_ipdata); isys_base = isp->base + isys_ipdata->hw_variant.offset; psys_base = isp->base + psys_ipdata->hw_variant.offset; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask)); if (ret) return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); ret = dma_set_max_seg_size(dev, UINT_MAX); if (ret) return dev_err_probe(dev, ret, "Failed to set max_seg_size\n"); ret = ipu7_pci_config_setup(pdev); if (ret) return ret; ret = ipu7_buttress_init(isp); if (ret) return ret; dev_info(dev, "cpd file name: %s\n", isp->cpd_fw_name); ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev); if (ret) { dev_err_probe(dev, ret, "Requesting signed firmware %s failed\n", isp->cpd_fw_name); goto buttress_exit; } ret = ipu7_cpd_validate_cpd_file(isp, isp->cpd_fw->data, isp->cpd_fw->size); if (ret) { dev_err_probe(dev, ret, "Failed to validate cpd\n"); goto out_ipu_bus_del_devices; } isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl, sizeof(isys_buttress_ctrl), GFP_KERNEL); if (!isys_ctrl) { ret = -ENOMEM; goto out_ipu_bus_del_devices; } isp->isys = ipu7_isys_init(pdev, dev, isys_ctrl, isys_base, isys_ipdata, 0); if (IS_ERR(isp->isys)) { ret = PTR_ERR(isp->isys); goto
out_ipu_bus_del_devices; } psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl, sizeof(psys_buttress_ctrl), GFP_KERNEL); if (!psys_ctrl) { ret = -ENOMEM; goto out_ipu_bus_del_devices; } isp->psys = ipu7_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl, psys_base, psys_ipdata, 0); if (IS_ERR(isp->psys)) { ret = PTR_ERR(isp->psys); goto out_ipu_bus_del_devices; } ret = devm_request_threaded_irq(dev, pdev->irq, ipu7_buttress_isr, ipu7_buttress_isr_threaded, IRQF_SHARED, IPU_NAME, isp); if (ret) goto out_ipu_bus_del_devices; if (!isp->secure_mode) { ret = ipu7_init_fw_code_region(isp); if (ret) goto out_ipu_bus_del_devices; } else { ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); goto out_ipu_bus_del_devices; } ret = ipu7_mmu_hw_init(isp->psys->mmu); if (ret) { dev_err_probe(&isp->pdev->dev, ret, "Failed to init MMU hardware\n"); goto out_ipu_bus_del_devices; } ret = ipu7_map_fw_code_region(isp->psys, (void *)isp->cpd_fw->data, isp->cpd_fw->size); if (ret) { dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n"); goto out_ipu_bus_del_devices; } ret = ipu7_buttress_authenticate(isp); if (ret) { dev_err_probe(&isp->pdev->dev, ret, "FW authentication failed\n"); goto out_ipu_bus_del_devices; } ipu7_mmu_hw_cleanup(isp->psys->mmu); pm_runtime_put(&isp->psys->auxdev.dev); } #ifdef CONFIG_DEBUG_FS ret = ipu7_init_debugfs(isp); if (ret) { dev_err_probe(dev, ret, "Failed to initialize debugfs\n"); goto out_ipu_bus_del_devices; } #endif pm_runtime_put_noidle(dev); pm_runtime_allow(dev); isp->ipu7_bus_ready_to_probe = true; return 0; out_ipu_bus_del_devices: if (!IS_ERR_OR_NULL(isp->isys) && isp->isys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->isys); if (!IS_ERR_OR_NULL(isp->psys) && isp->psys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->psys); if (!IS_ERR_OR_NULL(isp->fw_code_region)) vfree(isp->fw_code_region); if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) ipu7_mmu_cleanup(isp->psys->mmu); if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) ipu7_mmu_cleanup(isp->isys->mmu); if (!IS_ERR_OR_NULL(isp->psys)) pm_runtime_put(&isp->psys->auxdev.dev); ipu7_bus_del_devices(pdev); release_firmware(isp->cpd_fw); buttress_exit: ipu7_buttress_exit(isp); return ret; } static void ipu7_pci_remove(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); #ifdef CONFIG_DEBUG_FS ipu7_remove_debugfs(isp); #endif if (!IS_ERR_OR_NULL(isp->isys) && isp->isys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->isys); if (!IS_ERR_OR_NULL(isp->psys) && isp->psys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->psys); if (!IS_ERR_OR_NULL(isp->fw_code_region)) vfree(isp->fw_code_region); ipu7_bus_del_devices(pdev); pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); pci_release_regions(pdev); pci_disable_device(pdev); ipu7_buttress_exit(isp); release_firmware(isp->cpd_fw); ipu7_mmu_cleanup(isp->psys->mmu); ipu7_mmu_cleanup(isp->isys->mmu); } static void ipu7_pci_reset_prepare(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); dev_warn(&pdev->dev, "FLR prepare\n"); pm_runtime_forbid(&isp->pdev->dev); } static void ipu7_pci_reset_done(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); ipu7_buttress_restore(isp); if (isp->secure_mode) ipu7_buttress_reset_authentication(isp); isp->ipc_reinit = true; pm_runtime_allow(&isp->pdev->dev); dev_warn(&pdev->dev, "FLR completed\n"); } #ifdef CONFIG_PM /* * PCI base driver
code requires driver to provide these to enable * PCI device level PM state transitions (D0<->D3) */ static int ipu7_suspend(struct device *dev) { return 0; } static int ipu7_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu7_device *isp = pci_get_drvdata(pdev); struct ipu7_buttress *b = &isp->buttress; int ret; isp->secure_mode = ipu7_buttress_get_secure_mode(isp); dev_info(dev, "IPU7 in %s mode\n", isp->secure_mode ? "secure" : "non-secure"); ipu7_buttress_restore(isp); ret = ipu7_buttress_ipc_reset(isp, &b->cse); if (ret) dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); return 0; } ret = ipu7_buttress_authenticate(isp); if (ret) dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret); pm_runtime_put(&isp->psys->auxdev.dev); return 0; } static int ipu7_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu7_device *isp = pci_get_drvdata(pdev); int ret; ipu7_buttress_restore(isp); if (isp->ipc_reinit) { struct ipu7_buttress *b = &isp->buttress; isp->ipc_reinit = false; ret = ipu7_buttress_ipc_reset(isp, &b->cse); if (ret) dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); } return 0; } static const struct dev_pm_ops ipu7_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(&ipu7_suspend, &ipu7_resume) SET_RUNTIME_PM_OPS(&ipu7_suspend, /* Same as in suspend flow */ &ipu7_runtime_resume, NULL) }; #define IPU_PM (&ipu7_pm_ops) #else #define IPU_PM NULL #endif static const struct pci_device_id ipu7_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU7_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU7P5_PCI_ID)}, {0,} }; MODULE_DEVICE_TABLE(pci, ipu7_pci_tbl); static const struct pci_error_handlers pci_err_handlers = { .reset_prepare = ipu7_pci_reset_prepare, .reset_done = ipu7_pci_reset_done, }; static struct pci_driver ipu7_pci_driver = { .name = IPU_NAME, .id_table = ipu7_pci_tbl, .probe = ipu7_pci_probe, .remove = ipu7_pci_remove, .driver = { .pm = IPU_PM, }, .err_handler = &pci_err_handlers, }; module_pci_driver(ipu7_pci_driver); MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_AUTHOR("Qingwu Zhang "); MODULE_AUTHOR("Intel"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 pci driver"); ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/ipu7.h000066400000000000000000000150701465530421300253140ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_H #define IPU7_H #include #include #include #include #include "ipu7-buttress.h" struct ipu7_bus_device; struct pci_dev; struct firmware; #define IPU_NAME "intel-ipu7" #define IPU_MEDIA_DEV_MODEL_NAME "ipu7" #define IPU7_FIRMWARE_NAME "intel/ipu/ipu7_fw.bin" #define IPU7P5_FIRMWARE_NAME "intel/ipu/ipu7ptl_fw.bin" #define IPU7_ISYS_NUM_STREAMS 12 #define IPU7_PCI_ID 0x645d #define IPU7P5_PCI_ID 0xb05d enum ipu7_version { IPU7_VER_INVALID = 0, IPU7_VER_7 = 1, IPU7_VER_7P5 = 2, }; static inline bool is_ipu7p5(u8 hw_ver) { return hw_ver == IPU7_VER_7P5; } #define IPU_UNIFIED_OFFSET 0 /* * ISYS DMA can overshoot. For higher resolutions over allocation is one line * but it must be at minimum 1024 bytes. Value could be different in * different versions / generations thus provide it via platform data. 
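 * IPU_ISYS_OVERALLOC_MIN below encodes the 1024-byte minimum.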
*/ #define IPU_ISYS_OVERALLOC_MIN 1024 /* * The GDA has 128 physical pages; the page size is 2K for IPU6 and 1K for others. */ #define IPU_DEVICE_GDA_NR_PAGES 128 /* * Virtualization factor to calculate the available virtual pages. */ #define IPU_DEVICE_GDA_VIRT_FACTOR 32 #define IPU_FW_CODE_REGION_SIZE 0x1000000 /* 16MB */ #define IPU_FW_CODE_REGION_START 0x4000000 /* 64MB */ #define IPU_FW_CODE_REGION_END (IPU_FW_CODE_REGION_START + \ IPU_FW_CODE_REGION_SIZE) /* 80MB */ struct ipu7_device { struct pci_dev *pdev; struct list_head devices; struct ipu7_bus_device *isys; struct ipu7_bus_device *psys; struct ipu7_buttress buttress; const struct firmware *cpd_fw; const char *cpd_fw_name; /* Only for non-secure mode. */ void *fw_code_region; void __iomem *base; void __iomem *pb_base; #ifdef CONFIG_DEBUG_FS struct dentry *ipu7_dir; #endif u8 hw_ver; bool ipc_reinit; bool secure_mode; bool ipu7_bus_ready_to_probe; }; #define IPU_DMA_MASK 39 #define IPU_LIB_CALL_TIMEOUT_MS 2000 #define IPU_PSYS_CMD_TIMEOUT_MS 2000 #define IPU_PSYS_OPEN_CLOSE_TIMEOUT_US 50 #define IPU_PSYS_OPEN_CLOSE_RETRY (10000 / IPU_PSYS_OPEN_CLOSE_TIMEOUT_US) #define IPU_ISYS_NAME "isys" #define IPU_PSYS_NAME "psys" #define IPU_MMU_ADDR_BITS 32 /* FW is accessible within the first 2 GiB only in non-secure mode. */ #define IPU_MMU_ADDR_BITS_NON_SECURE 31 #define IPU7_IS_MMU_NUM 4 #define IPU7_PS_MMU_NUM 4 #define IPU7P5_IS_MMU_NUM 4 #define IPU7P5_PS_MMU_NUM 4 #define IPU_MMU_MAX_NUM 4 /* max(IS, PS) */ #define IPU_MMU_MAX_TLB_L1_STREAMS 40 #define IPU_MMU_MAX_TLB_L2_STREAMS 40 #define IPU_ZLX_MAX_NUM 32 #define IPU_ZLX_POOL_NUM 8 #define IPU_UAO_PLANE_MAX_NUM 64 /* * To maximize the IOSF utilization, the IPU needs to send requests in bursts. * At the DMA interface with the buttress, there are CDC FIFOs with burst * collection capability. Each CDC FIFO burst collector has a configurable * threshold, configured based on the outcome of performance measurements. * * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 * * Threshold values are pre-defined and are arrived at after performance * evaluations on a given type of IPU. */ #define IPU_MAX_VC_IOSF_PORTS 4 /* * The IPU must configure the correct arbitration mechanism for the IOSF VC * requests. There are two options for each of VC0 and VC1: 0 means rearbitrate * on stall and 1 means stall until the request is completed.
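 * Both VC0 and VC1 are currently configured to rearbitrate on stall, per the IPU_BTRS_ARB_STALL_MODE_VC* defines below.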
*/ #define IPU_BTRS_ARB_MODE_TYPE_REARB 0 #define IPU_BTRS_ARB_MODE_TYPE_STALL 1 /* Currently chosen arbitration mechanism for VC0 */ #define IPU_BTRS_ARB_STALL_MODE_VC0 \ IPU_BTRS_ARB_MODE_TYPE_REARB /* Currently chosen arbitration mechanism for VC1 */ #define IPU_BTRS_ARB_STALL_MODE_VC1 \ IPU_BTRS_ARB_MODE_TYPE_REARB struct ipu7_isys_subdev_pdata; /* One L2 entry maps 1024 L1 entries and one L1 entry per page */ #define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE) /* Max L2 blocks per stream */ #define IPU_MMUV2_MAX_L2_BLOCKS 2 /* Max L1 blocks per stream */ #define IPU_MMUV2_MAX_L1_BLOCKS 16 #define IPU_MMUV2_TRASH_RANGE (IPU_MMUV2_L2_RANGE * \ IPU_MMUV2_MAX_L2_BLOCKS) /* Entries per L1 block */ #define MMUV2_ENTRIES_PER_L1_BLOCK 16 #define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * \ PAGE_SIZE) #define MMUV2_TRASH_L2_BLOCK_OFFSET IPU_MMUV2_L2_RANGE struct ipu7_mmu_hw { char name[32]; void __iomem *base; void __iomem *zlx_base; void __iomem *uao_base; u32 offset; u32 zlx_offset; u32 uao_offset; u32 info_bits; u32 refill; u32 collapse_en_bitmap; u32 at_sp_arb_cfg; u32 l1_block; u32 l2_block; u8 nr_l1streams; u8 nr_l2streams; u32 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS]; u32 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS]; u8 zlx_nr; u32 zlx_axi_pool[IPU_ZLX_POOL_NUM]; u32 zlx_en[IPU_ZLX_MAX_NUM]; u32 zlx_conf[IPU_ZLX_MAX_NUM]; u32 uao_p_num; u32 uao_p2tlb[IPU_UAO_PLANE_MAX_NUM]; }; struct ipu7_mmu_pdata { u32 nr_mmus; struct ipu7_mmu_hw mmu_hw[IPU_MMU_MAX_NUM]; int mmid; }; struct ipu7_isys_csi2_pdata { void __iomem *base; }; struct ipu7_isys_internal_csi2_pdata { u32 nports; u32 *offsets; }; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_internal_tpg_pdata { u32 ntpgs; u32 *offsets; u32 *sels; }; #endif struct ipu7_hw_variants { unsigned long offset; u32 nr_mmus; struct ipu7_mmu_hw mmu_hw[IPU_MMU_MAX_NUM]; u8 cdc_fifos; u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS]; u32 dmem_offset; u32 spc_offset; /* SPC offset from psys base */ }; struct ipu7_isys_internal_pdata { struct ipu7_isys_internal_csi2_pdata csi2; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_internal_tpg_pdata tpg; #endif struct ipu7_hw_variants hw_variant; u32 num_parallel_streams; u32 isys_dma_overshoot; }; struct ipu7_isys_pdata { void __iomem *base; const struct ipu7_isys_internal_pdata *ipdata; }; struct ipu7_psys_internal_pdata { struct ipu7_hw_variants hw_variant; }; struct ipu7_psys_pdata { void __iomem *base; const struct ipu7_psys_internal_pdata *ipdata; }; int request_cpd_fw(const struct firmware **firmware_p, const char *name, struct device *device); extern enum ipu7_version ipu7_ver; void ipu7_internal_pdata_init(struct ipu7_isys_internal_pdata *isys_ipdata, struct ipu7_psys_internal_pdata *psys_ipdata); void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) #include /* Helpers for building against various kernel versions */ static inline struct media_pipeline *media_entity_pipeline(struct media_entity *entity) { return entity->pipe; } #endif #endif /* IPU7_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/000077500000000000000000000000001465530421300252525ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/Makefile000066400000000000000000000007371465530421300267210ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2017 - 2024 Intel Corporation. 
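# On kernels before 6.10, Kbuild passes a tree-relative $(src) to this Makefile, so it is prefixed with $(srctree) here (skipped when EXTERNAL_BUILD=1); the shell snippet below is a version-sort comparison of $(KERNELVERSION) against 6.10.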
is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ifeq ($(is_kernel_lt_6_10), 1) ifneq ($(EXTERNAL_BUILD), 1) src := $(srctree)/$(src) endif endif intel-ipu7-psys-objs += ipu-psys.o \ ipu7-psys.o \ ipu7-fw-psys.o obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-psys.o ccflags-y += -I$(src)/ ccflags-y += -I$(src)/../ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/ipu-psys.c000066400000000000000000001112361465530421300272130ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-mmu.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-cpd.h" #include "ipu7-fw-psys.h" #include "ipu7-psys.h" #include "ipu7-platform-regs.h" #include "ipu7-syscom.h" #include "ipu7-boot.h" static bool async_fw_init; module_param(async_fw_init, bool, 0664); MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); #define IPU_PSYS_NUM_DEVICES 4 static int psys_runtime_pm_resume(struct device *dev); static int psys_runtime_pm_suspend(struct device *dev); #define IPU_FW_CALL_TIMEOUT_JIFFIES \ msecs_to_jiffies(IPU_PSYS_CMD_TIMEOUT_MS) static dev_t ipu7_psys_dev_t; static DECLARE_BITMAP(ipu7_psys_devices, IPU_PSYS_NUM_DEVICES); static DEFINE_MUTEX(ipu7_psys_mutex); static struct fw_init_task { struct delayed_work work; struct ipu7_psys *psys; } fw_init_task; static void ipu7_psys_remove(struct auxiliary_device *auxdev); static int ipu7_psys_get_userpages(struct ipu7_dma_buf_attach *attach) { struct vm_area_struct *vma; unsigned long start, end; int npages, array_size; struct page **pages; struct sg_table *sgt; int nr = 0; int ret = -ENOMEM; start = (unsigned long)attach->userptr; end = PAGE_ALIGN(start + attach->len); npages = PHYS_PFN(end - (start & PAGE_MASK)); array_size = npages * sizeof(struct page *); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return -ENOMEM; if (attach->npages != 0) { pages = attach->pages; npages = attach->npages; attach->vma_is_io = 1; goto skip_pages; } pages = kvzalloc(array_size, GFP_KERNEL); if (!pages) goto free_sgt; mmap_read_lock(current->mm); vma = vma_lookup(current->mm, start); if (unlikely(!vma)) { ret = -EFAULT; goto error_up_read; } if (WARN_ON(vma->vm_flags & (VM_IO | VM_PFNMAP))) { ret = -EINVAL; goto error_up_read; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0) nr = get_user_pages(start & PAGE_MASK, npages, FOLL_WRITE, pages, NULL); #else nr = get_user_pages(start & PAGE_MASK, npages, FOLL_WRITE, pages); #endif if (nr < npages) goto error_up_read; mmap_read_unlock(current->mm); attach->pages = pages; attach->npages = npages; skip_pages: ret = sg_alloc_table_from_pages(sgt, pages, npages, start & ~PAGE_MASK, attach->len, GFP_KERNEL); if (ret < 0) goto error; attach->sgt = sgt; return 0; error_up_read: mmap_read_unlock(current->mm); error: while (nr > 0) put_page(pages[--nr]); if (array_size <= PAGE_SIZE) kfree(pages); else vfree(pages); free_sgt: kfree(sgt); dev_err(attach->dev, "failed to get userpages:%d\n", ret); return ret; } static void ipu7_psys_put_userpages(struct ipu7_dma_buf_attach *attach) { if (!attach || !attach->userptr || !attach->sgt) return; if (!attach->vma_is_io) { int i = attach->npages; while (--i >= 0) { set_page_dirty_lock(attach->pages[i]); 
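/* The device may have written to these pages via DMA, hence the set_page_dirty_lock() above before each put_page(). */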
put_page(attach->pages[i]); } } kvfree(attach->pages); sg_free_table(attach->sgt); kfree(attach->sgt); attach->sgt = NULL; } static int ipu7_dma_buf_attach(struct dma_buf *dbuf, struct dma_buf_attachment *attach) { struct ipu7_psys_kbuffer *kbuf = dbuf->priv; struct ipu7_dma_buf_attach *ipu7_attach; ipu7_attach = kzalloc(sizeof(*ipu7_attach), GFP_KERNEL); if (!ipu7_attach) return -ENOMEM; ipu7_attach->len = kbuf->len; ipu7_attach->userptr = kbuf->userptr; attach->priv = ipu7_attach; return 0; } static void ipu7_dma_buf_detach(struct dma_buf *dbuf, struct dma_buf_attachment *attach) { struct ipu7_dma_buf_attach *ipu7_attach = attach->priv; kfree(ipu7_attach); attach->priv = NULL; } static struct sg_table *ipu7_dma_buf_map(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct ipu7_dma_buf_attach *ipu7_attach = attach->priv; unsigned long attrs; int ret; ret = ipu7_psys_get_userpages(ipu7_attach); if (ret) return NULL; attrs = DMA_ATTR_SKIP_CPU_SYNC; ret = dma_map_sgtable(attach->dev, ipu7_attach->sgt, dir, attrs); if (ret < 0) { dev_dbg(attach->dev, "buf map failed\n"); return ERR_PTR(-EIO); } /* * Initial cache flush to avoid writing dirty pages for buffers which * are later marked as IPU_BUFFER_FLAG_NO_FLUSH. */ dma_sync_sgtable_for_device(attach->dev, ipu7_attach->sgt, DMA_BIDIRECTIONAL); return ipu7_attach->sgt; } static void ipu7_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sg, enum dma_data_direction dir) { struct ipu7_dma_buf_attach *ipu7_attach = attach->priv; dma_unmap_sgtable(attach->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC); ipu7_psys_put_userpages(ipu7_attach); } static int ipu7_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) { return -ENOTTY; } static void ipu7_dma_buf_release(struct dma_buf *buf) { struct ipu7_psys_kbuffer *kbuf = buf->priv; if (!kbuf) return; if (kbuf->db_attach) { dev_dbg(kbuf->db_attach->dev, "releasing buffer %d\n", kbuf->fd); ipu7_psys_put_userpages(kbuf->db_attach->priv); } kfree(kbuf); } static int ipu7_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir) { return -ENOTTY; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) static int ipu7_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) #else static int ipu7_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) #endif { struct dma_buf_attachment *attach; struct ipu7_dma_buf_attach *ipu7_attach; if (list_empty(&dmabuf->attachments)) return -EINVAL; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu7_attach = attach->priv; if (!ipu7_attach || !ipu7_attach->pages || !ipu7_attach->npages) return -EINVAL; map->vaddr = vm_map_ram(ipu7_attach->pages, ipu7_attach->npages, 0); map->is_iomem = false; if (!map->vaddr) return -EINVAL; return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) static void ipu7_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) #else static int ipu7_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) #endif { struct dma_buf_attachment *attach; struct ipu7_dma_buf_attach *ipu7_attach; if (WARN_ON(list_empty(&dmabuf->attachments))) return; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu7_attach = attach->priv; if (WARN_ON(!ipu7_attach || !ipu7_attach->pages || !ipu7_attach->npages)) 
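/* Nothing to unmap if the buffer was never vmapped; vm_unmap_ram() below must get the same page count that ipu7_dma_buf_vmap() passed to vm_map_ram(). */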
return; vm_unmap_ram(map->vaddr, ipu7_attach->npages); } struct dma_buf_ops ipu7_dma_buf_ops = { .attach = ipu7_dma_buf_attach, .detach = ipu7_dma_buf_detach, .map_dma_buf = ipu7_dma_buf_map, .unmap_dma_buf = ipu7_dma_buf_unmap, .release = ipu7_dma_buf_release, .begin_cpu_access = ipu7_dma_buf_begin_cpu_access, .mmap = ipu7_dma_buf_mmap, .vmap = ipu7_dma_buf_vmap, .vunmap = ipu7_dma_buf_vunmap, }; static int ipu7_psys_get_graph_id(struct ipu7_psys_fh *fh) { u8 graph_id = 0; for (graph_id = 0; graph_id < IPU_PSYS_NUM_STREAMS; graph_id++) { if (fh->psys->graph_id[graph_id] == INVALID_STREAM_ID) break; } if (graph_id == IPU_PSYS_NUM_STREAMS) return -EBUSY; fh->psys->graph_id[graph_id] = graph_id; return graph_id; } static void ipu7_psys_put_graph_id(struct ipu7_psys_fh *fh) { fh->psys->graph_id[fh->ip->graph_id] = INVALID_STREAM_ID; } static void ipu7_psys_stream_deinit(struct ipu7_psys_stream *ip, struct ipu7_bus_device *adev) { u8 i; mutex_destroy(&ip->event_mutex); mutex_destroy(&ip->task_mutex); for (i = 0; i < MAX_TASK_REQUEST_QUEUE_SIZE; i++) { if (ip->task_queue[i].msg_task) { ip->task_queue[i].available = 0; dma_free_attrs(&adev->auxdev.dev, sizeof(struct ipu7_msg_task), ip->task_queue[i].msg_task, ip->task_queue[i].task_dma_addr, 0); } } } static int ipu7_psys_stream_init(struct ipu7_psys_stream *ip, struct ipu7_bus_device *adev) { struct device *dev = &adev->auxdev.dev; u8 i, j; ip->event_read_index = 0; ip->event_write_index = 0; for (i = 0; i < MAX_TASK_REQUEST_QUEUE_SIZE; i++) { ip->task_queue[i].available = 1; ip->task_queue[i].msg_task = dma_alloc_attrs(dev, sizeof(struct ipu7_msg_task), &ip->task_queue[i].task_dma_addr, GFP_KERNEL, 0); if (!ip->task_queue[i].msg_task) { dev_err(dev, "Failed to allocate msg task.\n"); goto allocate_fail; } } for (i = 0; i < MAX_TASK_EVENT_QUEUE_SIZE; i++) ip->event_queue[i].available = 1; init_completion(&ip->graph_open); init_completion(&ip->graph_close); return 0; allocate_fail: for (j = 0; j < i; j++) { if (ip->task_queue[j].msg_task) { ip->task_queue[j].available = 0; dma_free_attrs(dev, sizeof(struct ipu7_msg_task), ip->task_queue[j].msg_task, ip->task_queue[j].task_dma_addr, 0); } } return -ENOMEM; } static int ipu7_psys_open(struct inode *inode, struct file *file) { struct ipu7_psys *psys = inode_to_ipu_psys(inode); struct device *dev = &psys->adev->auxdev.dev; struct ipu7_psys_fh *fh; struct ipu7_psys_stream *ip; int rval; fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (!fh) return -ENOMEM; ip = kzalloc(sizeof(*ip), GFP_KERNEL); if (!ip) { rval = -ENOMEM; goto alloc_failed; } rval = ipu7_psys_stream_init(ip, psys->adev); if (rval) goto stream_init_failed; fh->ip = ip; ip->fh = fh; fh->psys = psys; file->private_data = fh; mutex_init(&fh->mutex); INIT_LIST_HEAD(&fh->bufmap); init_waitqueue_head(&fh->wait); mutex_init(&ip->task_mutex); mutex_init(&ip->event_mutex); mutex_lock(&psys->mutex); rval = ipu7_psys_get_graph_id(fh); if (rval < 0) goto open_failed; fh->ip->graph_id = rval; rval = pm_runtime_get_sync(dev); if (rval < 0) { dev_err(dev, "Runtime PM failed (%d)\n", rval); goto rpm_put; } list_add_tail(&fh->list, &psys->fhs); mutex_unlock(&psys->mutex); return 0; rpm_put: pm_runtime_put(dev); ipu7_psys_put_graph_id(fh); open_failed: ipu7_psys_stream_deinit(ip, psys->adev); mutex_destroy(&fh->mutex); mutex_unlock(&psys->mutex); stream_init_failed: kfree(ip); alloc_failed: kfree(fh); return rval; } static inline void ipu7_psys_kbuf_unmap(struct ipu7_psys_kbuffer *kbuf) { if (!kbuf) return; kbuf->valid = false; #if LINUX_VERSION_CODE >= 
KERNEL_VERSION(5, 18, 0) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) if (kbuf->kaddr) { struct iosys_map dmap; iosys_map_set_vaddr(&dmap, kbuf->kaddr); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); #else dma_buf_vunmap(kbuf->dbuf, &dmap); #endif } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && \ LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) if (kbuf->kaddr) { struct dma_buf_map dmap; dma_buf_map_set_vaddr(&dmap, kbuf->kaddr); dma_buf_vunmap(kbuf->dbuf, &dmap); } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) if (kbuf->sgt) dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); #else if (kbuf->sgt) dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); #endif if (kbuf->db_attach) dma_buf_detach(kbuf->dbuf, kbuf->db_attach); dma_buf_put(kbuf->dbuf); kbuf->db_attach = NULL; kbuf->dbuf = NULL; kbuf->sgt = NULL; } static int ipu7_psys_release(struct inode *inode, struct file *file) { struct ipu7_psys *psys = inode_to_ipu_psys(inode); struct ipu7_psys_fh *fh = file->private_data; struct ipu7_psys_kbuffer *kbuf, *kbuf0; struct dma_buf_attachment *dba; mutex_lock(&fh->mutex); /* clean up buffers */ if (!list_empty(&fh->bufmap)) { list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) { list_del(&kbuf->list); dba = kbuf->db_attach; /* Unmap and release buffers */ if (kbuf->dbuf && dba) { ipu7_psys_kbuf_unmap(kbuf); } else { if (dba) ipu7_psys_put_userpages(dba->priv); kfree(kbuf); } } } mutex_unlock(&fh->mutex); ipu7_psys_stream_deinit(fh->ip, psys->adev); mutex_lock(&psys->mutex); list_del(&fh->list); ipu7_psys_put_graph_id(fh); kfree(fh->ip); if (list_empty(&psys->fhs)) psys->power_gating = 0; mutex_unlock(&psys->mutex); mutex_destroy(&fh->mutex); kfree(fh); pm_runtime_put(&psys->adev->auxdev.dev); return 0; } static int ipu7_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu7_psys_fh *fh) { struct device *dev = &fh->psys->adev->auxdev.dev; struct ipu7_psys_kbuffer *kbuf; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct dma_buf *dbuf; int ret; if (!buf->base.userptr) { dev_err(dev, "Buffer allocation not supported\n"); return -EINVAL; } if (!PAGE_ALIGNED(buf->base.userptr)) { dev_err(dev, "Not page-aligned userptr is not supported\n"); return -EINVAL; } kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) return -ENOMEM; kbuf->len = buf->len; kbuf->userptr = buf->base.userptr; kbuf->flags = buf->flags; exp_info.ops = &ipu7_dma_buf_ops; exp_info.size = kbuf->len; exp_info.flags = O_RDWR; exp_info.priv = kbuf; dbuf = dma_buf_export(&exp_info); if (IS_ERR(dbuf)) { kfree(kbuf); return PTR_ERR(dbuf); } ret = dma_buf_fd(dbuf, 0); if (ret < 0) { dma_buf_put(dbuf); return ret; } kbuf->fd = ret; buf->base.fd = ret; buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; kbuf->flags = buf->flags; mutex_lock(&fh->mutex); list_add(&kbuf->list, &fh->bufmap); mutex_unlock(&fh->mutex); dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", buf->base.userptr, buf->len, buf->base.fd); return 0; } static int ipu7_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu7_psys_fh *fh) { return 0; } static struct ipu7_psys_kbuffer * ipu7_psys_lookup_kbuffer(struct ipu7_psys_fh *fh, int fd) { struct ipu7_psys_kbuffer *kbuf; list_for_each_entry(kbuf, &fh->bufmap, list) { if (kbuf->fd == fd) return kbuf; } return NULL; } static int ipu7_psys_unmapbuf_locked(int fd, struct ipu7_psys_fh *fh, struct ipu7_psys_kbuffer *kbuf) { 
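/* Caller must hold fh->mutex. After this returns, the kbuffer's dmabuf mapping is released and, for non-userptr buffers, the kbuffer itself is freed. */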
struct device *dev = &fh->psys->adev->auxdev.dev; if (!kbuf || fd != kbuf->fd) { dev_err(dev, "invalid kbuffer\n"); return -EINVAL; } /* From now on it is not safe to use this kbuffer */ ipu7_psys_kbuf_unmap(kbuf); list_del(&kbuf->list); if (!kbuf->userptr) kfree(kbuf); dev_dbg(dev, "%s fd %d unmapped\n", __func__, fd); return 0; } static int ipu7_psys_mapbuf_locked(int fd, struct ipu7_psys_fh *fh, struct ipu7_psys_kbuffer *kbuf) { struct device *dev = &fh->psys->adev->auxdev.dev; struct dma_buf *dbuf; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) struct iosys_map dmap = { .is_iomem = false, }; #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && \ LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) struct dma_buf_map dmap; #endif int ret; dbuf = dma_buf_get(fd); if (IS_ERR(dbuf)) return -EINVAL; if (!kbuf) { /* This fd isn't generated by ipu7_psys_getbuf, it * is a new fd. Create a new kbuf item for this fd, and * add this kbuf to bufmap list. */ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) { ret = -ENOMEM; goto mapbuf_fail; } list_add(&kbuf->list, &fh->bufmap); } /* fd valid and found, need remap */ if (kbuf->dbuf && (kbuf->dbuf != dbuf || kbuf->len != dbuf->size)) { dev_dbg(dev, "dmabuf fd %d with kbuf %p changed, need remap.\n", fd, kbuf); ret = ipu7_psys_unmapbuf_locked(fd, fh, kbuf); if (ret) goto mapbuf_fail; kbuf = ipu7_psys_lookup_kbuffer(fh, fd); /* changed external dmabuf */ if (!kbuf) { kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) { ret = -ENOMEM; goto mapbuf_fail; } list_add(&kbuf->list, &fh->bufmap); } } if (kbuf->sgt) { dev_dbg(dev, "fd %d has been mapped!\n", fd); dma_buf_put(dbuf); goto mapbuf_end; } kbuf->dbuf = dbuf; if (kbuf->len == 0) kbuf->len = kbuf->dbuf->size; kbuf->fd = fd; kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); if (IS_ERR(kbuf->db_attach)) { ret = PTR_ERR(kbuf->db_attach); dev_dbg(dev, "dma buf attach failed\n"); goto kbuf_map_fail; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, DMA_BIDIRECTIONAL); #else kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL); #endif if (IS_ERR_OR_NULL(kbuf->sgt)) { ret = -EINVAL; kbuf->sgt = NULL; dev_dbg(dev, "dma buf map attachment failed\n"); goto kbuf_map_fail; } kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0) ret = dma_buf_vmap_unlocked(kbuf->dbuf, &dmap); #else ret = dma_buf_vmap(kbuf->dbuf, &dmap); #endif if (ret) { dev_dbg(dev, "dma buf vmap failed\n"); goto kbuf_map_fail; } kbuf->kaddr = dmap.vaddr; dev_dbg(dev, "%s kbuf %p fd %d with len %llu mapped\n", __func__, kbuf, fd, kbuf->len); mapbuf_end: kbuf->valid = true; return 0; kbuf_map_fail: ipu7_psys_kbuf_unmap(kbuf); list_del(&kbuf->list); if (!kbuf->userptr) kfree(kbuf); mapbuf_fail: dma_buf_put(dbuf); dev_err(dev, "%s failed for fd %d\n", __func__, fd); return ret; } static long ipu7_psys_mapbuf(int fd, struct ipu7_psys_fh *fh) { long ret; struct ipu7_psys_kbuffer *kbuf; mutex_lock(&fh->mutex); kbuf = ipu7_psys_lookup_kbuffer(fh, fd); ret = ipu7_psys_mapbuf_locked(fd, fh, kbuf); mutex_unlock(&fh->mutex); dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF ret %ld\n", ret); return ret; } static long ipu7_psys_unmapbuf(int fd, struct ipu7_psys_fh *fh) { struct device *dev = &fh->psys->adev->auxdev.dev; struct ipu7_psys_kbuffer *kbuf; long ret; mutex_lock(&fh->mutex); kbuf = ipu7_psys_lookup_kbuffer(fh, 
fd); if (!kbuf) { dev_err(dev, "buffer with fd %d not found\n", fd); mutex_unlock(&fh->mutex); return -EINVAL; } ret = ipu7_psys_unmapbuf_locked(fd, fh, kbuf); mutex_unlock(&fh->mutex); dev_dbg(dev, "IOC_UNMAPBUF\n"); return ret; } static long ipu_psys_graph_open(struct ipu_psys_graph_info *graph, struct ipu7_psys_fh *fh) { struct ipu7_psys *psys = fh->psys; int ret = 0; if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_CLOSED) { dev_err(&psys->dev, "Wrong state %d to open graph %d\n", fh->ip->graph_state, fh->ip->graph_id); return -EINVAL; } if (!graph->nodes || graph->num_nodes > MAX_GRAPH_NODES) { dev_err(&psys->dev, "invalid graph nodes\n"); return -EINVAL; } if (copy_from_user(fh->ip->nodes, graph->nodes, graph->num_nodes * sizeof(*graph->nodes))) { dev_err(&psys->dev, "Failed to copy nodes\n"); return -EINVAL; } reinit_completion(&fh->ip->graph_open); ret = ipu7_fw_psys_graph_open(graph, psys, fh->ip); if (ret) { dev_err(&psys->dev, "Failed to open graph %d\n", fh->ip->graph_id); return ret; } fh->ip->graph_state = IPU_MSG_GRAPH_STATE_OPEN_WAIT; ret = wait_for_completion_timeout(&fh->ip->graph_open, IPU_FW_CALL_TIMEOUT_JIFFIES); if (!ret) { dev_err(&psys->dev, "Open graph %d timeout\n", fh->ip->graph_id); fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; return -ETIMEDOUT; } if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_OPEN) { dev_err(&psys->dev, "Failed to set graph\n"); fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; return -EINVAL; } graph->graph_id = fh->ip->graph_id; return 0; } static long ipu_psys_graph_close(int graph_id, struct ipu7_psys_fh *fh) { struct ipu7_psys *psys = fh->psys; int ret = 0; if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_OPEN) { dev_err(&psys->dev, "Wrong state %d to close graph %d\n", fh->ip->graph_state, fh->ip->graph_id); return -EINVAL; } reinit_completion(&fh->ip->graph_close); ret = ipu7_fw_psys_graph_close(fh->ip->graph_id, fh->psys); if (ret) { dev_err(&psys->dev, "Failed to close graph %d\n", fh->ip->graph_id); return ret; } fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSE_WAIT; ret = wait_for_completion_timeout(&fh->ip->graph_close, IPU_FW_CALL_TIMEOUT_JIFFIES); if (!ret) { dev_err(&psys->dev, "Close graph %d timeout\n", fh->ip->graph_id); return -ETIMEDOUT; } if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_CLOSED) { dev_err(&psys->dev, "Failed to close graph\n"); fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; return -EINVAL; } return 0; } static struct ipu_psys_task_queue * ipu7_psys_get_task_queue(struct ipu7_psys_stream *ip, struct ipu_psys_task_request *task) { struct ipu_psys_task_queue *tq = NULL; struct device *dev = &ip->fh->psys->dev; struct device *adev = &ip->fh->psys->adev->auxdev.dev; struct ipu7_psys_kbuffer *kbuf = NULL; u32 i, j; int fd, prevfd = -1; if (task->term_buf_count > MAX_GRAPH_TERMINALS) { dev_err(dev, "term_buf_count is too large\n"); return NULL; } mutex_lock(&ip->task_mutex); for (i = 0U; i < MAX_TASK_REQUEST_QUEUE_SIZE; i++) { if (ip->task_queue[i].available == 1U) { tq = &ip->task_queue[i]; if (copy_from_user(tq->task_buffers, task->task_buffers, task->term_buf_count * sizeof(*task->task_buffers))) { dev_err(dev, "failed to copy task buffers\n"); goto unlock; } for (j = 0; j < task->term_buf_count; j++) { fd = tq->task_buffers[j].term_buf.base.fd; kbuf = ipu7_psys_lookup_kbuffer(ip->fh, fd); if (!kbuf) { dev_err(dev, "fd %d not found\n", fd); goto unlock; } tq->ipu7_addr[j] = kbuf->dma_addr + tq->task_buffers[j].term_buf.data_offset; if ((tq->task_buffers[j].term_buf.flags & IPU_BUFFER_FLAG_NO_FLUSH) || prevfd ==
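/* Consecutive terminal buffers sharing one dmabuf fd are flushed only once. */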
fd) continue; prevfd = fd; dma_sync_sgtable_for_device(adev, kbuf->sgt, DMA_BIDIRECTIONAL); } ip->task_queue[i].available = 0U; dev_dbg(dev, "frame %d to task queue %p\n", task->frame_id, tq); mutex_unlock(&ip->task_mutex); return tq; } } dev_err(dev, "No available task queues for stream %p\n", ip); unlock: mutex_unlock(&ip->task_mutex); return NULL; } static long ipu_psys_task_request(struct ipu_psys_task_request *task, struct ipu7_psys_fh *fh) { struct ipu7_psys *psys = fh->psys; struct ipu_psys_task_queue *tq; int ret = 0; if (task->term_buf_count == 0 || !task->task_buffers) { dev_err(&psys->dev, "no task buffers provided\n"); return -EINVAL; } tq = ipu7_psys_get_task_queue(fh->ip, task); if (!tq) { dev_err(&psys->dev, "Failed to get task queue\n"); return -EINVAL; } ret = ipu7_fw_psys_task_request(task, fh->ip, tq, psys); if (ret) { dev_err(&psys->dev, "Failed to request task %d\n", fh->ip->graph_id); mutex_lock(&fh->ip->task_mutex); tq->available = 1; mutex_unlock(&fh->ip->task_mutex); return ret; } tq->task_state = IPU_MSG_TASK_STATE_WAIT_DONE; return 0; } static unsigned int ipu7_psys_poll(struct file *file, struct poll_table_struct *wait) { struct ipu7_psys_fh *fh = file->private_data; struct device *dev = &fh->psys->adev->auxdev.dev; struct ipu7_psys_stream *ip = fh->ip; unsigned int res = 0; dev_dbg(dev, "ipu psys poll\n"); poll_wait(file, &fh->wait, wait); mutex_lock(&ip->event_mutex); if (!ip->event_queue[ip->event_read_index].available) res = POLLIN; mutex_unlock(&ip->event_mutex); dev_dbg(dev, "ipu psys poll res %u\n", res); return res; } static long ipu7_psys_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { union { struct ipu_psys_graph_info graph; struct ipu_psys_task_request task; struct ipu_psys_buffer buf; struct ipu_psys_event ev; struct ipu_psys_capability caps; } karg; struct ipu7_psys_fh *fh = file->private_data; long err = 0; void __user *up = (void __user *)arg; bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF && cmd != IPU_IOC_GRAPH_CLOSE); if (copy) { if (_IOC_SIZE(cmd) > sizeof(karg)) return -ENOTTY; if (_IOC_DIR(cmd) & _IOC_WRITE) { err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); if (err) return -EFAULT; } } switch (cmd) { case IPU_IOC_MAPBUF: err = ipu7_psys_mapbuf(arg, fh); break; case IPU_IOC_UNMAPBUF: err = ipu7_psys_unmapbuf(arg, fh); break; case IPU_IOC_QUERYCAP: karg.caps = fh->psys->caps; break; case IPU_IOC_GETBUF: err = ipu7_psys_getbuf(&karg.buf, fh); break; case IPU_IOC_PUTBUF: err = ipu7_psys_putbuf(&karg.buf, fh); break; case IPU_IOC_GRAPH_OPEN: err = ipu_psys_graph_open(&karg.graph, fh); break; case IPU_IOC_GRAPH_CLOSE: err = ipu_psys_graph_close(arg, fh); break; case IPU_IOC_TASK_REQUEST: err = ipu_psys_task_request(&karg.task, fh); break; case IPU_IOC_DQEVENT: err = ipu7_ioctl_dqevent(&karg.ev, fh, file->f_flags); break; default: err = -ENOTTY; break; } if (err) return err; if (copy && _IOC_DIR(cmd) & _IOC_READ) if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) return -EFAULT; return 0; } static const struct file_operations ipu7_psys_fops = { .open = ipu7_psys_open, .release = ipu7_psys_release, .unlocked_ioctl = ipu7_psys_ioctl, .poll = ipu7_psys_poll, .owner = THIS_MODULE, }; static void ipu7_psys_dev_release(struct device *dev) { } #ifdef CONFIG_PM static int psys_runtime_pm_resume(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_psys *psys = ipu7_bus_get_drvdata(adev); unsigned long flags; int rval; if (!psys) return 0; spin_lock_irqsave(&psys->ready_lock, flags); if (psys->ready)
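/* Already powered up with FW running; nothing to resume. */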
{ spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } spin_unlock_irqrestore(&psys->ready_lock, flags); rval = ipu7_mmu_hw_init(adev->mmu); if (rval) return rval; if (async_fw_init && !psys->adev->syscom) { dev_err(dev, "%s: asynchronous firmware init not finished, skipping\n", __func__); return 0; } if (!ipu7_buttress_auth_done(adev->isp)) { dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__); return 0; } ipu7_psys_setup_hw(psys); ipu7_psys_subdomains_power(psys, 1); rval = ipu7_boot_start_fw(psys->adev); if (rval) { dev_err(&psys->dev, "failed to start psys fw. ret: %d\n", rval); return rval; } rval = ipu7_fw_psys_open(psys); if (rval) { dev_err(&psys->adev->auxdev.dev, "Failed to open abi.\n"); return rval; } spin_lock_irqsave(&psys->ready_lock, flags); psys->ready = 1; spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } static int psys_runtime_pm_suspend(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_psys *psys = ipu7_bus_get_drvdata(adev); unsigned long flags; if (!psys) return 0; if (!psys->ready) return 0; spin_lock_irqsave(&psys->ready_lock, flags); psys->ready = 0; spin_unlock_irqrestore(&psys->ready_lock, flags); ipu7_fw_psys_close(psys); ipu7_boot_stop_fw(psys->adev); ipu7_psys_subdomains_power(psys, 0); ipu7_mmu_hw_cleanup(adev->mmu); return 0; } /* The following PM callbacks are needed to enable runtime PM in IPU PCI * device resume, otherwise, runtime PM can't work in PCI resume from * S3 state. */ static int psys_resume(struct device *dev) { return 0; } static int psys_suspend(struct device *dev) { return 0; } static const struct dev_pm_ops psys_pm_ops = { .runtime_suspend = psys_runtime_pm_suspend, .runtime_resume = psys_runtime_pm_resume, .suspend = psys_suspend, .resume = psys_resume, }; #define PSYS_PM_OPS (&psys_pm_ops) #else #define PSYS_PM_OPS NULL #endif #ifdef CONFIG_DEBUG_FS static int ipu7_psys_icache_prefetch_sp_get(void *data, u64 *val) { struct ipu7_psys *psys = data; *val = psys->icache_prefetch_sp; return 0; } static int ipu7_psys_icache_prefetch_sp_set(void *data, u64 val) { struct ipu7_psys *psys = data; if (val != !!val) return -EINVAL; psys->icache_prefetch_sp = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops, ipu7_psys_icache_prefetch_sp_get, ipu7_psys_icache_prefetch_sp_set, "%llu\n"); static int ipu7_psys_icache_prefetch_isp_get(void *data, u64 *val) { struct ipu7_psys *psys = data; *val = psys->icache_prefetch_isp; return 0; } static int ipu7_psys_icache_prefetch_isp_set(void *data, u64 val) { struct ipu7_psys *psys = data; if (val != !!val) return -EINVAL; psys->icache_prefetch_isp = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops, ipu7_psys_icache_prefetch_isp_get, ipu7_psys_icache_prefetch_isp_set, "%llu\n"); #define FW_LOG_BUF_SIZE (2 * 1024 * 1024) static int psys_fw_log_init(struct ipu7_psys *psys) { struct device *dev = &psys->adev->auxdev.dev; struct psys_fw_log *fw_log; void *log_buf; if (psys->fw_log) return 0; fw_log = devm_kzalloc(dev, sizeof(*fw_log), GFP_KERNEL); if (!fw_log) return -ENOMEM; mutex_init(&fw_log->mutex); log_buf = devm_kzalloc(dev, FW_LOG_BUF_SIZE, GFP_KERNEL); if (!log_buf) return -ENOMEM; fw_log->head = log_buf; fw_log->addr = log_buf; fw_log->count = 0; fw_log->size = 0; psys->fw_log = fw_log; return 0; } static ssize_t fwlog_read(struct file *file, char __user *userbuf, size_t size, loff_t *pos) { struct ipu7_psys *psys = file->private_data; struct psys_fw_log *fw_log = psys->fw_log; struct device 
*dev = &psys->adev->auxdev.dev; void *buf; int ret = 0; if (!fw_log) return 0; buf = kvzalloc(FW_LOG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&fw_log->mutex); if (!psys->fw_log->size) { dev_warn(dev, "no available psys fw log\n"); mutex_unlock(&fw_log->mutex); goto free_and_return; } memcpy(buf, psys->fw_log->addr, psys->fw_log->size); dev_dbg(dev, "copy %u bytes fw log to user\n", psys->fw_log->size); mutex_unlock(&fw_log->mutex); ret = simple_read_from_buffer(userbuf, size, pos, buf, psys->fw_log->size); free_and_return: kvfree(buf); return ret; } static const struct file_operations psys_fw_log_fops = { .open = simple_open, .owner = THIS_MODULE, .read = fwlog_read, .llseek = default_llseek, }; static int ipu7_psys_init_debugfs(struct ipu7_psys *psys) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir("psys", psys->adev->isp->ipu7_dir); if (IS_ERR(dir)) return -ENOMEM; file = debugfs_create_file("icache_prefetch_sp", 0600, dir, psys, &psys_icache_prefetch_sp_fops); if (IS_ERR(file)) goto err; file = debugfs_create_file("icache_prefetch_isp", 0600, dir, psys, &psys_icache_prefetch_isp_fops); if (IS_ERR(file)) goto err; file = debugfs_create_file("fwlog", 0400, dir, psys, &psys_fw_log_fops); if (IS_ERR(file)) goto err; psys->debugfsdir = dir; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif static void run_fw_init_work(struct work_struct *work) { struct fw_init_task *task = (struct fw_init_task *)work; struct ipu7_psys *psys = task->psys; struct auxiliary_device *auxdev = &psys->adev->auxdev; int rval; rval = ipu7_fw_psys_init(psys); if (rval) { dev_err(&auxdev->dev, "FW init failed(%d)\n", rval); ipu7_psys_remove(auxdev); } else { dev_info(&auxdev->dev, "FW init done\n"); } } static int ipu7_psys_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { struct ipu7_bus_device *adev = auxdev_to_adev(auxdev); struct device *dev = &auxdev->dev; struct ipu7_psys *psys; unsigned int minor; int i, rval = -E2BIG; if (!adev->isp->ipu7_bus_ready_to_probe) return -EPROBE_DEFER; rval = alloc_chrdev_region(&ipu7_psys_dev_t, 0, IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME); if (rval) { dev_err(dev, "can't alloc psys chrdev region (%d)\n", rval); return rval; } rval = pm_runtime_resume_and_get(&auxdev->dev); if (rval < 0) { unregister_chrdev_region(ipu7_psys_dev_t, IPU_PSYS_NUM_DEVICES); return rval; } rval = ipu7_mmu_hw_init(adev->mmu); if (rval) goto out_unregister_chr_region; mutex_lock(&ipu7_psys_mutex); minor = find_next_zero_bit(ipu7_psys_devices, IPU_PSYS_NUM_DEVICES, 0); if (minor == IPU_PSYS_NUM_DEVICES) { dev_err(dev, "too many devices\n"); rval = -E2BIG; goto out_unlock; } psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); if (!psys) { rval = -ENOMEM; goto out_unlock; } for (i = 0; i < IPU_PSYS_NUM_STREAMS; i++) psys->graph_id[i] = INVALID_STREAM_ID; adev->auxdrv_data = (const struct ipu7_auxdrv_data *)auxdev_id->driver_data; adev->auxdrv = to_auxiliary_drv(dev->driver); psys->adev = adev; psys->pdata = adev->pdata; psys->icache_prefetch_sp = 0; psys->power_gating = 0; cdev_init(&psys->cdev, &ipu7_psys_fops); psys->cdev.owner = ipu7_psys_fops.owner; rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu7_psys_dev_t), minor), 1); if (rval) { dev_err(dev, "cdev_add failed (%d)\n", rval); goto out_unlock; } set_bit(minor, ipu7_psys_devices); spin_lock_init(&psys->ready_lock); psys->ready = 0; psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; mutex_init(&psys->mutex); INIT_LIST_HEAD(&psys->fhs); if (async_fw_init) { INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, run_fw_init_work); fw_init_task.psys = psys;
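/*
 * FW init runs asynchronously on the system workqueue so that probe can
 * return without waiting for firmware bring-up; if init fails, the
 * worker above tears the device back down via ipu7_psys_remove().
 */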
schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); } else { rval = ipu7_fw_psys_init(psys); } if (rval) { dev_err(dev, "FW init failed(%d)\n", rval); goto out_mutex_destroy; } psys->dev.parent = dev; psys->dev.devt = MKDEV(MAJOR(ipu7_psys_dev_t), minor); psys->dev.release = ipu7_psys_dev_release; dev_set_name(&psys->dev, "ipu7-psys%d", minor); rval = device_register(&psys->dev); if (rval < 0) { dev_err(&psys->dev, "psys device_register failed\n"); goto out_fw_release; } psys_fw_log_init(psys); /* Add the hw stepping information to caps */ strscpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME, sizeof(psys->caps.dev_model)); dev_set_drvdata(dev, psys); mutex_unlock(&ipu7_psys_mutex); #ifdef CONFIG_DEBUG_FS /* Debug fs failure is not fatal. */ ipu7_psys_init_debugfs(psys); #endif dev_info(dev, "psys probe minor: %d\n", minor); ipu7_mmu_hw_cleanup(adev->mmu); pm_runtime_put(&auxdev->dev); return 0; out_fw_release: ipu7_fw_psys_release(psys); out_mutex_destroy: mutex_destroy(&psys->mutex); cdev_del(&psys->cdev); out_unlock: /* Safe to call even if the init is not called */ mutex_unlock(&ipu7_psys_mutex); ipu7_mmu_hw_cleanup(adev->mmu); out_unregister_chr_region: unregister_chrdev_region(ipu7_psys_dev_t, IPU_PSYS_NUM_DEVICES); pm_runtime_put(&auxdev->dev); return rval; } static void ipu7_psys_remove(struct auxiliary_device *auxdev) { struct ipu7_psys *psys = dev_get_drvdata(&auxdev->dev); struct ipu7_device *isp = psys->adev->isp; struct device *dev = &auxdev->dev; #ifdef CONFIG_DEBUG_FS if (isp->ipu7_dir) debugfs_remove_recursive(psys->debugfsdir); #endif mutex_lock(&ipu7_psys_mutex); ipu7_fw_psys_release(psys); device_unregister(&psys->dev); clear_bit(MINOR(psys->cdev.dev), ipu7_psys_devices); cdev_del(&psys->cdev); mutex_unlock(&ipu7_psys_mutex); mutex_destroy(&psys->mutex); unregister_chrdev_region(ipu7_psys_dev_t, IPU_PSYS_NUM_DEVICES); dev_info(dev, "removed\n"); } static irqreturn_t psys_isr_threaded(struct ipu7_bus_device *adev) { struct ipu7_psys *psys = ipu7_bus_get_drvdata(adev); struct device *dev = &psys->adev->auxdev.dev; void __iomem *base = psys->pdata->base; u32 status, state; int r; mutex_lock(&psys->mutex); #ifdef CONFIG_PM r = pm_runtime_get_if_in_use(dev); if (!r || WARN_ON_ONCE(r < 0)) { mutex_unlock(&psys->mutex); return IRQ_NONE; } #endif state = ipu7_boot_get_boot_state(adev); if (IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(state)) { //TODO: Add log parser dev_warn(&psys->dev, "error state %u\n", state); } else { status = readl(base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_STATUS); writel(status, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR); if (status & IRQ_FROM_LOCAL_FW) ipu7_psys_handle_events(psys); } pm_runtime_put(dev); mutex_unlock(&psys->mutex); return IRQ_HANDLED; } static const struct ipu7_auxdrv_data ipu7_psys_auxdrv_data = { .isr_threaded = psys_isr_threaded, .wake_isr_thread = true, }; static const struct auxiliary_device_id ipu7_psys_id_table[] = { { .name = "intel_ipu7.psys", .driver_data = (kernel_ulong_t)&ipu7_psys_auxdrv_data, }, { } }; MODULE_DEVICE_TABLE(auxiliary, ipu7_psys_id_table); static struct auxiliary_driver ipu7_psys_driver = { .name = IPU_PSYS_NAME, .probe = ipu7_psys_probe, .remove = ipu7_psys_remove, .id_table = ipu7_psys_id_table, .driver = { .pm = PSYS_PM_OPS, }, }; module_auxiliary_driver(ipu7_psys_driver); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Qingwu Zhang "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 processing system driver"); MODULE_IMPORT_NS(INTEL_IPU7); MODULE_IMPORT_NS(DMA_BUF); 
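/*
 * Minimal user-space sketch of the character-device interface implemented
 * above: open the node, query capabilities with IPU_IOC_QUERYCAP, then
 * poll() until a task-done event is queued and dequeue it with
 * IPU_IOC_DQEVENT. The device path and the <linux/ipu7-psys.h> install
 * location are assumptions (the node name follows the "ipu7-psys%d"
 * pattern registered in probe above); error handling is abbreviated.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ipu7-psys.h>

int psys_wait_one_event(void)
{
	struct ipu_psys_capability caps = { 0 };
	struct ipu_psys_event ev = { 0 };
	struct pollfd pfd;
	int fd;

	fd = open("/dev/ipu7-psys0", O_RDWR);	/* hypothetical node name */
	if (fd < 0)
		return -1;

	if (ioctl(fd, IPU_IOC_QUERYCAP, &caps) == 0)
		printf("driver: %.20s model: %.32s\n",
		       (const char *)caps.driver, (const char *)caps.dev_model);

	pfd.fd = fd;
	pfd.events = POLLIN;
	/* POLLIN is raised once a task-done event is queued for this fh */
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
		ioctl(fd, IPU_IOC_DQEVENT, &ev);

	close(fd);
	return 0;
}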
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/ipu7-fw-psys.c000066400000000000000000000423631465530421300277200ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2016 - 2024 Intel Corporation #include #include #include #include "abi/ipu7_fw_common_abi.h" #include "abi/ipu7_fw_msg_abi.h" #include "abi/ipu7_fw_psys_config_abi.h" #include "ipu7-boot.h" #include "ipu7-bus.h" #include "ipu7-fw-psys.h" #include "ipu7-syscom.h" #include "ipu7-psys.h" #define TLV_TYPE(type) ((u32)(type) & 0x3FU) #define TLV_SIZE(buf_size) (((buf_size) / TLV_ITEM_ALIGNMENT) & 0xFFFFU) #define BIT64(bit_num) (1ULL << (bit_num)) /** * Node resource ID of INSYS, required when there is a link from INSYS to PSYS. */ #define IPU_PSYS_NODE_RSRC_ID_IS (0xFEU) /** * Special node resource ID to identify a generic external node. Required * when there is a link to/from IPU and that node. */ #define IPU_PSYS_NODE_RSRC_ID_EXT_IP (0xFFU) int ipu7_fw_psys_init(struct ipu7_psys *psys) { struct ipu7_bus_device *adev = psys->adev; struct device *dev = &adev->auxdev.dev; struct ipu7_syscom_context *syscom; struct ipu7_psys_config *psys_config; struct syscom_queue_config *queue_configs; dma_addr_t psys_config_dma_addr; u32 freq; int i, num_queues, ret; /* Allocate and init syscom context. */ syscom = devm_kzalloc(dev, sizeof(struct ipu7_syscom_context), GFP_KERNEL); if (!syscom) return -ENOMEM; adev->syscom = syscom; syscom->num_input_queues = FWPS_MSG_ABI_MAX_INPUT_QUEUES; syscom->num_output_queues = FWPS_MSG_ABI_MAX_OUTPUT_QUEUES; num_queues = syscom->num_input_queues + syscom->num_output_queues; queue_configs = devm_kzalloc(dev, FW_QUEUE_CONFIG_SIZE(num_queues), GFP_KERNEL); if (!queue_configs) { ipu7_fw_psys_release(psys); return -ENOMEM; } syscom->queue_configs = queue_configs; queue_configs[FWPS_MSG_ABI_OUT_ACK_QUEUE_ID].max_capacity = IPU_PSYS_ACK_QUEUE_SIZE; queue_configs[FWPS_MSG_ABI_OUT_ACK_QUEUE_ID].token_size_in_bytes = IPU_PSYS_OUT_MSG_SIZE; queue_configs[FWPS_MSG_ABI_OUT_LOG_QUEUE_ID].max_capacity = IPU_PSYS_LOG_QUEUE_SIZE; queue_configs[FWPS_MSG_ABI_OUT_LOG_QUEUE_ID].token_size_in_bytes = IPU_PSYS_OUT_MSG_SIZE; queue_configs[FWPS_MSG_ABI_IN_DEV_QUEUE_ID].max_capacity = IPU_PSYS_CMD_QUEUE_SIZE; queue_configs[FWPS_MSG_ABI_IN_DEV_QUEUE_ID].token_size_in_bytes = FWPS_MSG_HOST2FW_MAX_SIZE; queue_configs[FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID].max_capacity = 0; queue_configs[FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID].token_size_in_bytes = 0; for (i = FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID; i < num_queues; i++) { queue_configs[i].max_capacity = IPU_PSYS_TASK_QUEUE_SIZE; queue_configs[i].token_size_in_bytes = sizeof(struct ia_gofo_msg_indirect); } /* Allocate PSYS subsys config. 
*/ psys_config = dma_alloc_attrs(dev, sizeof(struct ipu7_psys_config), &psys_config_dma_addr, GFP_KERNEL, 0); if (!psys_config) { dev_err(dev, "Failed to allocate psys subsys config.\n"); ipu7_fw_psys_release(psys); return -ENOMEM; } psys->subsys_config = psys_config; psys->subsys_config_dma_addr = psys_config_dma_addr; memset(psys_config, 0, sizeof(struct ipu7_psys_config)); ret = ipu7_buttress_get_psys_freq(adev->isp, &freq); if (ret) { dev_err(dev, "Failed to get PSYS frequency.\n"); ipu7_fw_psys_release(psys); return ret; } ret = ipu7_boot_init_boot_config(adev, queue_configs, num_queues, freq, psys_config_dma_addr); if (ret) ipu7_fw_psys_release(psys); return ret; } void ipu7_fw_psys_release(struct ipu7_psys *psys) { struct ipu7_bus_device *adev = psys->adev; ipu7_boot_release_boot_config(adev); if (psys->subsys_config) { dma_free_attrs(&adev->auxdev.dev, sizeof(struct ipu7_psys_config), psys->subsys_config, psys->subsys_config_dma_addr, 0); psys->subsys_config = NULL; psys->subsys_config_dma_addr = 0; } } static int ipu7_fw_dev_ready(struct ipu7_psys *psys, u16 type) { const struct ia_gofo_msg_header_ack *ack_header; u8 buffer[FWPS_MSG_FW2HOST_MAX_SIZE]; int rval; rval = ipu7_fw_psys_event_handle(psys, buffer); if (rval) return rval; ack_header = (const struct ia_gofo_msg_header_ack *)buffer; if (ack_header->header.tlv_header.tlv_type == type) return 0; return -EAGAIN; } static int ipu7_fw_dev_open(struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_dev_open *token; dev_dbg(&psys->dev, "send_token: fw psys open\n"); token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!token) return -ENODATA; token->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_DEV_OPEN); token->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*token)); token->header.user_token = 0; token->max_graphs = IPU_PSYS_MAX_GRAPH_NUMS; token->dev_msg_map = (u8)(IPU_MSG_DEVICE_OPEN_SEND_RESP | IPU_MSG_DEVICE_OPEN_SEND_IRQ); token->enable_power_gating = 0; ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu7_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_open(struct ipu7_psys *psys) { u32 retry = IPU_PSYS_OPEN_CLOSE_RETRY; int rval; rval = ipu7_fw_dev_open(psys); if (rval) { dev_err(&psys->dev, "failed to open PSYS dev.\n"); return rval; } psys->dev_state = IPU_MSG_DEV_STATE_OPEN_WAIT; do { usleep_range(IPU_PSYS_OPEN_CLOSE_TIMEOUT_US, IPU_PSYS_OPEN_CLOSE_TIMEOUT_US + 10); rval = ipu7_fw_dev_ready(psys, IPU_MSG_TYPE_DEV_OPEN_ACK); if (!rval) { dev_dbg(&psys->dev, "dev open done.\n"); psys->dev_state = IPU_MSG_DEV_STATE_OPEN; return 0; } } while (retry--); if (rval) dev_err(&psys->dev, "wait dev open timeout!\n"); return rval; } static int ipu7_fw_dev_close(struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_dev_close *token; dev_dbg(&psys->dev, "send_token: fw psys close\n"); token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!token) return -ENODATA; token->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_DEV_CLOSE); token->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*token)); token->header.user_token = 0; token->dev_msg_map = (u8)(IPU_MSG_DEVICE_CLOSE_SEND_RESP | IPU_MSG_DEVICE_CLOSE_SEND_IRQ); ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu7_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } void ipu7_fw_psys_close(struct ipu7_psys *psys) { u32 retry = IPU_PSYS_OPEN_CLOSE_RETRY; int rval; rval = ipu7_fw_dev_close(psys); if (rval) { dev_err(&psys->dev,
"failed to close PSYS dev.\n"); return; } psys->dev_state = IPU_MSG_DEV_STATE_CLOSE_WAIT; do { usleep_range(IPU_PSYS_OPEN_CLOSE_TIMEOUT_US, IPU_PSYS_OPEN_CLOSE_TIMEOUT_US + 10); rval = ipu7_fw_dev_ready(psys, IPU_MSG_TYPE_DEV_CLOSE_ACK); if (!rval) { dev_dbg(&psys->dev, "dev close done.\n"); psys->dev_state = IPU_MSG_DEV_STATE_CLOSED; return; } } while (retry--); if (rval) dev_err(&psys->dev, "wait dev close timeout!\n"); } static void ipu7_fw_psys_build_node_profile(const struct node_profile *profile, void **buf_ptr_ptr) { struct ipu7_msg_cb_profile *cb_profile = (struct ipu7_msg_cb_profile *)*buf_ptr_ptr; u16 buf_size = sizeof(*cb_profile); memcpy(cb_profile->profile_base.teb, profile->teb, sizeof(cb_profile->profile_base.teb)); memcpy(cb_profile->rbm, profile->rbm, sizeof(cb_profile->rbm)); memcpy(cb_profile->deb, profile->deb, sizeof(cb_profile->deb)); memcpy(cb_profile->reb, profile->reb, sizeof(cb_profile->reb)); cb_profile->profile_base.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_NODE_PROFILE_TYPE_CB); cb_profile->profile_base.tlv_header.tlv_len32 = TLV_SIZE(buf_size); *buf_ptr_ptr += buf_size; } /* Returns false when the terminal is skipped */ static bool ipu7_fw_psys_build_node_term(const struct node_ternimal *term, void **buf_ptr_ptr) { struct ipu7_msg_term *msg_term = (struct ipu7_msg_term *)*buf_ptr_ptr; u16 buf_size = sizeof(*msg_term); memset(msg_term, 0, sizeof(*msg_term)); /* TODO: check on TEB on the skipped terminals */ if (!term->term_id && !term->buf_size) return false; msg_term->term_id = term->term_id; /* Disable progress message on connect terminals */ msg_term->event_req_bm = 0U; msg_term->payload_size = term->buf_size; msg_term->tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TERM_TYPE_BASE); msg_term->tlv_header.tlv_len32 = TLV_SIZE(buf_size); *buf_ptr_ptr += buf_size; return true; } /* Returns false when the node is skipped */ static bool ipu7_fw_psys_build_node(const struct graph_node *node, void **buf_ptr_ptr) { struct ipu7_msg_node *msg_node = (struct ipu7_msg_node *)*buf_ptr_ptr; u16 buf_size = sizeof(*msg_node); bool ret = false; u8 i = 0; memset(msg_node, 0, sizeof(*msg_node)); /** * Pass node info to FW; do not check for external IP and ISYS, * as FW expects an external node */ if (node->node_rsrc_id != IPU_PSYS_NODE_RSRC_ID_IS && node->node_rsrc_id != IPU_PSYS_NODE_RSRC_ID_EXT_IP) { if (node->profiles[0].teb[0] == 0U) return false; } /** * Sanity check for dummy node; TEB should be set to the required value */ if (node->node_rsrc_id == IPU_PSYS_NODE_RSRC_ID_IS || node->node_rsrc_id == IPU_PSYS_NODE_RSRC_ID_EXT_IP) { if (node->profiles[0].teb[0] != IPU_MSG_NODE_DONT_CARE_TEB_LO || node->profiles[0].teb[1] != IPU_MSG_NODE_DONT_CARE_TEB_HI) return false; } msg_node->node_rsrc_id = node->node_rsrc_id; msg_node->node_ctx_id = node->node_ctx_id; msg_node->num_frags = 1; /* No fragment support */ *buf_ptr_ptr += buf_size; msg_node->profiles_list.head_offset = (u16)((uintptr_t)*buf_ptr_ptr - (uintptr_t)&msg_node->profiles_list); for (i = 0; i < ARRAY_SIZE(node->profiles); i++) { ipu7_fw_psys_build_node_profile(&node->profiles[i], buf_ptr_ptr); msg_node->profiles_list.num_elems++; } msg_node->terms_list.head_offset = (u16)((uintptr_t)*buf_ptr_ptr - (uintptr_t)&msg_node->terms_list); for (i = 0; i < ARRAY_SIZE(node->terminals); i++) { ret = ipu7_fw_psys_build_node_term(&node->terminals[i], buf_ptr_ptr); if (ret) msg_node->terms_list.num_elems++; } buf_size = (u32)(uintptr_t)*buf_ptr_ptr - (uintptr_t)msg_node; msg_node->tlv_header.tlv_type = TLV_TYPE(IPU_MSG_NODE_TYPE_BASE); 
msg_node->tlv_header.tlv_len32 = TLV_SIZE(buf_size); return true; } static bool ipu7_fw_psys_build_link(const struct graph_link *link, void **buf_ptr_ptr) { struct ipu7_msg_link *msg_link = (struct ipu7_msg_link *)*buf_ptr_ptr; if (!link->ep_src.node_ctx_id && !link->ep_dst.node_ctx_id && !link->ep_src.term_id && !link->ep_dst.term_id) return false; msg_link->endpoints.ep_src.node_ctx_id = link->ep_src.node_ctx_id; msg_link->endpoints.ep_src.term_id = link->ep_src.term_id; msg_link->endpoints.ep_dst.node_ctx_id = link->ep_dst.node_ctx_id; msg_link->endpoints.ep_dst.term_id = link->ep_dst.term_id; msg_link->foreign_key = link->foreign_key; msg_link->streaming_mode = link->streaming_mode; msg_link->pbk_id = link->pbk_id; msg_link->pbk_slot_id = link->pbk_slot_id; msg_link->delayed_link = link->delayed_link; *buf_ptr_ptr += sizeof(*msg_link); msg_link->link_options.num_elems = 0; msg_link->link_options.head_offset = (u16)((uintptr_t)*buf_ptr_ptr - (uintptr_t)&msg_link->link_options); msg_link->tlv_header.tlv_type = TLV_TYPE(IPU_MSG_LINK_TYPE_GENERIC); msg_link->tlv_header.tlv_len32 = TLV_SIZE(sizeof(*msg_link)); return true; } int ipu7_fw_psys_graph_open(const struct ipu_psys_graph_info *graph, struct ipu7_psys *psys, struct ipu7_psys_stream *ip) { struct ipu7_syscom_context *ctx = psys->adev->syscom; void *buf_ptr; struct ipu7_msg_graph_open *graph_open; u32 buf_size = 0; bool ret = false; u8 i = 0; dev_dbg(&psys->dev, "send_token: fw psys graph open\n"); buf_ptr = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!buf_ptr) return -ENODATA; graph_open = (struct ipu7_msg_graph_open *)buf_ptr; memset(graph_open, 0, sizeof(*graph_open)); graph_open->graph_id = ip->graph_id; graph_open->graph_msg_map = (u8)(IPU_MSG_GRAPH_OPEN_SEND_RESP | IPU_MSG_GRAPH_OPEN_SEND_IRQ); buf_ptr += sizeof(*graph_open); graph_open->nodes.head_offset = (u16)((uintptr_t)buf_ptr - (uintptr_t)&graph_open->nodes); for (i = 0; i < ARRAY_SIZE(ip->nodes); i++) { ret = ipu7_fw_psys_build_node(&ip->nodes[i], &buf_ptr); if (ret) graph_open->nodes.num_elems++; } graph_open->links.head_offset = (u16)((uintptr_t)buf_ptr - (uintptr_t)&graph_open->links); for (i = 0; i < ARRAY_SIZE(graph->links); i++) { ret = ipu7_fw_psys_build_link(&graph->links[i], &buf_ptr); if (ret) graph_open->links.num_elems++; } buf_size = (u32)((uintptr_t)buf_ptr - (uintptr_t)graph_open); graph_open->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_GRAPH_OPEN); graph_open->header.tlv_header.tlv_len32 = TLV_SIZE(buf_size); graph_open->header.user_token = 0; ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu7_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_graph_close(u8 graph_id, struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_graph_close *token; dev_dbg(&psys->dev, "send_token: fw psys graph close\n"); token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!token) return -ENODATA; token->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_GRAPH_CLOSE); token->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*token)); token->header.user_token = 0; token->graph_id = graph_id; token->graph_msg_map = (u8)(IPU_MSG_DEVICE_CLOSE_SEND_RESP | IPU_MSG_DEVICE_CLOSE_SEND_IRQ); ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu7_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_task_request(const struct ipu_psys_task_request *task, struct ipu7_psys_stream *ip, struct ipu_psys_task_queue *tq, struct ipu7_psys *psys) { struct 
ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_task *msg = tq->msg_task; struct ia_gofo_msg_indirect *ind; u32 node_q_id = ip->q_id[task->node_ctx_id]; u32 teb_hi, teb_lo; u64 teb; u8 i, term_id; u8 num_terms; ind = ipu7_syscom_get_token(ctx, node_q_id); if (!ind) return -ENODATA; memset(msg, 0, sizeof(*msg)); msg->graph_id = task->graph_id; msg->node_ctx_id = task->node_ctx_id; msg->profile_idx = 0U; /* Only one profile on HKR */ msg->frame_id = task->frame_id; msg->frag_id = 0U; /* No frag, set to 0 */ /* * Each task has flags indicating whether an ack is needed; they may be * used to reduce interrupts if multiple CBs are supported. */ msg->req_done_msg = 1; msg->req_done_irq = 1; memcpy(msg->payload_reuse_bm, task->payload_reuse_bm, sizeof(task->payload_reuse_bm)); teb_hi = ip->nodes[msg->node_ctx_id].profiles[0].teb[1]; teb_lo = ip->nodes[msg->node_ctx_id].profiles[0].teb[0]; teb = (teb_lo | (((u64)teb_hi) << 32)); num_terms = ip->nodes[msg->node_ctx_id].num_terms; for (i = 0U; i < num_terms; i++) { term_id = tq->task_buffers[i].term_id; if (BIT64(term_id) & teb) msg->term_buffers[term_id] = tq->ipu7_addr[i]; } msg->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_TASK_REQ); msg->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*msg)); msg->header.user_token = (u64)tq; ind->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_INDIRECT); ind->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*ind)); ind->header.msg_options.num_elems = 0; ind->header.msg_options.head_offset = 0; ind->ref_header = msg->header.tlv_header; ind->ref_msg_ptr = tq->task_dma_addr; ipu7_syscom_put_token(ctx, node_q_id); ipu7_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_event_handle(struct ipu7_psys *psys, u8 *buf_ptr) { struct ipu7_syscom_context *ctx = psys->adev->syscom; void *token; token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_OUT_ACK_QUEUE_ID); if (!token) return -ENODATA; memcpy(buf_ptr, token, sizeof(u8) * FWPS_MSG_FW2HOST_MAX_SIZE); ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_OUT_ACK_QUEUE_ID); return 0; } int ipu7_fw_psys_get_log(struct ipu7_psys *psys) { void *token; struct ia_gofo_msg_log *log_msg; u8 msg_type, msg_len; u32 count, fmt_id; struct device *dev = &psys->adev->auxdev.dev; struct psys_fw_log *fw_log = psys->fw_log; token = ipu7_syscom_get_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); if (!token) return -ENODATA; mutex_lock(&fw_log->mutex); while (token) { log_msg = (struct ia_gofo_msg_log *)token; msg_type = log_msg->header.tlv_header.tlv_type; msg_len = log_msg->header.tlv_header.tlv_len32; if (msg_type != IPU_MSG_TYPE_DEV_LOG || !msg_len) dev_warn(dev, "Invalid msg data from Log queue!\n"); count = log_msg->log_info_ts.log_info.log_counter; fmt_id = log_msg->log_info_ts.log_info.fmt_id; if (count > fw_log->count + 1) dev_warn(dev, "log msg lost, expected count %u but got %u!\n", fw_log->count + 1, count); if (fmt_id == IA_GOFO_MSG_LOG_FMT_ID_INVALID) { dev_err(dev, "invalid log msg fmt_id 0x%x!\n", fmt_id); ipu7_syscom_put_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); mutex_unlock(&fw_log->mutex); return -EIO; } memcpy(fw_log->head, (void *)&log_msg->log_info_ts, sizeof(struct ia_gofo_msg_log_info_ts)); fw_log->count = count; fw_log->head += sizeof(struct ia_gofo_msg_log_info_ts); fw_log->size += sizeof(struct ia_gofo_msg_log_info_ts); ipu7_syscom_put_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); token = ipu7_syscom_get_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); } mutex_unlock(&fw_log->mutex); return 0; } 
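/*
 * Worked example of the terminal-enable test used by
 * ipu7_fw_psys_task_request() above: the two 32-bit TEB words of a node
 * profile are folded into one 64-bit mask, and a terminal buffer is only
 * attached when its term_id bit is set. Standalone sketch for
 * illustration only; note the mask must be built with 64-bit arithmetic
 * (the reason BIT64() uses 1ULL), since term_ids can exceed bit 31.
 */
#include <stdbool.h>
#include <stdint.h>

static bool term_enabled(uint32_t teb_lo, uint32_t teb_hi, uint8_t term_id)
{
	uint64_t teb = ((uint64_t)teb_hi << 32) | teb_lo;

	/* equivalent to the driver's BIT64(term_id) & teb check */
	return (teb >> term_id) & 1U;
}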
ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/ipu7-fw-psys.h000066400000000000000000000023251465530421300277170ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 - 2024 Intel Corporation */ #ifndef IPU7_FW_PSYS_H #define IPU7_FW_PSYS_H #include "abi/ipu7_fw_common_abi.h" #include "abi/ipu7_fw_msg_abi.h" #include "ipu7-syscom.h" #include // TODO: use FW configuration replace it #define IPU_PSYS_MAX_GRAPH_NUMS (8U) struct ipu7_msg_to_str { const enum ipu7_msg_type type; const char *msg; }; struct ipu7_psys; struct ipu7_psys_stream; struct ipu_psys_task_queue; int ipu7_fw_psys_init(struct ipu7_psys *psys); void ipu7_fw_psys_release(struct ipu7_psys *psys); int ipu7_fw_psys_open(struct ipu7_psys *psys); void ipu7_fw_psys_close(struct ipu7_psys *psys); int ipu7_fw_psys_graph_open(const struct ipu_psys_graph_info *graph, struct ipu7_psys *psys, struct ipu7_psys_stream *ip); int ipu7_fw_psys_graph_close(u8 graph_id, struct ipu7_psys *psys); int ipu7_fw_psys_task_request(const struct ipu_psys_task_request *task, struct ipu7_psys_stream *ip, struct ipu_psys_task_queue *tq, struct ipu7_psys *psys); int ipu7_fw_psys_event_handle(struct ipu7_psys *psys, u8 *buf_ptr); int ipu7_fw_psys_get_log(struct ipu7_psys *psys); #endif /* IPU7_FW_PSYS_H */ ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/ipu7-psys.c000066400000000000000000000260611465530421300273030ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-psys.h" #include "ipu7-platform-regs.h" #include "ipu7-fw-psys.h" static bool early_pg_transfer; module_param(early_pg_transfer, bool, 0664); MODULE_PARM_DESC(early_pg_transfer, "Copy PGs back to user after resource allocation"); bool enable_power_gating = true; module_param(enable_power_gating, bool, 0664); MODULE_PARM_DESC(enable_power_gating, "enable power gating"); #define DOMAIN_POWE_TIMEOUT_US (200 * USEC_PER_MSEC) static const struct ipu7_msg_to_str ps_fw_msg[] = { {IPU_MSG_TYPE_RESERVED, "IPU_MSG_TYPE_RESERVED"}, {IPU_MSG_TYPE_INDIRECT, "IPU_MSG_TYPE_INDIRECT"}, {IPU_MSG_TYPE_DEV_LOG, "IPU_MSG_TYPE_DEV_LOG"}, {IPU_MSG_TYPE_GENERAL_ERR, "IPU_MSG_TYPE_GENERAL_ERR"}, {IPU_MSG_TYPE_DEV_OPEN, "IPU_MSG_TYPE_DEV_OPEN"}, {IPU_MSG_TYPE_DEV_OPEN_ACK, "IPU_MSG_TYPE_DEV_OPEN_ACK"}, {IPU_MSG_TYPE_GRAPH_OPEN, "IPU_MSG_TYPE_GRAPH_OPEN"}, {IPU_MSG_TYPE_GRAPH_OPEN_ACK, "IPU_MSG_TYPE_GRAPH_OPEN_ACK"}, {IPU_MSG_TYPE_TASK_REQ, "IPU_MSG_TYPE_TASK_REQ"}, {IPU_MSG_TYPE_TASK_DONE, "IPU_MSG_TYPE_TASK_DONE"}, {IPU_MSG_TYPE_GRAPH_CLOSE, "IPU_MSG_TYPE_GRAPH_CLOSE"}, {IPU_MSG_TYPE_GRAPH_CLOSE_ACK, "IPU_MSG_TYPE_GRAPH_CLOSE_ACK"}, {IPU_MSG_TYPE_DEV_CLOSE, "IPU_MSG_TYPE_DEV_CLOSE"}, {IPU_MSG_TYPE_DEV_CLOSE_ACK, "IPU_MSG_TYPE_DEV_CLOSE_ACK"}, {IPU_MSG_TYPE_N, "IPU_MSG_TYPE_N"}, }; void ipu7_psys_subdomains_power(struct ipu7_psys *psys, bool on) { struct device *dev = &psys->adev->auxdev.dev; u32 val, req; int ret; void __iomem *base = psys->adev->isp->base; /* power domain req */ req = on ? IPU_PSYS_DOMAIN_POWER_MASK : 0; val = readl(base + BUTTRESS_REG_PS_DOMAINS_STATUS); dev_dbg(dev, "power %s psys sub-domains. status: 0x%x\n", on ? 
"UP" : "DOWN", val); if ((val & IPU_PSYS_DOMAIN_POWER_MASK) == req) { dev_warn(dev, "psys sub-domains power already in request state.\n"); return; } writel(req, base + BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ); ret = readl_poll_timeout(base + BUTTRESS_REG_PS_DOMAINS_STATUS, val, !(val & IPU_PSYS_DOMAIN_POWER_IN_PROGRESS) && ((val & IPU_PSYS_DOMAIN_POWER_MASK) == req), 100, DOMAIN_POWE_TIMEOUT_US); if (ret) dev_err(dev, "Psys sub-domains power %s timeout! status: 0x%x\n", on ? "UP" : "DOWN", val); } void ipu7_psys_setup_hw(struct ipu7_psys *psys) { void __iomem *base = psys->pdata->base; u32 val = IRQ_FROM_LOCAL_FW; /* Enable TO_SW IRQ from FW */ writel(val, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR); writel(val, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_MASK); writel(val, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_ENABLE); /* correct the initial printf configuration */ writel(0x2, base + PS_UC_CTRL_BASE + PRINTF_AXI_CNTL); } static struct ipu7_psys_stream* ipu7_psys_get_ip_from_fh(struct ipu7_psys *psys, u8 graph_id) { struct ipu7_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { if (fh->ip->graph_id == graph_id) return fh->ip; } return NULL; } static void ipu7_psys_handle_graph_open_ack(struct ipu7_psys *psys, const void *buffer) { const struct ipu7_msg_graph_open_ack *ack_msg = (const struct ipu7_msg_graph_open_ack *)buffer; const struct ia_gofo_msg_header_ack *ack_header = &ack_msg->header; struct ipu7_psys_stream *ip; struct device *dev = &psys->dev; const struct ia_gofo_tlv_header *msg_tlv_item; u16 num_items; u16 head_offset; u32 i; dev_dbg(dev, "[ACK]%s: graph_id: %d\n", __func__, ack_msg->graph_id); if (ack_msg->graph_id > (u8)IPU_PSYS_MAX_GRAPH_NUMS) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } ip = ipu7_psys_get_ip_from_fh(psys, ack_msg->graph_id); if (!ip) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } if (ip->graph_state != IPU_MSG_GRAPH_STATE_OPEN_WAIT) { dev_err(dev, "%s state %d\n", __func__, ip->graph_state); goto open_graph_exit; } num_items = ack_header->header.msg_options.num_elems; if (!num_items) { dev_err(dev, "%s, num_items is 0\n", __func__); goto open_graph_exit; } head_offset = ack_header->header.msg_options.head_offset; msg_tlv_item = (const struct ia_gofo_tlv_header *) ((u8 *)&ack_header->header.msg_options + head_offset); if (!msg_tlv_item) { dev_err(dev, "%s: failed to get tlv item\n", __func__); goto open_graph_exit; } for (i = 0U; i < num_items; i++) { u32 option_type = msg_tlv_item->tlv_type; u32 option_bytes = msg_tlv_item->tlv_len32 * TLV_ITEM_ALIGNMENT; struct ipu7_msg_graph_open_ack_task_q_info *msg_option = (struct ipu7_msg_graph_open_ack_task_q_info *)msg_tlv_item; switch (option_type) { case IPU_MSG_GRAPH_ACK_TASK_Q_INFO: /* * Should do check that: * - Each managed node has a queue ID * - Queue ID's are sane */ dev_dbg(dev, "[ACK]set node_ctx_id %d q_id %d\n", msg_option->node_ctx_id, msg_option->q_id); ip->q_id[msg_option->node_ctx_id] = msg_option->q_id; break; default: /* * Only one option supported */ dev_err(dev, "not supported %u\n", option_type); break; } msg_tlv_item = (struct ia_gofo_tlv_header *) (((u8 *)msg_tlv_item) + option_bytes); } ip->graph_state = IPU_MSG_GRAPH_STATE_OPEN; open_graph_exit: complete(&ip->graph_open); } static int ipu7_psys_handle_task_done(struct ipu7_psys *psys, void *buffer, u32 error) { const struct ipu7_msg_task_done *ack_msg = (const struct ipu7_msg_task_done *)buffer; const struct ia_gofo_msg_header_ack *ack_header = &ack_msg->header; const struct ia_gofo_msg_header 
*msg_header = &ack_header->header; struct ipu_psys_task_queue *task_queue; struct device *dev = &psys->dev; struct ipu7_psys_stream *ip; u32 i, index_id = 0; if (ack_msg->graph_id > (u8)IPU_PSYS_MAX_GRAPH_NUMS) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return -EINVAL; } ip = ipu7_psys_get_ip_from_fh(psys, ack_msg->graph_id); if (!ip) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return -EINVAL; } mutex_lock(&ip->event_mutex); task_queue = (struct ipu_psys_task_queue *)msg_header->user_token; if (!task_queue) { dev_err(dev, "%s: task_token is NULL\n", __func__); mutex_unlock(&ip->event_mutex); return -EINVAL; } if (task_queue->task_state != IPU_MSG_TASK_STATE_WAIT_DONE) { dev_err(dev, "%s: task_queue %d graph %d node %d error %d\n", __func__, task_queue->index, ack_msg->graph_id, ack_msg->node_ctx_id, task_queue->task_state); mutex_unlock(&ip->event_mutex); return -ENOENT; } index_id = task_queue->index; task_queue->available = 1; task_queue->task_state = IPU_MSG_TASK_STATE_DONE; dev_dbg(dev, "%s: task_token(%p, %d)\n", __func__, task_queue, index_id); if (ip->event_queue[ip->event_write_index].available != 0U) { i = ip->event_write_index; ip->event_queue[i].graph_id = ack_msg->graph_id; ip->event_queue[i].node_ctx_id = ack_msg->node_ctx_id; ip->event_queue[i].frame_id = ack_msg->frame_id; ip->event_queue[i].err_code = error; ip->event_queue[i].available = 0U; ip->event_write_index = (i + 1U) % MAX_TASK_EVENT_QUEUE_SIZE; } else { dev_dbg(dev, "%s: event queue is full(%d)\n", __func__, MAX_TASK_EVENT_QUEUE_SIZE); } mutex_unlock(&ip->event_mutex); wake_up_interruptible(&ip->fh->wait); return 0; } static void ipu7_psys_handle_graph_close_ack(struct ipu7_psys *psys, void *buffer) { struct ipu7_msg_graph_close_ack *ack_msg = (struct ipu7_msg_graph_close_ack *)buffer; struct device *dev = &psys->dev; struct ipu7_psys_stream *ip; dev_dbg(dev, "[ACK]%s:graph_id: %d\n", __func__, ack_msg->graph_id); if (ack_msg->graph_id > (u8)IPU_PSYS_MAX_GRAPH_NUMS) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } ip = ipu7_psys_get_ip_from_fh(psys, ack_msg->graph_id); if (!ip) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } if (ip->graph_state != IPU_MSG_GRAPH_STATE_CLOSE_WAIT) { dev_err(dev, "%s state %d\n", __func__, ip->graph_state); goto graph_close_exit; } ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; graph_close_exit: complete(&ip->graph_close); } void ipu7_psys_handle_events(struct ipu7_psys *psys) { const struct ia_gofo_msg_header_ack *ack_header; u8 buffer[FWPS_MSG_FW2HOST_MAX_SIZE]; struct device *dev = &psys->dev; u32 error = 0; int ret = 0; do { #ifdef ENABLE_FW_OFFLINE_LOGGER ipu7_fw_psys_get_log(psys); #endif ret = ipu7_fw_psys_event_handle(psys, buffer); if (ret) break; ack_header = (const struct ia_gofo_msg_header_ack *)buffer; dev_dbg(dev, "[ACK]%s: ack msg %s received\n", __func__, ps_fw_msg[ack_header->header.tlv_header.tlv_type].msg); if (!IA_GOFO_MSG_ERR_IS_OK(ack_header->err)) { dev_err(dev, "group %d, code %d, detail: %d, %d\n", ack_header->err.err_group, ack_header->err.err_code, ack_header->err.err_detail[0], ack_header->err.err_detail[1]); error = (ack_header->err.err_group == IPU_MSG_ERR_GROUP_TASK) ? 
IPU_PSYS_EVT_ERROR_FRAME : IPU_PSYS_EVT_ERROR_INTERNAL; } switch (ack_header->header.tlv_header.tlv_type) { case IPU_MSG_TYPE_GRAPH_OPEN_ACK: ipu7_psys_handle_graph_open_ack(psys, buffer); break; case IPU_MSG_TYPE_TASK_DONE: ipu7_psys_handle_task_done(psys, buffer, error); break; case IPU_MSG_TYPE_GRAPH_CLOSE_ACK: ipu7_psys_handle_graph_close_ack(psys, buffer); break; case IPU_MSG_TYPE_GENERAL_ERR: /* already printed the log above for general error */ break; default: dev_err(&psys->dev, "Unknown type %d\n", ack_header->header.tlv_header.tlv_type); } } while (1); } static int ipu7_psys_complete_event(struct ipu7_psys_stream *ip, struct ipu_psys_event *event) { u32 i; mutex_lock(&ip->event_mutex); /* Check if there is already an event in the queue */ i = ip->event_read_index; if (ip->event_queue[i].available == 1U) { mutex_unlock(&ip->event_mutex); return -EAGAIN; } ip->event_read_index = (i + 1U) % MAX_TASK_EVENT_QUEUE_SIZE; event->graph_id = ip->event_queue[i].graph_id; event->node_ctx_id = ip->event_queue[i].node_ctx_id; event->frame_id = ip->event_queue[i].frame_id; event->error = ip->event_queue[i].err_code; ip->event_queue[i].available = 1; mutex_unlock(&ip->event_mutex); dev_dbg(&ip->fh->psys->dev, "event %d graph %d cb %d frame %d dequeued", i, event->graph_id, event->node_ctx_id, event->frame_id); return 0; } long ipu7_ioctl_dqevent(struct ipu_psys_event *event, struct ipu7_psys_fh *fh, unsigned int f_flags) { struct ipu7_psys *psys = fh->psys; int rval = 0; dev_dbg(&psys->adev->auxdev.dev, "IOC_DQEVENT\n"); if (!(f_flags & O_NONBLOCK)) { rval = wait_event_interruptible(fh->wait, !ipu7_psys_complete_event(fh->ip, event)); if (rval == -ERESTARTSYS) return rval; } else { rval = ipu7_psys_complete_event(fh->ip, event); } return rval; } ipu7-drivers-0~git202407090310.f40d5a70/drivers/media/pci/intel/ipu7/psys/ipu7-psys.h000066400000000000000000000115361465530421300273110ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_PSYS_H #define IPU7_PSYS_H #include #include #include #include "ipu7.h" #include "ipu7-fw-psys.h" #define IPU_PSYS_WORK_QUEUE system_power_efficient_wq #define IPU_PSYS_CMD_QUEUE_SIZE 0x20 #define IPU_PSYS_TASK_QUEUE_SIZE 0x20 #define IPU_PSYS_ACK_QUEUE_SIZE 0x40 #define IPU_PSYS_LOG_QUEUE_SIZE 256 #define IPU_PSYS_OUT_MSG_SIZE 256 /** * Each event from FW will be first queued into a * event queue, define the queue depth here */ #define MAX_TASK_EVENT_QUEUE_SIZE 3 /** * Each task queue from user will be first queued into * a task queue, define the queue depth here */ #define MAX_TASK_REQUEST_QUEUE_SIZE 8 // TODO: use FW configuration to replace it #define IPU_PSYS_NUM_STREAMS 8 #define INVALID_STREAM_ID 0xFF /** * Task request queues per stream * * Each task will first assigned a task queue buffer here, * all the nodes will share the same task queue, maximum * queue will be full there. 
*/ struct ipu_psys_task_queue { struct ipu_psys_term_buffers task_buffers[MAX_GRAPH_TERMINALS]; dma_addr_t ipu7_addr[MAX_GRAPH_TERMINALS]; u32 index; /* index of the task request buffer */ u8 available; /* whether the task queue can be used */ struct ipu7_msg_task *msg_task; dma_addr_t task_dma_addr; /* task state of each task input, represents enum ipu7_msg_task_state */ enum ipu7_msg_task_state task_state; }; struct psys_fw_log { struct mutex mutex; /* protect whole struct */ void *head; void *addr; u32 count; /* running counter of log */ u32 size; /* actual size of log content, in bytes */ }; /** * Task request event context * * Each task request gets an event ack from FW, which is saved to this * structure for the user to dequeue. */ struct ipu_psys_task_ack { u8 graph_id; /* graph id of the task request */ u8 node_ctx_id; /* logical node id */ u8 frame_id; /* frame id of the original task request */ u8 available; /* if the task ack slot can be used */ u32 err_code; /* error indication to user */ }; /** * A stream is equivalent to a pipe; each stream has * its dedicated graph_id and task request queue. * * For multiple-stream support. */ struct ipu7_psys_stream { struct ipu7_psys_fh *fh; u8 graph_id; /* graph_id on this stream */ /* Serialize task done queue */ struct mutex event_mutex; /* current event queue write index, incremental */ u32 event_write_index; /* current event queue read index, incremental */ u32 event_read_index; struct ipu_psys_task_ack event_queue[MAX_TASK_EVENT_QUEUE_SIZE]; /* Serialize task queue */ struct mutex task_mutex; struct ipu_psys_task_queue task_queue[MAX_TASK_REQUEST_QUEUE_SIZE]; u8 num_nodes; /* Number of enabled nodes */ struct graph_node nodes[MAX_GRAPH_NODES]; u8 q_id[MAX_GRAPH_NODES]; /* syscom input queue id assigned by fw */ struct completion graph_open; struct completion graph_close; /* Graph state, represents enum ipu7_msg_graph_state */ enum ipu7_msg_graph_state graph_state; }; struct task_struct; struct ipu7_psys { struct ipu_psys_capability caps; struct cdev cdev; struct device dev; struct mutex mutex; /* Psys various */ int ready; /* psys fw status */ bool icache_prefetch_sp; bool icache_prefetch_isp; spinlock_t ready_lock; /* protect psys firmware state */ struct list_head fhs; struct ipu7_psys_pdata *pdata; struct ipu7_bus_device *adev; #ifdef CONFIG_DEBUG_FS struct dentry *debugfsdir; #endif unsigned long timeout; struct psys_fw_log *fw_log; int power_gating; /* available graph_id range is 0 ~ IPU_PSYS_NUM_STREAMS - 1 */ u8 graph_id[IPU_PSYS_NUM_STREAMS]; /* Device state, represents enum ipu7_msg_dev_state */ enum ipu7_msg_dev_state dev_state; struct ipu7_psys_config *subsys_config; dma_addr_t subsys_config_dma_addr; }; struct ipu7_psys_fh { struct ipu7_psys *psys; struct mutex mutex; /* Protects bufmap & kcmds fields */ struct list_head list; struct list_head bufmap; wait_queue_head_t wait; struct ipu7_psys_stream *ip; }; struct ipu7_dma_buf_attach { struct device *dev; u64 len; void *userptr; struct sg_table *sgt; bool vma_is_io; struct page **pages; size_t npages; }; struct ipu7_psys_kbuffer { u64 len; void *userptr; u32 flags; int fd; void *kaddr; struct list_head list; dma_addr_t dma_addr; struct sg_table *sgt; struct dma_buf_attachment *db_attach; struct dma_buf *dbuf; bool valid; /* True when buffer is usable */ }; #define inode_to_ipu_psys(inode) \ container_of((inode)->i_cdev, struct ipu7_psys, cdev) void ipu7_psys_setup_hw(struct ipu7_psys *psys); void ipu7_psys_subdomains_power(struct ipu7_psys *psys, bool on); void 
ipu7_psys_handle_events(struct ipu7_psys *psys); long ipu7_ioctl_dqevent(struct ipu_psys_event *event, struct ipu7_psys_fh *fh, unsigned int f_flags); #endif /* IPU7_PSYS_H */ ipu7-drivers-0~git202407090310.f40d5a70/include/000077500000000000000000000000001465530421300203705ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/include/media/000077500000000000000000000000001465530421300214475ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/include/media/ipu7-isys.h000066400000000000000000000020461465530421300234730ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2014 - 2022 Intel Corporation */ #ifndef MEDIA_IPU_ISYS_H #define MEDIA_IPU_ISYS_H #include #include #include #include struct ipu7_isys_csi2_config { unsigned int nlanes; unsigned int port; }; struct ipu7_isys_subdev_i2c_info { struct i2c_board_info board_info; int i2c_adapter_id; char i2c_adapter_bdf[32]; }; struct ipu7_isys_subdev_info { struct ipu7_isys_csi2_config *csi2; struct ipu7_isys_subdev_i2c_info i2c; }; struct ipu7_isys_clk_mapping { struct clk_lookup clkdev_data; char *platform_clock_name; }; struct ipu7_isys_subdev_pdata { struct ipu7_isys_subdev_info **subdevs; struct ipu7_isys_clk_mapping *clk_map; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) struct sensor_async_subdev { struct v4l2_async_subdev asd; struct ipu7_isys_csi2_config csi2; }; #else struct sensor_async_sd { struct v4l2_async_connection asc; struct ipu7_isys_csi2_config csi2; }; #endif #endif /* MEDIA_IPU_ISYS_H */ ipu7-drivers-0~git202407090310.f40d5a70/include/uapi/000077500000000000000000000000001465530421300213265ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/include/uapi/linux/000077500000000000000000000000001465530421300224655ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/include/uapi/linux/ipu7-psys.h000066400000000000000000000154741465530421300245270ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (C) 2013 - 2023 Intel Corporation */ #ifndef _UAPI_IPU_PSYS_H #define _UAPI_IPU_PSYS_H #ifdef __KERNEL__ #include #else #include #endif struct ipu_psys_capability { uint32_t version; uint8_t driver[20]; uint8_t dev_model[32]; uint32_t reserved[17]; } __attribute__ ((packed)); /** * PSYS event error to user */ enum ipu_psys_event_error { IPU_PSYS_EVT_ERROR_NONE = 0U, IPU_PSYS_EVT_ERROR_INTERNAL = 1U, IPU_PSYS_EVT_ERROR_FRAME = 2U, IPU_PSYS_EVT_ERROR_FORCE_CLOSED = 3U, IPU_PSYS_EVT_ERROR_MAX } __attribute__ ((packed)); /** * struct ipu_psys_event - event back from driver to user for requested tasks * @graph_id: unique id per graph * @node_ctx_id: unique logical context id per cb * @frame_id: unique id per frame, originally assigned by user * @error: error code of ipu_psys_event_error type */ struct ipu_psys_event { uint8_t graph_id; uint8_t node_ctx_id; uint8_t frame_id; uint32_t error; int32_t reserved[2]; } __attribute__ ((packed)); /** * struct ipu_psys_buffer - for input/output terminals * @len: total allocated size @ base address * @userptr: user pointer * @fd: DMA-BUF handle * @data_offset: offset to valid data * @bytes_used: amount of valid data including offset * @flags: flags */ struct ipu_psys_buffer { uint64_t len; union { int fd; void __user *userptr; uint64_t reserved; } base; uint32_t data_offset; uint32_t bytes_used; uint32_t flags; uint32_t reserved[2]; } __attribute__ ((packed)); /**< Max number of logical nodes */ #define MAX_GRAPH_NODES 5U /**< Max number of profiles */ #define MAX_GRAPH_NODE_PROFILES 1U #define MAX_GRAPH_LINKS 10U #define MAX_GRAPH_TERMINALS 32U /** * Settings per node on the bitmap * @teb: Terminal Enable bitmap * @deb: Device Enable bitmap * @rbm: Routing bitmap * @reb: Routing Enable bitmap */ struct node_profile { uint32_t teb[2]; uint32_t deb[4]; uint32_t rbm[4]; uint32_t reb[4]; } __attribute__ ((packed)); /** * struct node_ternimal - terminal description on the node * * A terminal is the logical connection entity in the node; * it can be of different types, and one node can have multiple terminals. * * @term_id: id of the terminal * @buf_size: payload (PAC or SDI) size of the given terminal */ struct node_ternimal { uint8_t term_id; uint32_t buf_size; } __attribute__ ((packed)); /** * struct graph_node - Description of a graph node, used for device * and graph open purposes * * A node is the logical entity of a graph; one graph can have multiple * nodes, connected to each other through terminals. * * @node_rsrc_id: Physical node id * @node_ctx_id: Logical node id, unique per graph * @num_terms: Number of enabled terms in the node * @profiles: bitmap settings on the node * @terminals: terminal info on the node */ struct graph_node { uint8_t node_rsrc_id; uint8_t node_ctx_id; uint8_t num_terms; struct node_profile profiles[MAX_GRAPH_NODE_PROFILES]; struct node_ternimal terminals[MAX_GRAPH_TERMINALS]; } __attribute__ ((packed)); /** * struct graph_link_ep - link endpoint description * * A link endpoint is used to describe the connection between different nodes. * * @node_ctx_id: Node ID as described in the list of nodes in the fgraph * @term_id: Term ID as described in the list of terms in the fgraph */ struct graph_link_ep { uint8_t node_ctx_id; uint8_t term_id; } __attribute__ ((packed)); /** * All local links (links between nodes within a subsystem) require this * value to be set. */ #define IPU_PSYS_FOREIGN_KEY_NONE UINT16_MAX /** None value of TOP pbk id if not used */ #define IPU_PSYS_LINK_PBK_ID_NONE UINT8_MAX /** None value of TOP pbk slot id if not used */ #define IPU_PSYS_LINK_PBK_SLOT_ID_NONE UINT8_MAX /** Static Offline */ #define IPU_PSYS_LINK_STREAMING_MODE_SOFF 0U /** * struct graph_link - graph link connecting CBs * * The sink and source links are defined with terminal information. * * @ep_src: Source side of the link * @ep_dst: Destination side of the link * @foreign_key: MUST set to IPU_PSYS_FOREIGN_KEY_NONE * @streaming_mode: Value should be set from IPU_PSYS_LINK_STREAMING_MODE_X * @pbk_id: TOP PBK id used to connect to an external IP * @pbk_slot_id: TOP PBK slot id used to connect to an external IP * @delayed_link: A delayed link between producer frame N and consumer frame N+1 */ struct graph_link { struct graph_link_ep ep_src; struct graph_link_ep ep_dst; uint16_t foreign_key; uint8_t streaming_mode; uint8_t pbk_id; uint8_t pbk_slot_id; uint8_t delayed_link; } __attribute__ ((packed)); /** * struct ipu_psys_graph_info * * Topology that describes IPU internal connections, including CB and terminal * information.
* * @graph_id: id of graph, set initially to 0xFF by user, returned by driver * @num_nodes: number of nodes in graph * @nodes: node entity * @links: link entity */ struct ipu_psys_graph_info { uint8_t graph_id; uint8_t num_nodes; struct graph_node __user *nodes; struct graph_link links[MAX_GRAPH_LINKS]; } __attribute__ ((packed)); /** * struct ipu_psys_term_buffers * * Description of each terminal's payload buffer * * @term_id: terminal id * @term_buf: terminal buffer */ struct ipu_psys_term_buffers { uint8_t term_id; struct ipu_psys_buffer term_buf; } __attribute__ ((packed)); /** * struct ipu_psys_task_request * * A task request is sent by the user, with associated terminal * payloads, for the IPU to process; each task request results in * an event, @see ipu_psys_event * * @graph_id: graph id returned from graph open * @node_ctx_id: unique logical context id per cb * @frame_id: frame id * @payload_reuse_bm: Any terminal marked here must be enabled * @term_buf_count: the number of terminal buffers * @task_buffers: terminal buffers on the task request */ struct ipu_psys_task_request { uint8_t graph_id; uint8_t node_ctx_id; uint8_t frame_id; uint32_t payload_reuse_bm[2]; uint8_t term_buf_count; struct ipu_psys_term_buffers __user *task_buffers; } __attribute__ ((packed)); #define IPU_BUFFER_FLAG_INPUT (1 << 0) #define IPU_BUFFER_FLAG_OUTPUT (1 << 1) #define IPU_BUFFER_FLAG_MAPPED (1 << 2) #define IPU_BUFFER_FLAG_NO_FLUSH (1 << 3) #define IPU_BUFFER_FLAG_DMA_HANDLE (1 << 4) #define IPU_BUFFER_FLAG_USERPTR (1 << 5) #define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability) #define IPU_IOC_MAPBUF _IOWR('A', 2, int) #define IPU_IOC_UNMAPBUF _IOWR('A', 3, int) #define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer) #define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer) #define IPU_IOC_DQEVENT _IOWR('A', 6, struct ipu_psys_event) #define IPU_IOC_GRAPH_OPEN _IOWR('A', 7, struct ipu_psys_graph_info) #define IPU_IOC_TASK_REQUEST _IOWR('A', 8, struct ipu_psys_task_request) #define IPU_IOC_GRAPH_CLOSE _IOWR('A', 9, int) #endif /* _UAPI_IPU_PSYS_H */ ipu7-drivers-0~git202407090310.f40d5a70/patch/000077500000000000000000000000001465530421300200445ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.11/000077500000000000000000000000001465530421300206175ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.11/in-tree-build/000077500000000000000000000000001465530421300232575ustar00rootroot000000000000000001-media-pci-intel-Add-IPU7-Kconfig-Makefile.patch000066400000000000000000000021521465530421300340410ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.11/in-tree-buildFrom 60309890288bd0cc6b179eacdcbffce83f50cd1e Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 2 Jul 2024 17:26:08 +0800 Subject: [PATCH] media: pci: intel: Add IPU7 Kconfig & Makefile --- drivers/media/pci/intel/Kconfig | 1 + drivers/media/pci/intel/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig index d9fcddce028b..948cda08fff5 100644 --- a/drivers/media/pci/intel/Kconfig +++ b/drivers/media/pci/intel/Kconfig @@ -2,6 +2,7 @@ source "drivers/media/pci/intel/ipu3/Kconfig" source "drivers/media/pci/intel/ipu6/Kconfig" +source "drivers/media/pci/intel/ipu7/Kconfig" source "drivers/media/pci/intel/ivsc/Kconfig" config IPU_BRIDGE diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index 3a2cc6567159..ff0fea13422d 100644 --- 
a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o obj-y += ipu3/ obj-y += ivsc/ obj-$(CONFIG_VIDEO_INTEL_IPU6) += ipu6/ +obj-$(CONFIG_VIDEO_INTEL_IPU7) += ipu7/ -- 2.43.0 ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.4/000077500000000000000000000000001465530421300205415ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.4/0001-LNL-Enable-GPIO-pinctrl-driver.patch000066400000000000000000000230471465530421300274550ustar00rootroot00000000000000From c96cc019f5a9081eafc96ae1299f09cb079ccfc1 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 13 Nov 2023 14:28:47 +0200 Subject: [PATCH 1/3] LNL: Enable GPIO pinctrl driver It is still under review here: * https://lore.kernel.org/all/20231113123147.4075203-1- andriy.shevchenko@linux.intel.com/ Including changes: * pinctrl: intel: Revert "Unexport intel_pinctrl_probe()" * pinctrl: intel: Add a generic Intel pin control platform driver Signed-off-by: Andy Shevchenko --- drivers/pinctrl/intel/Kconfig | 10 + drivers/pinctrl/intel/Makefile | 1 + .../pinctrl/intel/pinctrl-intel-platform.c | 231 ++++++++++++++++++ drivers/pinctrl/intel/pinctrl-intel.c | 5 +- drivers/pinctrl/intel/pinctrl-intel.h | 3 + 5 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 drivers/pinctrl/intel/pinctrl-intel-platform.c diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig index b3ec00624416..e14353d706cc 100644 --- a/drivers/pinctrl/intel/Kconfig +++ b/drivers/pinctrl/intel/Kconfig @@ -66,6 +66,16 @@ config PINCTRL_INTEL select GPIOLIB select GPIOLIB_IRQCHIP +config PINCTRL_INTEL_PLATFORM + tristate "Intel pinctrl and GPIO platform driver" + depends on ACPI + select PINCTRL_INTEL + help + This pinctrl driver provides an interface that allows configuring + of Intel PCH pins and using them as GPIOs. 
Currently the following + Intel SoCs / platforms require this to be functional: + - Lunar Lake + config PINCTRL_ALDERLAKE tristate "Intel Alder Lake pinctrl and GPIO driver" depends on ACPI diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile index 906dd6c8d837..bf14268de763 100644 --- a/drivers/pinctrl/intel/Makefile +++ b/drivers/pinctrl/intel/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_LYNXPOINT) += pinctrl-lynxpoint.o obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o obj-$(CONFIG_PINCTRL_MOOREFIELD) += pinctrl-moorefield.o obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o +obj-$(CONFIG_PINCTRL_INTEL_PLATFORM) += pinctrl-intel-platform.o obj-$(CONFIG_PINCTRL_ALDERLAKE) += pinctrl-alderlake.o obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o diff --git a/drivers/pinctrl/intel/pinctrl-intel-platform.c b/drivers/pinctrl/intel/pinctrl-intel-platform.c new file mode 100644 index 000000000000..ff8a01fc9fa4 --- /dev/null +++ b/drivers/pinctrl/intel/pinctrl-intel-platform.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel PCH pinctrl/GPIO driver + * + * Copyright (C) 2021-2023, Intel Corporation + * Author: Andy Shevchenko + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "pinctrl-intel.h" + +struct intel_platform_pins { + struct pinctrl_pin_desc *pins; + size_t npins; +}; + +static int intel_platform_pinctrl_prepare_pins(struct device *dev, size_t base, + const char *name, u32 size, + struct intel_platform_pins *pins) +{ + struct pinctrl_pin_desc *descs; + char **pin_names; + unsigned int i; + size_t bytes; + + pin_names = devm_kasprintf_strarray(dev, name, size); + if (IS_ERR(pin_names)) + return PTR_ERR(pin_names); + + if (unlikely(check_mul_overflow(base + size, sizeof(*descs), &bytes))) + return -ENOMEM; + + descs = devm_krealloc(dev, pins->pins, bytes, GFP_KERNEL); + if (!descs) + return -ENOMEM; + + for (i = 0; i < size; i++) { + unsigned int pin_number = base + i; + char *pin_name = pin_names[i]; + struct pinctrl_pin_desc *desc; + + /* Unify delimiter for pin name */ + strreplace(pin_name, '-', '_'); + + desc = &descs[pin_number]; + desc->number = pin_number; + desc->name = pin_name; + } + + pins->pins = descs; + pins->npins = base + size; + + return 0; +} + +static int intel_platform_pinctrl_prepare_group(struct device *dev, + struct fwnode_handle *child, + struct intel_padgroup *gpp, + struct intel_platform_pins *pins) +{ + size_t base = pins->npins; + const char *name; + u32 size; + int ret; + + ret = fwnode_property_read_string(child, "intc-gpio-group-name", &name); + if (ret) + return ret; + + ret = fwnode_property_read_u32(child, "intc-gpio-pad-count", &size); + if (ret) + return ret; + + ret = intel_platform_pinctrl_prepare_pins(dev, base, name, size, pins); + if (ret) + return ret; + + gpp->base = base; + gpp->size = size; + gpp->gpio_base = INTEL_GPIO_BASE_MATCH; + + return 0; +} + +static int intel_platform_pinctrl_prepare_community(struct device *dev, + struct intel_community *community, + struct intel_platform_pins *pins) +{ + struct fwnode_handle *child; + struct intel_padgroup *gpps; + unsigned int group; + size_t ngpps; + u32 offset; + int ret; + + ret = device_property_read_u32(dev, "intc-gpio-pad-ownership-offset", &offset); + if (ret) + return ret; + community->padown_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-pad-configuration-lock-offset", &offset); + if (ret) + return ret; + 
community->padcfglock_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-host-software-pad-ownership-offset", &offset); + if (ret) + return ret; + community->hostown_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-status-offset", &offset); + if (ret) + return ret; + community->is_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-enable-offset", &offset); + if (ret) + return ret; + community->ie_offset = offset; + + ngpps = device_get_child_node_count(dev); + if (!ngpps) + return -ENODEV; + + gpps = devm_kcalloc(dev, ngpps, sizeof(*gpps), GFP_KERNEL); + if (!gpps) + return -ENOMEM; + + group = 0; + device_for_each_child_node(dev, child) { + struct intel_padgroup *gpp = &gpps[group]; + + gpp->reg_num = group; + + ret = intel_platform_pinctrl_prepare_group(dev, child, gpp, pins); + if (ret) + return ret; + + group++; + } + + community->ngpps = ngpps; + community->gpps = gpps; + + return 0; +} + +static int intel_platform_pinctrl_prepare_soc_data(struct device *dev, + struct intel_pinctrl_soc_data *data) +{ + struct intel_platform_pins pins = {}; + struct intel_community *communities; + size_t ncommunities; + unsigned int i; + int ret; + + /* Version 1.0 of the specification assumes only a single community per device node */ + ncommunities = 1, + communities = devm_kcalloc(dev, ncommunities, sizeof(*communities), GFP_KERNEL); + if (!communities) + return -ENOMEM; + + for (i = 0; i < ncommunities; i++) { + struct intel_community *community = &communities[i]; + + community->barno = i; + community->pin_base = pins.npins; + + ret = intel_platform_pinctrl_prepare_community(dev, community, &pins); + if (ret) + return ret; + + community->npins = pins.npins - community->pin_base; + } + + data->ncommunities = ncommunities; + data->communities = communities; + + data->npins = pins.npins; + data->pins = pins.pins; + + return 0; +} + +static int intel_platform_pinctrl_probe(struct platform_device *pdev) +{ + struct intel_pinctrl_soc_data *data; + struct device *dev = &pdev->dev; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = intel_platform_pinctrl_prepare_soc_data(dev, data); + if (ret) + return ret; + + return intel_pinctrl_probe(pdev, data); +} + +static const struct acpi_device_id intel_platform_pinctrl_acpi_match[] = { + { "INTC105F" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, intel_platform_pinctrl_acpi_match); + +INTEL_PINCTRL_PM_OPS(intel_pinctrl_pm_ops); + +static struct platform_driver intel_platform_pinctrl_driver = { + .probe = intel_platform_pinctrl_probe, + .driver = { + .name = "intel-pinctrl", + .acpi_match_table = intel_platform_pinctrl_acpi_match, + .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops), + }, +}; +module_platform_driver(intel_platform_pinctrl_driver); + +MODULE_AUTHOR("Andy Shevchenko "); +MODULE_DESCRIPTION("Intel PCH pinctrl/GPIO driver"); +MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(PINCTRL_INTEL); diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index c7a71c49df0a..37f2bd8dea45 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1532,8 +1532,8 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl, return PTR_ERR_OR_ZERO(pwm); } -static int intel_pinctrl_probe(struct platform_device *pdev, - const struct intel_pinctrl_soc_data *soc_data) +int intel_pinctrl_probe(struct platform_device *pdev, + const struct intel_pinctrl_soc_data *soc_data) { 
ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.4/0003-Add-IPU7-Kconfig-and-Makefile.patch
From a00700ad5e7de77e03572b9a357a464071a7326a Mon Sep 17 00:00:00 2001
From: Hao Yao <hao.yao@intel.com>
Date: Fri, 26 Apr 2024 17:44:20 +0800
Subject: [PATCH 3/3] Add IPU7 Kconfig and Makefile

---
 drivers/media/pci/Kconfig        | 2 +-
 drivers/media/pci/intel/Kconfig  | 4 ++++
 drivers/media/pci/intel/Makefile | 1 +
 3 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 drivers/media/pci/intel/Kconfig

diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 480194543d05..ee095bde0b68 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -73,7 +73,7 @@ config VIDEO_PCI_SKELETON
 	  Enable build of the skeleton PCI driver, used as a reference
 	  when developing new drivers.
 
-source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/Kconfig"
 
 endif #MEDIA_PCI_SUPPORT
 endif #PCI
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
new file mode 100644
index 000000000000..1fc42fd3a733
--- /dev/null
+++ b/drivers/media/pci/intel/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ipu7/Kconfig"
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
index 0b4236c4db49..f4cad3411e22 100644
--- a/drivers/media/pci/intel/Makefile
+++ b/drivers/media/pci/intel/Makefile
@@ -4,3 +4,4 @@
 #
 
 obj-y	+= ipu3/
+obj-$(CONFIG_VIDEO_INTEL_IPU7)	+= ipu7/
-- 
2.43.2
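Note on the patch above: drivers/media/pci/intel/ipu7/Kconfig is supplied by the IPU7 driver sources in this repository, not by this patch. A minimal sketch of what such an entry could look like, assuming it defines the VIDEO_INTEL_IPU7 symbol that the Makefile hunk references; the dependency list and help text here are illustrative only:

config VIDEO_INTEL_IPU7
	tristate "Intel IPU7 driver"
	depends on VIDEO_DEV && PCI
	help
	  This is the Intel Image Processing Unit 7 (IPU7) driver, used
	  to handle MIPI CSI-2 camera sensors on recent Intel platforms
	  such as Lunar Lake.

	  To compile this driver as a module, choose M here.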
-source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/Kconfig" endif #MEDIA_PCI_SUPPORT endif #PCI diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig new file mode 100644 index 000000000000..1fc42fd3a733 --- /dev/null +++ b/drivers/media/pci/intel/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/ipu7/Kconfig" diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index 0b4236c4db49..f4cad3411e22 100644 --- a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -4,3 +4,4 @@ # obj-y += ipu3/ +obj-$(CONFIG_VIDEO_INTEL_IPU7) += ipu7/ -- 2.43.2 ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8/000077500000000000000000000000001465530421300205455ustar00rootroot000000000000000001-media-pci-ipu-bridge-Support-ov13b10-camera.patch000066400000000000000000000015031465530421300317320ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8From 60ddc99ffcfd25c3c980a1b4036c081217f615e5 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 2 Jul 2024 17:26:53 +0800 Subject: [PATCH 1/4] media: pci: ipu-bridge: Support ov13b10 camera --- drivers/media/pci/intel/ipu-bridge.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index f980e3125a7b..ea51537c6b4e 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -58,6 +58,7 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("INT3537", 1, 437000000), /* Omnivision ov13b10 */ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), + IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), /* GalaxyCore GC0310 */ IPU_SENSOR_CONFIG("INT0310", 0), }; -- 2.45.2 0002-media-pci-ipu-bridge-Support-ov02c10-camera.patch000066400000000000000000000016201465530421300317320ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8From 9cceb57c706b83b565a19abbb0327af58dd751f3 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Wed, 3 Jul 2024 16:06:52 +0800 Subject: [PATCH 2/4] media: pci: ipu-bridge: Support ov02c10 camera --- drivers/media/pci/intel/ipu-bridge.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index ea51537c6b4e..1a32b91c2395 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -56,6 +56,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("INT3474", 1, 180000000), /* Hynix hi556 */ IPU_SENSOR_CONFIG("INT3537", 1, 437000000), + /* Omnivision ov02c10 */ + IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), /* Omnivision ov13b10 */ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), -- 2.45.2 ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8/in-tree-build/000077500000000000000000000000001465530421300232055ustar00rootroot000000000000000003-media-pci-intel-Add-IPU7-Kconfig-Makefile.patch000066400000000000000000000020521465530421300337700ustar00rootroot00000000000000ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8/in-tree-buildFrom 7704cde6485721b1d9a90c00a7681d344abdb54a Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 2 Jul 2024 17:26:08 +0800 Subject: [PATCH 3/4] media: pci: intel: Add IPU7 Kconfig & Makefile --- drivers/media/pci/intel/Kconfig | 1 + drivers/media/pci/intel/Makefile | 1 + 2 files changed, 2 
ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8/in-tree-build/0003-media-pci-intel-Add-IPU7-Kconfig-Makefile.patch
From 7704cde6485721b1d9a90c00a7681d344abdb54a Mon Sep 17 00:00:00 2001
From: Hao Yao <hao.yao@intel.com>
Date: Tue, 2 Jul 2024 17:26:08 +0800
Subject: [PATCH 3/4] media: pci: intel: Add IPU7 Kconfig & Makefile

---
 drivers/media/pci/intel/Kconfig  | 1 +
 drivers/media/pci/intel/Makefile | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
index ee4684159d3d..3b325d2b095d 100644
--- a/drivers/media/pci/intel/Kconfig
+++ b/drivers/media/pci/intel/Kconfig
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ipu7/Kconfig"
 source "drivers/media/pci/intel/ivsc/Kconfig"
 
 config IPU_BRIDGE
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
index f199a97e1d78..213056608aca 100644
--- a/drivers/media/pci/intel/Makefile
+++ b/drivers/media/pci/intel/Makefile
@@ -4,4 +4,5 @@
 #
 obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o
 obj-y	+= ipu3/
+obj-y	+= ipu7/
 obj-y	+= ivsc/
-- 
2.45.2

ipu7-drivers-0~git202407090310.f40d5a70/patch/v6.8/in-tree-build/0004-media-i2c-Add-ov02c10-Kconfig-Makefile.patch
From 710b52437ed6d109306fd35e886dedcd76d735a6 Mon Sep 17 00:00:00 2001
From: Hao Yao <hao.yao@intel.com>
Date: Wed, 3 Jul 2024 16:04:10 +0800
Subject: [PATCH 4/4] media: i2c: Add ov02c10 Kconfig & Makefile

---
 drivers/media/i2c/Kconfig  | 9 +++++++++
 drivers/media/i2c/Makefile | 1 +
 2 files changed, 10 insertions(+)

diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 4c3435921f19..bf7dc2514666 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -322,6 +322,15 @@ config VIDEO_OV02A10
 	  To compile this driver as a module, choose M here: the
 	  module will be called ov02a10.
 
+config VIDEO_OV02C10
+	tristate "OmniVision OV02C10 sensor support"
+	help
+	  This is a Video4Linux2 sensor driver for the OmniVision
+	  OV02C10 camera.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ov02c10.
+
 config VIDEO_OV08D10
 	tristate "OmniVision OV08D10 sensor support"
 	help
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index dfbe6448b549..9ffc67e7a812 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
 obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
 obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
 obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
+obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o
 obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o
 obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o
 obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
-- 
2.45.2
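Note on the patch above: these hunks only wire ov02c10 into the media Kconfig and Makefile; the ov02c10.c driver source itself ships with this repository. For orientation, a heavily trimmed, hypothetical skeleton of such a sensor module's boilerplate is sketched below; every identifier is illustrative except the "OVTI02C1" HID, which mirrors the ipu-bridge entry added earlier.

// Hypothetical skeleton for illustration only -- not the shipped ov02c10.c.
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int ov02c10_example_probe(struct i2c_client *client)
{
	/* A real driver would register a v4l2_subdev, controls, etc. */
	return 0;
}

static void ov02c10_example_remove(struct i2c_client *client)
{
	/* A real driver would unregister the subdev and free resources. */
}

static const struct acpi_device_id ov02c10_example_acpi_ids[] = {
	{ "OVTI02C1" },	/* matches the HID added to ipu-bridge above */
	{ }
};
MODULE_DEVICE_TABLE(acpi, ov02c10_example_acpi_ids);

static struct i2c_driver ov02c10_example_driver = {
	.driver = {
		.name = "ov02c10",
		.acpi_match_table = ov02c10_example_acpi_ids,
	},
	.probe = ov02c10_example_probe,
	.remove = ov02c10_example_remove,
};
module_i2c_driver(ov02c10_example_driver);

MODULE_DESCRIPTION("Illustrative OV02C10 skeleton (not the real driver)");
MODULE_LICENSE("GPL");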