ipu6-drivers-0~git202411190607.0ad49882/.github/workflows/test.yml

name: test
on:
  push:
  pull_request:
  schedule:
    - cron: '30 1 * * *' # every day at 1:30am

permissions: {}

jobs:
  Ubuntu-2204-dkms:
    runs-on: ubuntu-latest
    container: ubuntu:22.04
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Checkout ivsc-driver repo
      uses: actions/checkout@v4
      with:
        repository: intel/ivsc-driver
        path: ivsc-driver
    - name: Merge with ivsc-driver
      shell: bash
      run: |
        cp -r ivsc-driver/backport-include ivsc-driver/drivers ivsc-driver/include .
        rm -rf ivsc-driver
    - name: Prepare environment
      shell: bash
      run: |
        apt-get update --quiet;
        apt-get install --yes --no-install-recommends dkms gpg-agent kmod software-properties-common sudo
    - name: Download header files
      shell: bash
      run: |
        # Add unstable kernel ppa
        add-apt-repository ppa:canonical-kernel-team/unstable
        apt-get update --quiet;
        # latest generic kernel headers
        apt-get install --yes linux-headers-generic linux-headers-generic-hwe-22.04-edge
        # latest oem kernel
        apt-get install --yes linux-headers-oem-20.04 linux-headers-oem-22.04 linux-headers-oem-22.04b linux-headers-oem-22.04c
    - name: Register with dkms
      shell: bash
      run: |
        dkms add .
    - name: Compile driver
      shell: bash
      run: |
        # run dkms build against all available kernel headers
        failed=""
        succeeded=""
        for kver in /lib/modules/*/build; do
          # skip the kernel headers from the azure kernel. These kernel headers
          # are preinstalled and of no interest
          if [[ "$kver" == *"azure"* ]]; then
            echo "Skipping $kver - This is the kernel of the github runner.";
            continue;
          fi;
          test -d $kver || continue
          kver=${kver%/build}
          kver=${kver##*/}
          echo "=== Testing ${kver} ===";
          echo "running: dkms build -m ipu6-drivers/0.0.0 -k ${kver}";
          ret=$(sudo dkms build -m ipu6-drivers/0.0.0 -k ${kver} >&2; echo $?);
          if [ ${ret} -eq 0 ]; then
            succeeded="${succeeded} ${kver}"
          else
            echo "#### Skipped unexpected failure ${kver}";
            failed="${failed} ${kver}";
          fi;
        done
        if [ "x${failed}" != "x" ]; then
          echo "#### Failed kernels: ${failed}";
          exit 1
        fi
        echo "#### Successful builds for kernels: ${succeeded}";
  Ubuntu-2404-dkms:
    runs-on: ubuntu-latest
    container: ubuntu:24.04
    steps:
    - name: Checkout
      uses: actions/checkout@v4
    - name: Prepare environment
      shell: bash
      run: |
        sed -i 's/noble noble-updates noble-backports/noble noble-updates noble-backports noble-proposed/' /etc/apt/sources.list.d/ubuntu.sources
        apt-get update --quiet;
        apt-get install --yes --no-install-recommends dkms gpg-agent kmod software-properties-common sudo
    - name: Download header files
      shell: bash
      run: |
        # latest generic kernel headers
        apt-get install --yes linux-headers-generic linux-headers-generic-hwe-24.04-edge
        # latest oem kernel
        apt-get install --yes linux-headers-oem-24.04 linux-headers-oem-24.04a linux-headers-oem-24.04b
    - name: Register with dkms
      shell: bash
      run: |
        dkms add .
- name: Compile driver shell: bash run: | # run dkms build and all available kernel headers failed="" succeeded="" for kver in /lib/modules/*/build; do # skip the kernel headers from the azure kernel. These kernel headers # are preinstalled and of no interest if [[ "$kver" == *"azure"* ]]; then echo "Skipping $kver - This is the kernel of the github runner."; continue; fi; test -d $kver || continue kver=${kver%/build} kver=${kver##*/} echo "=== Testing ${kver} ==="; echo "running: dkms build -m ipu6-drivers/0.0.0 -k ${kver}"; ret=$(sudo dkms build -m ipu6-drivers/0.0.0 -k ${kver} >&2; echo $?); if [ ${ret} -eq 0 ]; then succeeded="${succeeded} ${kver}" else echo "#### Skipped unexpected failure ${kver}"; failed="${failed} ${kver}"; fi; done if [ "x${failed}" != "x" ]; then echo "#### Failed kernels: ${failed}"; exit 1 fi echo "#### Successful builds for kernels: ${succeeded}"; Ubuntu-rolling-dkms: runs-on: ubuntu-latest container: ubuntu:rolling steps: - name: Checkout uses: actions/checkout@v4 - name: Checkout ivsc-driver repo uses: actions/checkout@v4 with: repository: intel/ivsc-driver path: ivsc-driver - name: Merge with ivsc-driver shell: bash run: | cp -r ivsc-driver/backport-include ivsc-driver/drivers ivsc-driver/include . rm -rf ivsc-driver - name: Prepare environment shell: bash run: | apt-get update --quiet; apt-get install --yes --no-install-recommends dkms gpg-agent kmod software-properties-common sudo - name: Download header files shell: bash run: | # Add unstable kernel ppa add-apt-repository ppa:canonical-kernel-team/unstable apt-get update --quiet; # latest generic kernel headers apt-get install --yes linux-headers-generic # latest oem kernel apt-get install --yes linux-headers-oem-22.04 - name: Register with dkms shell: bash run: | dkms add . - name: Compile driver shell: bash run: | # run dkms build and all available kernel headers failed="" succeeded="" for kver in /lib/modules/*/build; do # skip the kernel headers from the azure kernel. These kernel headers # are preinstalled and of no interest if [[ "$kver" == *"azure"* ]]; then echo "Skipping $kver - This is the kernel of the github runner."; continue; fi; test -d $kver || continue kver=${kver%/build} kver=${kver##*/} echo "=== Testing ${kver} ==="; echo "running: dkms build -m ipu6-drivers/0.0.0 -k ${kver}"; ret=$(sudo dkms build -m ipu6-drivers/0.0.0 -k ${kver} >&2; echo $?); if [ ${ret} -eq 0 ]; then succeeded="${succeeded} ${kver}" else echo "#### Skipped unexpected failure ${kver}"; failed="${failed} ${kver}"; fi; done if [ "x${failed}" != "x" ]; then echo "#### Failed kernels: ${failed}"; exit 1 fi echo "#### Successful builds for kernels: ${succeeded}"; Debian-sid-dkms: runs-on: ubuntu-latest container: debian:sid steps: - name: Checkout uses: actions/checkout@v4 - name: Prepare environment shell: bash run: | apt-get update --quiet; apt-get install --yes --no-install-recommends dkms gpg-agent kmod software-properties-common sudo - name: Download header files shell: bash run: | apt-get update --quiet; # latest kernel apt-get install --yes linux-headers-amd64 - name: Register with dkms shell: bash run: | dkms add . - name: Compile driver shell: bash run: | # run dkms build and all available kernel headers failed="" succeeded="" for kver in /lib/modules/*/build; do # skip the kernel headers from the azure kernel. 
These kernel headers # are preinstalled and of no interest if [[ "$kver" == *"azure"* ]]; then echo "Skipping $kver - This is the kernel of the github runner."; continue; fi; test -d $kver || continue kver=${kver%/build} kver=${kver##*/} echo "=== Testing ${kver} ==="; echo "running: dkms build -m ipu6-drivers/0.0.0 -k ${kver}"; ret=$(sudo dkms build -m ipu6-drivers/0.0.0 -k ${kver} >&2; echo $?); if [ ${ret} -eq 0 ]; then succeeded="${succeeded} ${kver}" elif [ ${ret} -eq 77 ]; then echo "#### Config of ${kver} doesn't support"; else echo "#### Skipped unexpected failure ${kver}"; failed="${failed} ${kver}"; fi; done if [ "x${failed}" != "x" ]; then echo "#### Failed kernels: ${failed}"; exit 1 fi echo "#### Successful builds for kernels: ${succeeded}"; ipu6-drivers-0~git202411190607.0ad49882/LICENSE000066400000000000000000000431341471702545500177230ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. 
To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. ipu6-drivers-0~git202411190607.0ad49882/Makefile000066400000000000000000000066021471702545500203550ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2022 Intel Corporation. KERNELRELEASE ?= $(shell uname -r) KERNEL_VERSION := $(shell echo $(KERNELRELEASE) | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') version_lt = $(shell \ v1=$(1); \ v2=$(2); \ IFS='.'; \ set -- $$v1; i=$$1; j=$$2; k=$$3; \ set -- $$v2; a=$$1; b=$$2; c=$$3; \ if [ "$$i" -lt "$$a" ]; then \ echo "true"; \ elif [ "$$i" -eq "$$a" ] && [ "$$j" -lt "$$b" ]; then \ echo "true"; \ elif [ "$$i" -eq "$$a" ] && [ "$$j" -eq "$$b" ] && [ "$$k" -lt "$$c" ]; then \ echo "true"; \ else \ echo "false"; \ fi) KV_IVSC := 6.6.0 KV_IPU_BRIDGE := 6.6.0 KV_OV2740 := 6.8.0 KV_OV05C10 := 6.8.0 KV_IPU6_ISYS := 6.10.0 KERNEL_SRC ?= /lib/modules/$(KERNELRELEASE)/build MODSRC := $(shell pwd) ifeq ($(call version_lt,$(KERNEL_VERSION),$(KV_IVSC)),true) $(warning build ljca ivsc) obj-m += ljca.o ljca-y := drivers/mfd/ljca.o obj-m += spi-ljca.o spi-ljca-y := drivers/spi/spi-ljca.o obj-m += gpio-ljca.o gpio-ljca-y := drivers/gpio/gpio-ljca.o obj-m += i2c-ljca.o i2c-ljca-y := drivers/i2c/busses/i2c-ljca.o obj-m += mei-vsc.o mei-vsc-y := drivers/misc/mei/spi-vsc.o mei-vsc-y += drivers/misc/mei/hw-vsc.o obj-m += intel_vsc.o intel_vsc-y := drivers/misc/ivsc/intel_vsc.o obj-m += mei_csi.o mei_csi-y := drivers/misc/ivsc/mei_csi.o obj-m += mei_ace.o mei_ace-y := drivers/misc/ivsc/mei_ace.o obj-m += mei_pse.o mei_pse-y := drivers/misc/ivsc/mei_pse.o obj-m += mei_ace_debug.o mei_ace_debug-y := drivers/misc/ivsc/mei_ace_debug.o export CONFIG_INTEL_VSC = y endif export CONFIG_VIDEO_INTEL_IPU6 = m export CONFIG_IPU_SINGLE_BE_SOC_DEVICE = n export CONFIG_INTEL_SKL_INT3472 = m # export CONFIG_POWER_CTRL_LOGIC = m ifeq ($(call version_lt,$(KERNEL_VERSION),$(KV_IPU_BRIDGE)),true) export CONFIG_IPU_ISYS_BRIDGE = y export CONFIG_IPU_BRIDGE = n endif export EXTERNAL_BUILD = 1 ifeq ($(call version_lt,$(KERNEL_VERSION),$(KV_IPU6_ISYS)),true) obj-y += drivers/media/pci/intel/ipu6/ else obj-y += drivers/media/pci/intel/ipu6/psys/ endif export CONFIG_VIDEO_HM11B1 = m export CONFIG_VIDEO_OV01A1S = m export CONFIG_VIDEO_OV01A10 = m export CONFIG_VIDEO_OV02C10 = m export CONFIG_VIDEO_OV02E10 = m export CONFIG_VIDEO_HM2170 = m export CONFIG_VIDEO_HM2172 = m export CONFIG_VIDEO_HI556 = m ifeq ($(call version_lt,$(KERNEL_VERSION),$(KV_OV2740)),true) export CONFIG_VIDEO_OV2740 = m export CONFIG_VIDEO_GC5035 = m endif ifeq ($(call version_lt,$(KERNEL_VERSION),$(KV_OV05C10)),false) export 
CONFIG_VIDEO_OV05C10 = m
endif

obj-y += drivers/media/i2c/

ifeq ($(call version_lt,$(KERNEL_VERSION),$(KV_IVSC)),true)
ccflags-y += -I$(src)/backport-include/drivers/misc/mei/
endif

subdir-ccflags-y += -I$(src)/include/ \
	-DCONFIG_VIDEO_V4L2_SUBDEV_API
subdir-ccflags-$(CONFIG_INTEL_VSC) += \
	-DCONFIG_INTEL_VSC
subdir-ccflags-$(CONFIG_IPU_ISYS_BRIDGE) += \
	-DCONFIG_IPU_ISYS_BRIDGE
subdir-ccflags-$(CONFIG_IPU_BRIDGE) += \
	-DCONFIG_IPU_BRIDGE
subdir-ccflags-$(CONFIG_IPU_SINGLE_BE_SOC_DEVICE) += \
	-DCONFIG_IPU_SINGLE_BE_SOC_DEVICE
subdir-ccflags-$(CONFIG_INTEL_SKL_INT3472) += \
	-DCONFIG_INTEL_SKL_INT3472
subdir-ccflags-$(CONFIG_POWER_CTRL_LOGIC) += \
	-DCONFIG_POWER_CTRL_LOGIC
subdir-ccflags-y += $(subdir-ccflags-m)

all:
	$(MAKE) -C $(KERNEL_SRC) M=$(MODSRC) modules

modules_install:
	$(MAKE) INSTALL_MOD_DIR=updates -C $(KERNEL_SRC) M=$(MODSRC) modules_install

clean:
	$(MAKE) -C $(KERNEL_SRC) M=$(MODSRC) clean

ipu6-drivers-0~git202411190607.0ad49882/README.md

# ipu6-drivers

This repository supports MIPI cameras through the IPU6 on Intel Tiger Lake, Alder Lake, Raptor Lake and Meteor Lake platforms. There are 4 repositories that provide the complete setup:

- https://github.com/intel/ipu6-drivers - kernel drivers for the IPU and sensors
- https://github.com/intel/ipu6-camera-bins - IPU firmware and proprietary image processing libraries
- https://github.com/intel/ipu6-camera-hal - HAL for processing of images in userspace
- https://github.com/intel/icamerasrc/tree/icamerasrc_slim_api (branch: icamerasrc_slim_api) - GStreamer src plugin

## Content of this repository:
- IPU6 kernel driver
- Kernel patches needed
- Drivers for the HM11B1, OV01A1S, OV01A10, OV02C10, OV02E10, OV2740, HM2170, HM2172 and HI556 sensors

## Dependencies
- intel-vsc driver and LJCA USB driver (use https://github.com/intel/ivsc-driver.git for kernel version < 6.6)
- Intel USB-IO driver (https://github.com/intel/usbio-drivers.git)
- INTEL_SKL_INT3472 should be enabled

## Build instructions:
Three build methods are available:
1. build with the kernel source tree
2. build outside the kernel source tree
3. build with dkms

### 1. Build with kernel source tree
- Tested with kernel v6.10
- Check out the kernel
- Apply the patches (please check the detailed comments below):
  ```sh
  # For Meteor Lake B stepping only
  patch/0002-iommu-Add-passthrough-for-MTL-IPU.patch

  # For v5.15 <= kernel version < v5.17 and using INT3472
  patch/int3472-v5.15/*.patch

  # For v5.17 <= kernel version < v6.1.7 and using INT3472
  patch/int3472-v5.17/*.patch

  # For kernel version >= 6.1.7 and using INT3472
  patch/int3472-v6.1.7/*.patch

  # For kernel version >= 6.3 and using ov13b10
  patch/ov13b10-v6.3/*.patch

  # For kernel version v6.8,
  # patch/v6.8/0002-media-Add-IPU6-and-supported-sensors-config.patch
  # will change the related Kconfig & Makefile.
  patch/v6.8/*.patch

  # For kernel version v6.10+,
  # patch//in-tree-build/0001-media-ipu6-Workaround-to-build-PSYS.patch
  # will change the Makefile to build the IPU6 PSYS driver, and
  # patch//in-tree-build/0002-media-i2c-Add-sensors-config.patch
  # will change the Makefile & Kconfig for the I2C sensor drivers.
  patch//in-tree-build/*.patch
  patch//*.patch
  ```
- Copy the IPU6 drivers to the kernel source:
  - For kernel < 6.10, all IPU6 drivers are needed:
    ```sh
    cp -r drivers/media/pci/intel/ /drivers/media/pci/
    cp -r include/* /include/
    ```
  - For kernel >= 6.10, only the IPU6 PSYS driver is needed:
    ```sh
    # Out-Of-Tree IPU6 PSYS driver
    cp -r drivers/media/pci/intel/ipu6/psys /drivers/media/pci/intel/ipu6/
    cp include/uapi/linux/ipu-psys.h /include/uapi/linux/
    ```
- Copy the I2C sensor drivers to the kernel source (depending on your need):
  ```sh
  # Remove ipu6-drivers/drivers/media/i2c/{Kconfig,Makefile}
  # as the corresponding files in your kernel were changed by the patches before
  rm drivers/media/i2c/Kconfig drivers/media/i2c/Makefile
  cp -r drivers/media/i2c /drivers/media/
  ```
- Enable the following settings in .config
  ```conf
  CONFIG_VIDEO_INTEL_IPU6=m
  CONFIG_IPU_ISYS_BRIDGE=y
  # For kernel >= v6.8 please use IPU_BRIDGE instead of IPU_ISYS_BRIDGE
  CONFIG_IPU_BRIDGE=m

  CONFIG_VIDEO_OV01A1S=m
  CONFIG_VIDEO_OV01A10=m
  CONFIG_VIDEO_HM11B1=m
  CONFIG_VIDEO_OV02C10=m
  CONFIG_VIDEO_OV02E10=m
  CONFIG_VIDEO_HM2170=m
  CONFIG_VIDEO_HM2172=m

  # Set this only if you use only 1 camera and don't want too many device nodes in media-ctl
  # CONFIG_IPU_SINGLE_BE_SOC_DEVICE=y

  # If your kernel < 5.15 or CONFIG_INTEL_SKL_INT3472 is not set, please set this
  # CONFIG_POWER_CTRL_LOGIC=m
  ```
- For the LJCA and CVF part see below; please check the details at https://github.com/intel/ivsc-driver/blob/main/README.md.
  ```conf
  CONFIG_MFD_LJCA=m
  CONFIG_I2C_LJCA=m
  CONFIG_SPI_LJCA=m
  CONFIG_GPIO_LJCA=m
  CONFIG_USB_LJCA=m
  CONFIG_INTEL_MEI_VSC=m
  CONFIG_INTEL_MEI_VSC_HW=m
  CONFIG_INTEL_VSC=m
  CONFIG_INTEL_VSC_CSI=m
  CONFIG_INTEL_VSC_ACE=m
  CONFIG_INTEL_VSC_PSE=m
  CONFIG_INTEL_VSC_ACE_DEBUG=m
  ```

### 2. Build outside kernel source tree
- Requires the kernel headers to be installed on the build machine
- For kernel >= v6.10, this repo needs to be patched with ipu6-drivers/patches/*.patch (applied automatically if you use the DKMS build).
- For kernel >= v6.10, your kernel needs to be patched with patch/v6.10/*.patch.
- For kernel >= v6.8, the kernel still needs patch/v6.8/0004 & 0005 to make the upstream iVSC driver work correctly. For kernel <= v6.6, the out-of-tree iVSC driver needs to be built together.
- To prepare the out-of-tree iVSC driver under kernel <= v6.6:
  ```sh
  cd ipu6-drivers
  git clone https://github.com/intel/ivsc-driver.git
  cp -r ivsc-driver/backport-include ivsc-driver/drivers ivsc-driver/include .
  rm -rf ivsc-driver
  ```
- To build and install:
  ```sh
  make -j`nproc` && sudo make modules_install && sudo depmod -a
  ```

### 3. Build with dkms
- Prepare the out-of-tree iVSC driver under kernel <= v6.6:
  ```sh
  cd ipu6-drivers
  git clone https://github.com/intel/ivsc-driver.git
  cp -r ivsc-driver/backport-include ivsc-driver/drivers ivsc-driver/include .
  rm -rf ivsc-driver
  ```
- Register, build and auto install:
  ```sh
  sudo dkms add .
  sudo dkms autoinstall ipu6-drivers/0.0.0
  ```

ipu6-drivers-0~git202411190607.0ad49882/SECURITY.md

# Security Policy

Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation.

## Reporting a Vulnerability

Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html).
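The top-level Makefile above and the dkms.conf that follows both rely on a small `version_lt` helper that compares two MAJOR.MINOR.PATCH kernel versions component by component to decide which backports and sensor drivers to build. The C sketch below is not part of the repository; it is a hypothetical standalone re-implementation (the name `version_lt` is borrowed from the shell helpers) meant only to make the comparison rule explicit.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical re-implementation of the version_lt helper used by the
 * Makefile and dkms.conf: returns true when v1 < v2, comparing the
 * MAJOR.MINOR.PATCH components numerically from left to right.
 * Missing components are treated as 0, as in the dkms.conf helper.
 */
static bool version_lt(const char *v1, const char *v2)
{
	int a[3] = { 0, 0, 0 }, b[3] = { 0, 0, 0 };
	int i;

	sscanf(v1, "%d.%d.%d", &a[0], &a[1], &a[2]);
	sscanf(v2, "%d.%d.%d", &b[0], &b[1], &b[2]);

	for (i = 0; i < 3; i++) {
		if (a[i] != b[i])
			return a[i] < b[i];
	}

	return false;	/* equal versions are not "less than" */
}

int main(void)
{
	/* 6.8 < 6.10 only under numeric comparison, not string comparison. */
	printf("%d\n", version_lt("6.8.0", "6.10.0"));	/* 1 */
	printf("%d\n", version_lt("6.10.0", "6.10.0"));	/* 0 */
	return 0;
}
```

Comparing numerically per component is what makes thresholds such as KV_OV2740=6.8.0 and KV_IPU6_ISYS=6.10.0 behave correctly for kernels like 6.10, which a plain string comparison would order before 6.8.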
ipu6-drivers-0~git202411190607.0ad49882/dkms.conf000066400000000000000000000067171471702545500205310ustar00rootroot00000000000000PACKAGE_NAME=ipu6-drivers PACKAGE_VERSION=0.0.0 MAKE="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir" CLEAN="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean" AUTOINSTALL="yes" BUILD_EXCLUSIVE_CONFIG="CONFIG_VIDEO_V4L2_I2C" version_lt() { IFS='.' read -r i j k <<< "$1" IFS='.' read -r a b c <<< "$2" i=${i:-0} j=${j:-0} k=${k:-0} a=${a:-0} b=${b:-0} c=${c:-0} if [ "$i" -lt "$a" ]; then return 0 elif [ "$i" -eq "$a" ]; then if [ "$j" -lt "$b" ]; then return 0 elif [ "$j" -eq "$b" ]; then if [ "$k" -lt "$c" ]; then return 0 fi fi fi return 1 } KERNEL_VERSION=$(echo ${kernelver} | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') KV_IVSC=6.6.0 KV_OV2740=6.8.0 KV_OV05C10=6.8.0 KV_IPU6_ISYS=6.10.0 BUILT_MODULE_NAME[0]="hm11b1" BUILT_MODULE_LOCATION[0]="drivers/media/i2c" DEST_MODULE_LOCATION[0]="/updates" BUILT_MODULE_NAME[1]="ov01a1s" BUILT_MODULE_LOCATION[1]="drivers/media/i2c" DEST_MODULE_LOCATION[1]="/updates" BUILT_MODULE_NAME[2]="ov01a10" BUILT_MODULE_LOCATION[2]="drivers/media/i2c" DEST_MODULE_LOCATION[2]="/updates" BUILT_MODULE_NAME[3]="ov02c10" BUILT_MODULE_LOCATION[3]="drivers/media/i2c" DEST_MODULE_LOCATION[3]="/updates" BUILT_MODULE_NAME[4]="ov02e10" BUILT_MODULE_LOCATION[4]="drivers/media/i2c" DEST_MODULE_LOCATION[4]="/updates" BUILT_MODULE_NAME[5]="hm2170" BUILT_MODULE_LOCATION[5]="drivers/media/i2c" DEST_MODULE_LOCATION[5]="/updates" BUILT_MODULE_NAME[6]="hm2172" BUILT_MODULE_LOCATION[6]="drivers/media/i2c" DEST_MODULE_LOCATION[6]="/updates" BUILT_MODULE_NAME[7]="hi556" BUILT_MODULE_LOCATION[7]="drivers/media/i2c" DEST_MODULE_LOCATION[7]="/updates" if ! version_lt ${KERNEL_VERSION} ${KV_OV05C10}; then BUILD_EXCLUSIVE_CONFIG="CONFIG_VIDEO_V4L2_I2C CONFIG_V4L2_CCI_I2C" BUILT_MODULE_NAME[8]="ov05c10" BUILT_MODULE_LOCATION[8]="drivers/media/i2c" DEST_MODULE_LOCATION[8]="/updates" fi if version_lt ${KERNEL_VERSION} ${KV_OV2740}; then BUILT_MODULE_NAME[8]="ov2740" BUILT_MODULE_LOCATION[8]="drivers/media/i2c" DEST_MODULE_LOCATION[8]="/updates" fi if ! 
version_lt ${KERNEL_VERSION} ${KV_IPU6_ISYS}; then PATCH[0]="0001-v6.10-IPU6-headers-used-by-PSYS.patch" fi BUILT_MODULE_NAME[9]="intel-ipu6-psys" BUILT_MODULE_LOCATION[9]="drivers/media/pci/intel/ipu6/psys" DEST_MODULE_LOCATION[9]="/updates" if version_lt ${KERNEL_VERSION} ${KV_IPU6_ISYS}; then BUILT_MODULE_NAME[10]="intel-ipu6" BUILT_MODULE_LOCATION[10]="drivers/media/pci/intel/ipu6" DEST_MODULE_LOCATION[10]="/updates" BUILT_MODULE_NAME[11]="intel-ipu6-isys" BUILT_MODULE_LOCATION[11]="drivers/media/pci/intel/ipu6" DEST_MODULE_LOCATION[11]="/updates" fi if version_lt ${KERNEL_VERSION} ${KV_IVSC}; then BUILT_MODULE_NAME[12]="ljca" DEST_MODULE_LOCATION[12]="/updates" BUILT_MODULE_NAME[13]="spi-ljca" DEST_MODULE_LOCATION[13]="/updates" BUILT_MODULE_NAME[14]="gpio-ljca" DEST_MODULE_LOCATION[14]="/updates" BUILT_MODULE_NAME[15]="i2c-ljca" DEST_MODULE_LOCATION[15]="/updates" BUILT_MODULE_NAME[16]="mei-vsc" DEST_MODULE_LOCATION[16]="/updates" BUILT_MODULE_NAME[17]="intel_vsc" DEST_MODULE_LOCATION[17]="/updates" BUILT_MODULE_NAME[18]="mei_csi" DEST_MODULE_LOCATION[18]="/updates" BUILT_MODULE_NAME[19]="mei_ace" DEST_MODULE_LOCATION[19]="/updates" BUILT_MODULE_NAME[20]="mei_pse" DEST_MODULE_LOCATION[20]="/updates" BUILT_MODULE_NAME[21]="mei_ace_debug" DEST_MODULE_LOCATION[21]="/updates" fi ipu6-drivers-0~git202411190607.0ad49882/drivers/000077500000000000000000000000001471702545500203675ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/000077500000000000000000000000001471702545500214465ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/000077500000000000000000000000001471702545500221235ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/Makefile000066400000000000000000000010661471702545500235660ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2021 Intel Corporation. obj-$(CONFIG_VIDEO_HM11B1) += hm11b1.o obj-$(CONFIG_VIDEO_GC5035) += gc5035.o obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o obj-$(CONFIG_VIDEO_OV05C10) += ov05c10.o obj-$(CONFIG_VIDEO_OV2740) += ov2740.o obj-$(CONFIG_VIDEO_HM2170) += hm2170.o obj-$(CONFIG_VIDEO_HM2170) += hm2172.o obj-$(CONFIG_VIDEO_HI556) += hi556.o obj-$(CONFIG_POWER_CTRL_LOGIC) += power_ctrl_logic.o ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/gc5035.c000066400000000000000000001347231471702545500232070ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020 Bitland Inc. // Copyright 2020 Google LLC.. // Copyright (c) 2022 Intel Corporation. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* External clock frequency supported by the driver */ #define GC5035_MCLK_RATE 24000000UL /* Number of lanes supported by this driver */ #define GC5035_DATA_LANES 2 /* Bits per sample of sensor output */ #define GC5035_BITS_PER_SAMPLE 10 #define MIPI_FREQ 438000000LL /* pixel rate = link frequency * 2 * lanes / BITS_PER_SAMPLE */ #define GC5035_PIXEL_RATE (MIPI_FREQ * 2LL * 2LL / 10) /* System registers (accessible regardless of the page. 
*/ /* Chip ID */ #define GC5035_REG_CHIP_ID_H 0xf0 #define GC5035_REG_CHIP_ID_L 0xf1 #define GC5035_CHIP_ID 0x5035 #define GC5035_ID(_msb, _lsb) ((_msb) << 8 | (_lsb)) /* Register page selection register */ #define GC5035_PAGE_REG 0xfe /* Page 0 registers */ /* Exposure control */ #define GC5035_REG_EXPOSURE_H 0x03 #define GC5035_REG_EXPOSURE_L 0x04 #define GC5035_EXPOSURE_H_MASK 0x3f #define GC5035_EXPOSURE_MIN 4 #define GC5035_EXPOSURE_STEP 1 /* Analog gain control */ #define GC5035_REG_ANALOG_GAIN 0xb6 #define GC5035_ANALOG_GAIN_MIN 256 #define GC5035_ANALOG_GAIN_MAX (16 * GC5035_ANALOG_GAIN_MIN) #define GC5035_ANALOG_GAIN_STEP 1 #define GC5035_ANALOG_GAIN_DEFAULT GC5035_ANALOG_GAIN_MIN /* Digital gain control */ #define GC5035_REG_DIGI_GAIN_H 0xb1 #define GC5035_REG_DIGI_GAIN_L 0xb2 #define GC5035_DGAIN_H_MASK 0x0f #define GC5035_DGAIN_L_MASK 0xfc #define GC5035_DGAIN_L_SHIFT 2 #define GC5035_DIGI_GAIN_MIN 0 #define GC5035_DIGI_GAIN_MAX 1023 #define GC5035_DIGI_GAIN_STEP 1 #define GC5035_DIGI_GAIN_DEFAULT GC5035_DIGI_GAIN_MAX /* Vblank control */ #define GC5035_REG_VTS_H 0x41 #define GC5035_REG_VTS_L 0x42 #define GC5035_VTS_H_MASK 0x3f #define GC5035_VTS_MAX 16383 #define GC5035_EXPOSURE_MARGIN 16 #define GC5035_REG_CTRL_MODE 0x3e #define GC5035_MODE_SW_STANDBY 0x01 #define GC5035_MODE_STREAMING 0x91 /* Page 1 registers */ /* Test pattern control */ #define GC5035_REG_TEST_PATTERN 0x8c #define GC5035_TEST_PATTERN_ENABLE 0x11 #define GC5035_TEST_PATTERN_DISABLE 0x10 /* Page 2 registers */ /* OTP access registers */ #define GC5035_REG_OTP_MODE 0xf3 #define GC5035_OTP_PRE_READ 0x20 #define GC5035_OTP_READ_MODE 0x12 #define GC5035_OTP_READ_DONE 0x00 #define GC5035_REG_OTP_DATA 0x6c #define GC5035_REG_OTP_ACCESS_ADDR_H 0x69 #define GC5035_REG_OTP_ACCESS_ADDR_L 0x6a #define GC5035_OTP_ACCESS_ADDR_H_MASK 0x1f #define GC5035_OTP_ADDR_MASK 0x1fff #define GC5035_OTP_ADDR_SHIFT 3 #define GC5035_REG_DD_TOTALNUM_H 0x01 #define GC5035_REG_DD_TOTALNUM_L 0x02 #define GC5035_DD_TOTALNUM_H_MASK 0x07 #define GC5035_REG_DD_LOAD_STATUS 0x06 #define GC5035_OTP_BIT_LOAD BIT(0) /* OTP-related definitions */ #define GC5035_OTP_ID_SIZE 9 #define GC5035_OTP_ID_DATA_OFFSET 0x0020 #define GC5035_OTP_DATA_LENGTH 1024 /* OTP DPC parameters */ #define GC5035_OTP_DPC_FLAG_OFFSET 0x0068 #define GC5035_OTP_DPC_FLAG_MASK 0x03 #define GC5035_OTP_FLAG_EMPTY 0x00 #define GC5035_OTP_FLAG_VALID 0x01 #define GC5035_OTP_DPC_TOTAL_NUMBER_OFFSET 0x0070 #define GC5035_OTP_DPC_ERROR_NUMBER_OFFSET 0x0078 /* OTP register parameters */ #define GC5035_OTP_REG_FLAG_OFFSET 0x0880 #define GC5035_OTP_REG_DATA_OFFSET 0x0888 #define GC5035_OTP_REG_ADDR_OFFSET 1 #define GC5035_OTP_REG_VAL_OFFSET 2 #define GC5035_OTP_PAGE_FLAG_OFFSET 3 #define GC5035_OTP_PER_PAGE_SIZE 4 #define GC5035_OTP_REG_PAGE_MASK 0x07 #define GC5035_OTP_REG_MAX_GROUP 5 #define GC5035_OTP_REG_BYTE_PER_GROUP 5 #define GC5035_OTP_REG_PER_GROUP 2 #define GC5035_OTP_REG_BYTE_PER_REG 2 #define GC5035_OTP_REG_DATA_SIZE 25 #define GC5035_OTP_REG_SIZE 10 #define GC5035_DD_DELAY_US (10 * 1000) #define GC5035_DD_TIMEOUT_US (100 * 1000) /* The clock source index in INT3472 CLDB */ #define INT3472_CLDB_CLKSRC_INDEX 14 /* * 82c0d13a-78c5-4244-9bb1-eb8b539a8d11 * This _DSM GUID calls CLKC and CLKF. 
*/ static const guid_t clock_ctrl_guid = GUID_INIT(0x82c0d13a, 0x78c5, 0x4244, 0x9b, 0xb1, 0xeb, 0x8b, 0x53, 0x9a, 0x8d, 0x11); static const char * const gc5035_supplies[] = { /* * Requested separately due to power sequencing needs: * "iovdd", * Power supply for I/O circuits * */ "dvdd12", /* Digital core power */ "avdd21", /* Analog power */ }; struct gc5035_regval { u8 addr; u8 val; }; struct gc5035_reg { u8 page; struct gc5035_regval regval; }; struct gc5035_otp_regs { unsigned int num_regs; struct gc5035_reg regs[GC5035_OTP_REG_SIZE]; }; struct gc5035_dpc { bool valid; unsigned int total_num; }; struct gc5035_mode { u32 width; u32 height; u32 max_fps; u32 hts_def; u32 vts_def; u32 exp_def; const struct gc5035_regval *reg_list; size_t num_regs; }; struct gc5035_power_ctrl { /* Control Logic ACPI device */ struct acpi_device *ctrl_logic; /* GPIO for reset */ struct gpio_desc *reset_gpio; /* GPIO for power enable */ struct gpio_desc *pwren_gpio; /* GPIO for privacy LED */ struct gpio_desc *pled_gpio; int status; /* Clock source index */ u8 clk_source_index; }; struct gc5035 { struct i2c_client *client; struct clk *mclk; unsigned long mclk_rate; //struct gpio_desc *resetb_gpio; ///struct gpio_desc *pwdn_gpio; struct regulator *iovdd_supply; struct regulator_bulk_data supplies[ARRAY_SIZE(gc5035_supplies)]; struct v4l2_subdev subdev; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *exposure; struct v4l2_ctrl *hblank; struct v4l2_ctrl *vblank; u32 Dgain_ratio; bool otp_read; u8 otp_id[GC5035_OTP_ID_SIZE]; struct gc5035_dpc dpc; struct gc5035_otp_regs otp_regs; /* * Serialize control access, get/set format, get selection * and start streaming. */ struct mutex mutex; struct gc5035_power_ctrl power; bool streaming; const struct gc5035_mode *cur_mode; }; static inline struct gc5035 *to_gc5035(struct v4l2_subdev *sd) { return container_of(sd, struct gc5035, subdev); } static const struct gc5035_regval gc5035_otp_init_regs[] = { {0xfc, 0x01}, {0xf4, 0x40}, {0xf5, 0xe9}, {0xf6, 0x14}, {0xf8, 0x49}, {0xf9, 0x82}, {0xfa, 0x00}, {0xfc, 0x81}, {0xfe, 0x00}, {0x36, 0x01}, {0xd3, 0x87}, {0x36, 0x00}, {0x33, 0x00}, {0xf7, 0x01}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xee, 0x30}, {0xfa, 0x10}, {0xf5, 0xe9}, {0xfe, 0x02}, {0x67, 0xc0}, {0x59, 0x3f}, {0x55, 0x84}, {0x65, 0x80}, {0x66, 0x03}, {0xfe, 0x00}, }; static const struct gc5035_regval gc5035_otp_exit_regs[] = { {0xfe, 0x02}, {0x67, 0x00}, {0xfe, 0x00}, {0xfa, 0x00}, }; static const struct gc5035_regval gc5035_dd_auto_load_regs[] = { {0xfe, 0x02}, {0xbe, 0x00}, {0xa9, 0x01}, {0x09, 0x33}, }; static const struct gc5035_regval gc5035_otp_dd_regs[] = { {0x03, 0x00}, {0x04, 0x80}, {0x95, 0x0a}, {0x96, 0x30}, {0x97, 0x0a}, {0x98, 0x32}, {0x99, 0x07}, {0x9a, 0xa9}, {0xf3, 0x80}, }; static const struct gc5035_regval gc5035_otp_dd_enable_regs[] = { {0xbe, 0x01}, {0x09, 0x00}, {0xfe, 0x01}, {0x80, 0x02}, {0xfe, 0x00}, }; /* * Xclk 24Mhz * Pclk 87.6Mhz * grabwindow_width 2592 * grabwindow_height 1944 * max_framerate 30fps * mipi_datarate per lane 876Mbps */ static const struct gc5035_regval gc5035_global_regs[] = { /*init*/ {0xfc, 0x01}, {0xf4, 0x40}, {0xf5, 0xe9}, {0xf6, 0x14}, {0xf8, 0x49}, {0xf9, 0x82}, {0xfa, 0x00}, {0xfc, 0x81}, {0xfe, 0x00}, {0x36, 0x01}, {0xd3, 0x87}, {0x36, 0x00}, {0x33, 0x00}, {0xfe, 0x03}, {0x01, 0xe7}, {0xf7, 0x01}, {0xfc, 0x8f}, {0xfc, 0x8f}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xee, 0x30}, {0x87, 0x18}, {0xfe, 0x01}, {0x8c, 0x90}, {0xfe, 0x00}, /* Analog & CISCTL */ {0xfe, 0x00}, {0x05, 0x02}, {0x06, 0xda}, {0x9d, 
0x0c}, {0x09, 0x00}, {0x0a, 0x04}, {0x0b, 0x00}, {0x0c, 0x03}, {0x0d, 0x07}, {0x0e, 0xa8}, {0x0f, 0x0a}, {0x10, 0x30}, {0x11, 0x02}, {0x17, 0x80}, {0x19, 0x05}, {0xfe, 0x02}, {0x30, 0x03}, {0x31, 0x03}, {0xfe, 0x00}, {0xd9, 0xc0}, {0x1b, 0x20}, {0x21, 0x48}, {0x28, 0x22}, {0x29, 0x58}, {0x44, 0x20}, {0x4b, 0x10}, {0x4e, 0x1a}, {0x50, 0x11}, {0x52, 0x33}, {0x53, 0x44}, {0x55, 0x10}, {0x5b, 0x11}, {0xc5, 0x02}, {0x8c, 0x1a}, {0xfe, 0x02}, {0x33, 0x05}, {0x32, 0x38}, {0xfe, 0x00}, {0x91, 0x80}, {0x92, 0x28}, {0x93, 0x20}, {0x95, 0xa0}, {0x96, 0xe0}, {0xd5, 0xfc}, {0x97, 0x28}, {0x16, 0x0c}, {0x1a, 0x1a}, {0x1f, 0x11}, {0x20, 0x10}, {0x46, 0x83}, {0x4a, 0x04}, {0x54, 0x02}, {0x62, 0x00}, {0x72, 0x8f}, {0x73, 0x89}, {0x7a, 0x05}, {0x7d, 0xcc}, {0x90, 0x00}, {0xce, 0x18}, {0xd0, 0xb2}, {0xd2, 0x40}, {0xe6, 0xe0}, {0xfe, 0x02}, {0x12, 0x01}, {0x13, 0x01}, {0x14, 0x01}, {0x15, 0x02}, {0x22, 0x7c}, {0x91, 0x00}, {0x92, 0x00}, {0x93, 0x00}, {0x94, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, /* Gain */ {0xfe, 0x00}, {0xb0, 0x6e}, {0xb1, 0x01}, {0xb2, 0x00}, {0xb3, 0x00}, {0xb4, 0x00}, {0xb6, 0x00}, /* ISP */ {0xfe, 0x01}, {0x53, 0x00}, {0x89, 0x03}, {0x60, 0x40}, /* BLK */ {0xfe, 0x01}, {0x42, 0x21}, {0x49, 0x03}, {0x4a, 0xff}, {0x4b, 0xc0}, {0x55, 0x00}, /* Anti_blooming */ {0xfe, 0x01}, {0x41, 0x28}, {0x4c, 0x00}, {0x4d, 0x00}, {0x4e, 0x3c}, {0x44, 0x08}, {0x48, 0x02}, /* Crop */ {0xfe, 0x01}, {0x91, 0x00}, {0x92, 0x08}, {0x93, 0x00}, {0x94, 0x07}, {0x95, 0x07}, {0x96, 0x98}, {0x97, 0x0a}, {0x98, 0x20}, {0x99, 0x00}, /* MIPI */ {0xfe, 0x03}, {0x02, 0x57}, {0x03, 0xb7}, {0x15, 0x14}, {0x18, 0x0f}, {0x21, 0x22}, {0x22, 0x06}, {0x23, 0x48}, {0x24, 0x12}, {0x25, 0x28}, {0x26, 0x08}, {0x29, 0x06}, {0x2a, 0x58}, {0x2b, 0x08}, {0xfe, 0x01}, {0x8c, 0x10}, {0xfe, 0x00}, {0x3e, 0x01}, }; /* * Xclk 24Mhz * Pclk 87.6Mhz * grabwindow_width 2592 * grabwindow_height 1944 * max_framerate 30fps * mipi_datarate per lane 876Mbps */ static const struct gc5035_regval gc5035_2592x1944_regs[] = { /* System */ {0xfe, 0x00}, {0x3e, 0x01}, {0xfc, 0x01}, {0xf4, 0x40}, {0xf5, 0xe9}, {0xf6, 0x14}, {0xf8, 0x58}, {0xf9, 0x82}, {0xfa, 0x00}, {0xfc, 0x81}, {0xfe, 0x00}, {0x36, 0x01}, {0xd3, 0x87}, {0x36, 0x00}, {0x33, 0x00}, {0xfe, 0x03}, {0x01, 0xe7}, {0xf7, 0x01}, {0xfc, 0x8f}, {0xfc, 0x8f}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xee, 0x30}, {0x87, 0x18}, {0xfe, 0x01}, {0x8c, 0x90}, {0xfe, 0x00}, /*Analog & CISCTL*/ {0x03, 0x03},//0x06 {0x04, 0xd8}, {0x41, 0x07},//08 {0x42, 0xd8},//58 {0x05, 0x02}, {0x06, 0xda}, {0x9d, 0x18}, {0x09, 0x00}, {0x0a, 0x04}, {0x0b, 0x00}, {0x0c, 0x03}, {0x0d, 0x07}, {0x0e, 0xa8}, {0x0f, 0x0a}, {0x10, 0x30}, {0x11, 0x02}, {0x17, 0x80}, {0x19, 0x05}, {0xfe, 0x02}, {0x30, 0x03}, {0x31, 0x03}, {0xfe, 0x00}, {0xd9, 0xc0}, {0x1b, 0x20}, {0x21, 0x40}, {0x28, 0x22}, {0x29, 0x56}, {0x44, 0x20}, {0x4b, 0x10}, {0x4e, 0x1a}, {0x50, 0x11}, {0x52, 0x33}, {0x53, 0x44}, {0x55, 0x10}, {0x5b, 0x11}, {0xc5, 0x02}, {0x8c, 0x1a}, {0xfe, 0x02}, {0x33, 0x05}, {0x32, 0x38}, {0xfe, 0x00}, {0x91, 0x80}, {0x92, 0x28}, {0x93, 0x20}, {0x95, 0xa0}, {0x96, 0xe0}, {0xd5, 0xfc}, {0x97, 0x28}, {0x16, 0x0c}, {0x1a, 0x1a}, {0x1f, 0x11}, {0x20, 0x10}, {0x46, 0x83}, {0x4a, 0x04}, {0x54, 0x02}, {0x62, 0x00}, {0x72, 0x8f}, {0x73, 0x89}, {0x7a, 0x05}, {0x7d, 0xcc}, {0x90, 0x00}, {0xce, 0x18}, {0xd0, 0xb2}, {0xd2, 0x40}, {0xe6, 0xe0}, {0xfe, 0x02}, {0x12, 0x01}, {0x13, 0x01}, {0x14, 0x01}, {0x15, 0x02}, {0x22, 0x7c}, 
{0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, /*GAIN*/ {0xfe, 0x00}, {0xb0, 0x6e}, {0xb1, 0x01}, {0xb2, 0x00}, {0xb3, 0x00}, {0xb4, 0x00}, {0xb6, 0x00}, /*ISP*/ {0xfe, 0x01}, {0x53, 0x00}, {0x89, 0x03}, {0x60, 0x40}, /*BLK*/ {0xfe, 0x01}, {0x42, 0x21}, {0x49, 0x03}, {0x4a, 0xff}, {0x4b, 0xc0}, {0x55, 0x00}, /*anti_blooming*/ {0xfe, 0x01}, {0x41, 0x28}, {0x4c, 0x00}, {0x4d, 0x00}, {0x4e, 0x3c}, {0x44, 0x08}, {0x48, 0x02}, /*CROP*/ {0xfe, 0x01}, {0x91, 0x00}, {0x92, 0x08}, {0x93, 0x00}, {0x94, 0x08}, {0x95, 0x07}, {0x96, 0x98}, {0x97, 0x0a}, {0x98, 0x20}, {0x99, 0x00}, /*MIPI*/ {0xfe, 0x03}, {0x02, 0x57}, {0x03, 0xb7}, {0x15, 0x14}, {0x18, 0x0f}, {0x21, 0x22}, {0x22, 0x06}, {0x23, 0x48}, {0x24, 0x12}, {0x25, 0x28}, {0x26, 0x08}, {0x29, 0x06}, {0x2a, 0x58}, {0x2b, 0x08}, {0xfe, 0x01}, {0x8c, 0x10}, {0xfe, 0x00}, {0x3e, 0x01}, }; /* * Xclk 24Mhz * Pclk 87.6Mhz * grabwindow_width 1296 * grabwindow_height 972 * mipi_datarate per lane 876Mbps */ static const struct gc5035_regval gc5035_1296x972_regs[] = { /*NULL*/ {0xfe, 0x00}, {0x3e, 0x01}, {0xfc, 0x01}, {0xf4, 0x40}, {0xf5, 0xe4}, {0xf6, 0x14}, {0xf8, 0x49}, {0xf9, 0x12}, {0xfa, 0x01}, {0xfc, 0x81}, {0xfe, 0x00}, {0x36, 0x01}, {0xd3, 0x87}, {0x36, 0x00}, {0x33, 0x20}, {0xfe, 0x03}, {0x01, 0x87}, {0xf7, 0x11}, {0xfc, 0x8f}, {0xfc, 0x8f}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xee, 0x30}, {0x87, 0x18}, {0xfe, 0x01}, {0x8c, 0x90}, {0xfe, 0x00}, /* Analog & CISCTL */ {0xfe, 0x00}, {0x05, 0x02}, {0x06, 0xda}, {0x9d, 0x0c}, {0x09, 0x00}, {0x0a, 0x04}, {0x0b, 0x00}, {0x0c, 0x03}, {0x0d, 0x07}, {0x0e, 0xa8}, {0x0f, 0x0a}, {0x10, 0x30}, {0x21, 0x60}, {0x29, 0x30}, {0x44, 0x18}, {0x4e, 0x20}, {0x8c, 0x20}, {0x91, 0x15}, {0x92, 0x3a}, {0x93, 0x20}, {0x95, 0x45}, {0x96, 0x35}, {0xd5, 0xf0}, {0x97, 0x20}, {0x1f, 0x19}, {0xce, 0x18}, {0xd0, 0xb3}, {0xfe, 0x02}, {0x14, 0x02}, {0x15, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, /* BLK */ {0xfe, 0x01}, {0x49, 0x00}, {0x4a, 0x01}, {0x4b, 0xf8}, /* Anti_blooming */ {0xfe, 0x01}, {0x4e, 0x06}, {0x44, 0x02}, /* Crop */ {0xfe, 0x01}, {0x91, 0x00}, {0x92, 0x04}, {0x93, 0x00}, {0x94, 0x03}, {0x95, 0x03}, {0x96, 0xcc}, {0x97, 0x05}, {0x98, 0x10}, {0x99, 0x00}, /* MIPI */ {0xfe, 0x03}, {0x02, 0x58}, {0x22, 0x03}, {0x26, 0x06}, {0x29, 0x03}, {0x2b, 0x06}, {0xfe, 0x01}, {0x8c, 0x10}, }; /* * Xclk 24Mhz * Pclk 87.6Mhz * linelength 672{0x2a0) * framelength 2232{0x8b8) * grabwindow_width 1280 * grabwindow_height 720 * max_framerate 30fps * mipi_datarate per lane 876Mbps */ static const struct gc5035_regval gc5035_1280x720_regs[] = { /* System */ {0xfe, 0x00}, {0x3e, 0x01}, {0xfc, 0x01}, {0xf4, 0x40}, {0xf5, 0xe4}, {0xf6, 0x14}, {0xf8, 0x49}, {0xf9, 0x12}, {0xfa, 0x01}, {0xfc, 0x81}, {0xfe, 0x00}, {0x36, 0x01}, {0xd3, 0x87}, {0x36, 0x00}, {0x33, 0x20}, {0xfe, 0x03}, {0x01, 0x87}, {0xf7, 0x11}, {0xfc, 0x8f}, {0xfc, 0x8f}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xee, 0x30}, {0x87, 0x18}, {0xfe, 0x01}, {0x8c, 0x90}, {0xfe, 0x00}, /* Analog & CISCTL */ {0xfe, 0x00}, {0x05, 0x02}, {0x06, 0xda}, {0x9d, 0x0c}, {0x09, 0x00}, {0x0a, 0x04}, {0x0b, 0x00}, {0x0c, 0x03}, {0x0d, 0x07}, {0x0e, 0xa8}, {0x0f, 0x0a}, {0x10, 0x30}, {0x21, 0x60}, {0x29, 0x30}, {0x44, 0x18}, {0x4e, 0x20}, {0x8c, 0x20}, {0x91, 0x15}, {0x92, 0x3a}, {0x93, 0x20}, {0x95, 0x45}, {0x96, 0x35}, {0xd5, 0xf0}, {0x97, 0x20}, {0x1f, 0x19}, 
{0xce, 0x18}, {0xd0, 0xb3}, {0xfe, 0x02}, {0x14, 0x02}, {0x15, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfe, 0x00}, {0xfc, 0x88}, {0xfe, 0x10}, {0xfe, 0x00}, {0xfc, 0x8e}, /* BLK */ {0xfe, 0x01}, {0x49, 0x00}, {0x4a, 0x01}, {0x4b, 0xf8}, /* Anti_blooming */ {0xfe, 0x01}, {0x4e, 0x06}, {0x44, 0x02}, /* Crop */ {0xfe, 0x01}, {0x91, 0x00}, {0x92, 0x0a}, {0x93, 0x00}, {0x94, 0x0b}, {0x95, 0x02}, {0x96, 0xd0}, {0x97, 0x05}, {0x98, 0x00}, {0x99, 0x00}, /* MIPI */ {0xfe, 0x03}, {0x02, 0x58}, {0x22, 0x03}, {0x26, 0x06}, {0x29, 0x03}, {0x2b, 0x06}, {0xfe, 0x01}, {0x8c, 0x10}, {0xfe, 0x00}, {0x3e, 0x91}, }; static const struct gc5035_mode gc5035_modes[] = { { .width = 2592, .height = 1944, .max_fps = 30, .exp_def = 0x258, .hts_def = 2920, .vts_def = 2008, .reg_list = gc5035_2592x1944_regs, .num_regs = ARRAY_SIZE(gc5035_2592x1944_regs), }, { .width = 1296, .height = 972, .max_fps = 30, .exp_def = 0x258, .hts_def = 1460, .vts_def = 2008, .reg_list = gc5035_1296x972_regs, .num_regs = ARRAY_SIZE(gc5035_1296x972_regs), }, { .width = 1280, .height = 720, .max_fps = 60, .exp_def = 0x258, .hts_def = 1896, .vts_def = 1536, .reg_list = gc5035_1280x720_regs, .num_regs = ARRAY_SIZE(gc5035_1280x720_regs), }, }; static const char * const gc5035_test_pattern_menu[] = { "Disabled", "Color Bar", }; static const s64 gc5035_link_freqs[] = { 438000000, }; static u64 __maybe_unused gc5035_link_to_pixel_rate(u32 f_index) { u64 pixel_rate = gc5035_link_freqs[f_index] * 2 * GC5035_DATA_LANES; do_div(pixel_rate, GC5035_BITS_PER_SAMPLE); return pixel_rate; } static struct gpio_desc* gc5035_get_gpio(struct gc5035 *gc5035, const char* name) { struct device *dev = &gc5035->client->dev; struct gpio_desc* gpio; int ret; gpio = devm_gpiod_get(dev, name, GPIOD_OUT_HIGH); ret = PTR_ERR_OR_ZERO(gpio); if (ret < 0) { gpio = NULL; dev_warn(dev, "failed to get %s gpio: %d\n", name, ret); } return gpio; } static void gc5035_init_power_ctrl(struct gc5035 *gc5035) { struct gc5035_power_ctrl* power = &gc5035->power; acpi_handle handle = ACPI_HANDLE(&gc5035->client->dev); struct acpi_handle_list dep_devices; acpi_status status; int i = 0; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; power->ctrl_logic = NULL; if (!acpi_has_method(handle, "_DEP")) return; /* Call _DEP method of OV13B */ status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return; } /* Find INT3472 in _DEP */ for (i = 0; i < dep_devices.count; i++) { struct acpi_device *dep_device = NULL; const char* dep_hid = NULL; if (dep_devices.handles[i]) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) acpi_bus_get_device(dep_devices.handles[i], &dep_device); #else dep_device = acpi_fetch_acpi_dev(dep_devices.handles[i]); #endif } if (dep_device) dep_hid = acpi_device_hid(dep_device); if (dep_hid && strcmp("INT3472", dep_hid) == 0) { power->ctrl_logic = dep_device; break; } } /* If we got INT3472 acpi device, read which clock source we'll use */ if (power->ctrl_logic == NULL) return; status = acpi_evaluate_object(power->ctrl_logic->handle, "CLDB", NULL, &buffer); if (ACPI_FAILURE(status)) { dev_warn(&gc5035->client->dev, "Read INT3472 CLDB failed"); return; } obj = buffer.pointer; if (!obj) dev_warn(&gc5035->client->dev, "INT3472 CLDB return NULL"); if (obj->type != ACPI_TYPE_BUFFER) { acpi_handle_err(power->ctrl_logic->handle, "CLDB object is not an ACPI buffer\n"); kfree(obj); 
return; } if (obj->buffer.length < INT3472_CLDB_CLKSRC_INDEX + 1) { acpi_handle_err(power->ctrl_logic->handle, "The CLDB buffer size is wrong\n"); kfree(obj); return; } /* Get the clock source index */ gc5035->power.clk_source_index = obj->buffer.pointer[INT3472_CLDB_CLKSRC_INDEX]; kfree(obj); /* Get gpios mapped by INT3472 driver */ power->reset_gpio = gc5035_get_gpio(gc5035, "reset"); power->pwren_gpio = gc5035_get_gpio(gc5035, "pwren"); power->pled_gpio = gc5035_get_gpio(gc5035, "pled"); power->status = 0; } static void gc5035_set_power(struct gc5035 *gc5035, int on) { struct gc5035_power_ctrl* power = &gc5035->power; on = (on ? 1 : 0); if (on == power->status) return; /* First, set reset pin as low */ if (power->reset_gpio) { gpiod_set_value_cansleep(power->reset_gpio, 0); msleep(5); } /* Use _DSM of INT3472 to enable clock */ if (power->ctrl_logic) { u8 clock_args[] = { power->clk_source_index, on, 0x01,}; union acpi_object clock_ctrl_args = { .buffer = { .type = ACPI_TYPE_BUFFER, .length = 3, .pointer = clock_args, }, }; acpi_evaluate_dsm(power->ctrl_logic->handle, &clock_ctrl_guid, 0x00, 0x01, &clock_ctrl_args); } /* Set power pin and privacy LED pin */ if (power->pwren_gpio) gpiod_set_value_cansleep(power->pwren_gpio, on); if (power->pled_gpio) gpiod_set_value_cansleep(power->pled_gpio, on); /* If we need to power on, set reset pin to high at last */ if (on && power->reset_gpio) { gpiod_set_value_cansleep(power->reset_gpio, 1); msleep(5); } power->status = on; } static int gc5035_write_reg(struct gc5035 *gc5035, u8 reg, u8 val) { return i2c_smbus_write_byte_data(gc5035->client, reg, val); } static int gc5035_write_array(struct gc5035 *gc5035, const struct gc5035_regval *regs, size_t num_regs) { unsigned int i; int ret; for (i = 0; i < num_regs; i++) { ret = gc5035_write_reg(gc5035, regs[i].addr, regs[i].val); if (ret) return ret; } return 0; } static int gc5035_read_reg(struct gc5035 *gc5035, u8 reg, u8 *val) { int ret; ret = i2c_smbus_read_byte_data(gc5035->client, reg); if (ret < 0) return ret; *val = (unsigned char)ret; return 0; } static int gc5035_otp_read_data(struct gc5035 *gc5035, u16 bit_addr, u8 *data, size_t length) { size_t i; int ret; if (WARN_ON(bit_addr % 8)) return -EINVAL; if (WARN_ON(bit_addr / 8 + length > GC5035_OTP_DATA_LENGTH)) return -EINVAL; ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 2); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_ACCESS_ADDR_H, (bit_addr >> 8) & GC5035_OTP_ACCESS_ADDR_H_MASK); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_ACCESS_ADDR_L, bit_addr & 0xff); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_MODE, GC5035_OTP_PRE_READ); if (ret) goto out_read_done; ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_MODE, GC5035_OTP_READ_MODE); if (ret) goto out_read_done; for (i = 0; i < length; i++) { ret = gc5035_read_reg(gc5035, GC5035_REG_OTP_DATA, &data[i]); if (ret) goto out_read_done; } out_read_done: gc5035_write_reg(gc5035, GC5035_REG_OTP_MODE, GC5035_OTP_READ_DONE); return ret; } static int gc5035_read_otp_regs(struct gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; struct gc5035_otp_regs *otp_regs = &gc5035->otp_regs; u8 regs[GC5035_OTP_REG_DATA_SIZE] = {0}; unsigned int i, j; u8 flag; int ret; ret = gc5035_otp_read_data(gc5035, GC5035_OTP_REG_FLAG_OFFSET, &flag, 1); if (ret) { dev_err(dev, "failed to read otp reg flag\n"); return ret; } dev_dbg(dev, "register update flag = 0x%x\n", flag); gc5035->otp_regs.num_regs = 0; if (flag != GC5035_OTP_FLAG_VALID) return 0; 
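/*
 * The OTP register section is split into GC5035_OTP_REG_MAX_GROUP groups.
 * The first byte of each group appears to pack, per entry, a valid bit and
 * the destination register page, with the remaining bytes holding the
 * address/value pairs. Read the whole block once and decode it in the
 * nested loops below.
 */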
ret = gc5035_otp_read_data(gc5035, GC5035_OTP_REG_DATA_OFFSET, regs, sizeof(regs)); if (ret) { dev_err(dev, "failed to read otp reg data\n"); return ret; } for (i = 0; i < GC5035_OTP_REG_MAX_GROUP; i++) { unsigned int base_group = i * GC5035_OTP_REG_BYTE_PER_GROUP; for (j = 0; j < GC5035_OTP_REG_PER_GROUP; j++) { struct gc5035_reg *reg; if (!(regs[base_group] & BIT((GC5035_OTP_PER_PAGE_SIZE * j + GC5035_OTP_PAGE_FLAG_OFFSET)))) continue; reg = &otp_regs->regs[otp_regs->num_regs++]; reg->page = (regs[base_group] >> (GC5035_OTP_PER_PAGE_SIZE * j)) & GC5035_OTP_REG_PAGE_MASK; reg->regval.addr = regs[base_group + j * GC5035_OTP_REG_BYTE_PER_REG + GC5035_OTP_REG_ADDR_OFFSET]; reg->regval.val = regs[base_group + j * GC5035_OTP_REG_BYTE_PER_REG + GC5035_OTP_REG_VAL_OFFSET]; } } return 0; } static int gc5035_read_dpc(struct gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; struct gc5035_dpc *dpc = &gc5035->dpc; u8 dpc_flag = 0; u8 error_number = 0; u8 total_number = 0; int ret; ret = gc5035_otp_read_data(gc5035, GC5035_OTP_DPC_FLAG_OFFSET, &dpc_flag, 1); if (ret) { dev_err(dev, "failed to read dpc flag\n"); return ret; } dev_dbg(dev, "dpc flag = 0x%x\n", dpc_flag); dpc->valid = false; switch (dpc_flag & GC5035_OTP_DPC_FLAG_MASK) { case GC5035_OTP_FLAG_EMPTY: dev_dbg(dev, "dpc info is empty!!\n"); break; case GC5035_OTP_FLAG_VALID: dev_dbg(dev, "dpc info is valid!\n"); ret = gc5035_otp_read_data(gc5035, GC5035_OTP_DPC_TOTAL_NUMBER_OFFSET, &total_number, 1); if (ret) { dev_err(dev, "failed to read dpc total number\n"); return ret; } ret = gc5035_otp_read_data(gc5035, GC5035_OTP_DPC_ERROR_NUMBER_OFFSET, &error_number, 1); if (ret) { dev_err(dev, "failed to read dpc error number\n"); return ret; } dpc->total_num = total_number + error_number; dpc->valid = true; dev_dbg(dev, "total_num = %d\n", dpc->total_num); break; default: break; } return ret; } static int gc5035_otp_read_sensor_info(struct gc5035 *gc5035) { int ret; ret = gc5035_read_dpc(gc5035); if (ret) return ret; return gc5035_read_otp_regs(gc5035); } static int gc5035_check_dd_load_status(struct gc5035 *gc5035) { u8 status; int ret; ret = gc5035_read_reg(gc5035, GC5035_REG_DD_LOAD_STATUS, &status); if (ret) return ret; if (status & GC5035_OTP_BIT_LOAD) return status; else return 0; } static int gc5035_otp_update_dd(struct gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; struct gc5035_dpc *dpc = &gc5035->dpc; int val, ret; if (!dpc->valid) { dev_dbg(dev, "DPC table invalid, not updating DD.\n"); return 0; } dev_dbg(dev, "DD auto load start\n"); ret = gc5035_write_array(gc5035, gc5035_dd_auto_load_regs, ARRAY_SIZE(gc5035_dd_auto_load_regs)); if (ret) { dev_err(dev, "failed to write dd auto load reg\n"); return ret; } ret = gc5035_write_reg(gc5035, GC5035_REG_DD_TOTALNUM_H, (dpc->total_num >> 8) & GC5035_DD_TOTALNUM_H_MASK); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_DD_TOTALNUM_L, dpc->total_num & 0xff); if (ret) return ret; ret = gc5035_write_array(gc5035, gc5035_otp_dd_regs, ARRAY_SIZE(gc5035_otp_dd_regs)); if (ret) return ret; /* Wait for DD to finish loading automatically */ ret = readx_poll_timeout(gc5035_check_dd_load_status, gc5035, val, val <= 0, GC5035_DD_DELAY_US, GC5035_DD_TIMEOUT_US); if (ret < 0) { dev_err(dev, "DD load timeout\n"); return -EFAULT; } if (val < 0) { dev_err(dev, "DD load failure\n"); return val; } ret = gc5035_write_array(gc5035, gc5035_otp_dd_enable_regs, ARRAY_SIZE(gc5035_otp_dd_enable_regs)); if (ret) return ret; return 0; } static int gc5035_otp_update_regs(struct 
gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; struct gc5035_otp_regs *otp_regs = &gc5035->otp_regs; unsigned int i; int ret; dev_dbg(dev, "reg count = %d\n", otp_regs->num_regs); for (i = 0; i < otp_regs->num_regs; i++) { ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, otp_regs->regs[i].page); if (ret) return ret; ret = gc5035_write_reg(gc5035, otp_regs->regs[i].regval.addr, otp_regs->regs[i].regval.val); if (ret) return ret; } return 0; } static int gc5035_otp_update(struct gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; int ret; ret = gc5035_otp_update_dd(gc5035); if (ret) { dev_err(dev, "failed to update otp dd\n"); return ret; } ret = gc5035_otp_update_regs(gc5035); if (ret) { dev_err(dev, "failed to update otp regs\n"); return ret; } return ret; } static int gc5035_set_otp_config(struct gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; u8 otp_id[GC5035_OTP_ID_SIZE]; int ret; ret = gc5035_write_array(gc5035, gc5035_otp_init_regs, ARRAY_SIZE(gc5035_otp_init_regs)); if (ret) { dev_err(dev, "failed to write otp init reg\n"); return ret; } ret = gc5035_otp_read_data(gc5035, GC5035_OTP_ID_DATA_OFFSET, &otp_id[0], GC5035_OTP_ID_SIZE); if (ret) { dev_err(dev, "failed to read otp id\n"); goto out_otp_exit; } if (!gc5035->otp_read || memcmp(gc5035->otp_id, otp_id, sizeof(otp_id))) { dev_dbg(dev, "reading OTP configuration\n"); memset(&gc5035->otp_regs, 0, sizeof(gc5035->otp_regs)); memset(&gc5035->dpc, 0, sizeof(gc5035->dpc)); memcpy(gc5035->otp_id, otp_id, sizeof(gc5035->otp_id)); ret = gc5035_otp_read_sensor_info(gc5035); if (ret < 0) { dev_err(dev, "failed to read otp info\n"); goto out_otp_exit; } gc5035->otp_read = true; } ret = gc5035_otp_update(gc5035); if (ret < 0) return ret; out_otp_exit: ret = gc5035_write_array(gc5035, gc5035_otp_exit_regs, ARRAY_SIZE(gc5035_otp_exit_regs)); if (ret) { dev_err(dev, "failed to write otp exit reg\n"); return ret; } return ret; } static int gc5035_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct gc5035 *gc5035 = to_gc5035(sd); const struct gc5035_mode *mode; s64 h_blank, vblank_def; mode = v4l2_find_nearest_size(gc5035_modes, ARRAY_SIZE(gc5035_modes), width, height, fmt->format.width, fmt->format.height); //fmt->format.code = MEDIA_BUS_FMT_SRGGB10_1X10; fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->format.width = mode->width; fmt->format.height = mode->height; fmt->format.field = V4L2_FIELD_NONE; mutex_lock(&gc5035->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; } else { gc5035->cur_mode = mode; h_blank = mode->hts_def - mode->width; __v4l2_ctrl_modify_range(gc5035->hblank, h_blank, h_blank, 1, h_blank); vblank_def = round_up(mode->vts_def, 4) - mode->height; __v4l2_ctrl_modify_range(gc5035->vblank, vblank_def, GC5035_VTS_MAX - mode->height, 1, vblank_def); } mutex_unlock(&gc5035->mutex); return 0; } static int gc5035_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct gc5035 *gc5035 = to_gc5035(sd); const struct gc5035_mode *mode = gc5035->cur_mode; mutex_lock(&gc5035->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { fmt->format = *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad); } else { fmt->format.width = mode->width; fmt->format.height = mode->height; //fmt->format.code = MEDIA_BUS_FMT_SRGGB10_1X10; fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->format.field = V4L2_FIELD_NONE; } 
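/*
 * TRY formats only live in the subdev state for format negotiation; the
 * ACTIVE case reports the currently selected mode, which set_fmt() above
 * keeps in sync with the hblank/vblank control ranges.
 */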
mutex_unlock(&gc5035->mutex); return 0; } static int gc5035_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index != 0) return -EINVAL; //code->code = MEDIA_BUS_FMT_SRGGB10_1X10; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int gc5035_enum_frame_sizes(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(gc5035_modes)) return -EINVAL; //if (fse->code != MEDIA_BUS_FMT_SRGGB10_1X10) if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = gc5035_modes[fse->index].width; fse->max_width = gc5035_modes[fse->index].width; fse->max_height = gc5035_modes[fse->index].height; fse->min_height = gc5035_modes[fse->index].height; return 0; } static int __gc5035_start_stream(struct gc5035 *gc5035) { int ret; gc5035_set_power(gc5035, 1); ret = gc5035_write_array(gc5035, gc5035_global_regs, ARRAY_SIZE(gc5035_global_regs)); if (ret) return ret; ret = gc5035_set_otp_config(gc5035); if (ret) return ret; ret = gc5035_write_array(gc5035, gc5035->cur_mode->reg_list, gc5035->cur_mode->num_regs); if (ret) return ret; /* In case these controls are set before streaming */ ret = __v4l2_ctrl_handler_setup(&gc5035->ctrl_handler); if (ret) return ret; gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); if (ret) return ret; return gc5035_write_reg(gc5035, GC5035_REG_CTRL_MODE, GC5035_MODE_STREAMING); } static void __gc5035_stop_stream(struct gc5035 *gc5035) { int ret; struct i2c_client *client = gc5035->client; ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); if (ret) dev_err(&client->dev, "failed to stop streaming!"); if(gc5035_write_reg(gc5035, GC5035_REG_CTRL_MODE, GC5035_MODE_SW_STANDBY)) dev_err(&client->dev, "failed to stop streaming"); gc5035_set_power(gc5035, 0); } static int gc5035_s_stream(struct v4l2_subdev *sd, int on) { struct gc5035 *gc5035 = to_gc5035(sd); struct i2c_client *client = gc5035->client; int ret = 0; mutex_lock(&gc5035->mutex); on = !!on; if (on == gc5035->streaming) goto unlock_and_return; if (on) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); goto unlock_and_return; } ret = __gc5035_start_stream(gc5035); if (ret) { dev_err(&client->dev, "start stream failed\n"); pm_runtime_put(&client->dev); goto unlock_and_return; } } else { __gc5035_stop_stream(gc5035); pm_runtime_put(&client->dev); } gc5035->streaming = on; unlock_and_return: mutex_unlock(&gc5035->mutex); return ret; } static int gc5035_runtime_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct gc5035 *gc5035 = to_gc5035(sd); int ret; if (gc5035->streaming) { ret = __gc5035_start_stream(gc5035); if (ret) goto error; } return 0; error: __gc5035_stop_stream(gc5035); gc5035->streaming = false; return ret; } static int gc5035_runtime_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct gc5035 *gc5035 = to_gc5035(sd); if (gc5035->streaming) __gc5035_stop_stream(gc5035); return 0; } static int gc5035_entity_init_cfg(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_TRY, .format = { .width = 2592, .height = 1944, } }; gc5035_set_fmt(subdev, sd_state, &fmt); return 0; } static const struct dev_pm_ops gc5035_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 
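/*
 * System sleep is routed through the runtime-PM callbacks:
 * pm_runtime_force_suspend()/_resume() end up calling
 * gc5035_runtime_suspend()/_resume(), which stop and restart streaming
 * around the transition.
 */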
pm_runtime_force_resume) SET_RUNTIME_PM_OPS(gc5035_runtime_suspend, gc5035_runtime_resume, NULL) }; static const struct v4l2_subdev_video_ops gc5035_video_ops = { .s_stream = gc5035_s_stream, }; static const struct v4l2_subdev_pad_ops gc5035_pad_ops = { .init_cfg = gc5035_entity_init_cfg, .enum_mbus_code = gc5035_enum_mbus_code, .enum_frame_size = gc5035_enum_frame_sizes, .get_fmt = gc5035_get_fmt, .set_fmt = gc5035_set_fmt, }; static const struct v4l2_subdev_ops gc5035_subdev_ops = { .video = &gc5035_video_ops, .pad = &gc5035_pad_ops, }; static const struct media_entity_operations gc5035_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static int gc5035_set_exposure(struct gc5035 *gc5035, u32 val) { u32 caltime = 0; int ret = 0; caltime = val / 2; caltime = caltime * 2; gc5035->Dgain_ratio = 256 * val / caltime; ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_EXPOSURE_H, (val >> 8) & GC5035_EXPOSURE_H_MASK); if (ret) return ret; return gc5035_write_reg(gc5035, GC5035_REG_EXPOSURE_L, val & 0xff); } static u32 GC5035_AGC_Param[17][2] = { { 256, 0 }, { 302, 1 }, { 358, 2 }, { 425, 3 }, { 502, 8 }, { 599, 9 }, { 717, 10 }, { 845, 11 }, { 998, 12 }, { 1203, 13 }, { 1434, 14 }, { 1710, 15 }, { 1997, 16 }, { 2355, 17 }, { 2816, 18 }, { 3318, 19 }, { 3994, 20 }, }; static int gc5035_set_analogue_gain(struct gc5035 *gc5035, u32 a_gain) { int ret = 0, i = 0; u32 temp_gain = 0; if (a_gain < 0x100) a_gain = 0x100; else if (a_gain > GC5035_ANALOG_GAIN_MAX) a_gain = GC5035_ANALOG_GAIN_MAX; for (i = 16; i >= 0; i--) { if (a_gain >= GC5035_AGC_Param[i][0]) break; } ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); if (ret) return ret; ret |= gc5035_write_reg(gc5035, GC5035_REG_ANALOG_GAIN, GC5035_AGC_Param[i][1]); temp_gain = a_gain; temp_gain = temp_gain * gc5035->Dgain_ratio / GC5035_AGC_Param[i][0]; ret |= gc5035_write_reg(gc5035, GC5035_REG_DIGI_GAIN_H, (temp_gain >> 8) & 0x0f); ret |= gc5035_write_reg(gc5035, GC5035_REG_DIGI_GAIN_L, temp_gain & 0xfc); return ret; } static int gc5035_set_vblank(struct gc5035 *gc5035, u32 val) { int frame_length = 0; int ret; frame_length = val + gc5035->cur_mode->height; ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_VTS_H, (frame_length >> 8) & GC5035_VTS_H_MASK); if (ret) return ret; return gc5035_write_reg(gc5035, GC5035_REG_VTS_L, frame_length & 0xff); } static int gc5035_enable_test_pattern(struct gc5035 *gc5035, u32 pattern) { int ret; if (pattern) pattern = GC5035_TEST_PATTERN_ENABLE; else pattern = GC5035_TEST_PATTERN_DISABLE; ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 1); if (ret) return ret; ret = gc5035_write_reg(gc5035, GC5035_REG_TEST_PATTERN, pattern); if (ret) return ret; return gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); } static int gc5035_set_ctrl(struct v4l2_ctrl *ctrl) { struct gc5035 *gc5035 = container_of(ctrl->handler, struct gc5035, ctrl_handler); struct i2c_client *client = gc5035->client; s64 max; int ret = 0; /* Propagate change of current control to all related controls */ switch (ctrl->id) { case V4L2_CID_VBLANK: /* Update max exposure while meeting expected vblanking */ max = gc5035->cur_mode->height + ctrl->val - GC5035_EXPOSURE_MARGIN; __v4l2_ctrl_modify_range(gc5035->exposure, gc5035->exposure->minimum, max, gc5035->exposure->step, gc5035->exposure->default_value); break; } if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: 
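/*
 * gc5035_set_exposure() also derives Dgain_ratio from the requested
 * exposure (the ratio between the requested value and its even-rounded
 * line count); the analogue-gain handler below folds that ratio into the
 * sensor's digital gain registers.
 */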
ret = gc5035_set_exposure(gc5035, ctrl->val); break; case V4L2_CID_ANALOGUE_GAIN: ret = gc5035_set_analogue_gain(gc5035, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: case V4L2_CID_HFLIP: case V4L2_CID_VFLIP: break; case V4L2_CID_VBLANK: ret = gc5035_set_vblank(gc5035, ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = gc5035_enable_test_pattern(gc5035, ctrl->val); break; default: ret = -EINVAL; break; }; pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops gc5035_ctrl_ops = { .s_ctrl = gc5035_set_ctrl, }; static int gc5035_initialize_controls(struct gc5035 *gc5035) { const struct gc5035_mode *mode; struct v4l2_ctrl_handler *handler; struct v4l2_ctrl *ctrl; u64 exposure_max; u32 h_blank, vblank_def; int ret; handler = &gc5035->ctrl_handler; mode = gc5035->cur_mode; ret = v4l2_ctrl_handler_init(handler, 8); if (ret) return ret; handler->lock = &gc5035->mutex; ctrl = v4l2_ctrl_new_int_menu(handler, NULL, V4L2_CID_LINK_FREQ, 0, 0, gc5035_link_freqs); if (ctrl) ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; v4l2_ctrl_new_std(handler, NULL, V4L2_CID_PIXEL_RATE, 0, GC5035_PIXEL_RATE, 1, GC5035_PIXEL_RATE); h_blank = mode->hts_def - mode->width; gc5035->hblank = v4l2_ctrl_new_std(handler, NULL, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (gc5035->hblank) gc5035->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; vblank_def = round_up(mode->vts_def, 4) - mode->height; gc5035->vblank = v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, V4L2_CID_VBLANK, vblank_def, GC5035_VTS_MAX - mode->height, 4, vblank_def); exposure_max = mode->vts_def - GC5035_EXPOSURE_MARGIN; gc5035->exposure = v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, V4L2_CID_EXPOSURE, GC5035_EXPOSURE_MIN, exposure_max, GC5035_EXPOSURE_STEP, mode->exp_def); v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, GC5035_ANALOG_GAIN_MIN, GC5035_ANALOG_GAIN_MAX, GC5035_ANALOG_GAIN_STEP, GC5035_ANALOG_GAIN_DEFAULT); v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, V4L2_CID_DIGITAL_GAIN, GC5035_DIGI_GAIN_MIN, GC5035_DIGI_GAIN_MAX, GC5035_DIGI_GAIN_STEP, GC5035_DIGI_GAIN_DEFAULT); v4l2_ctrl_new_std_menu_items(handler, &gc5035_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(gc5035_test_pattern_menu) - 1, 0, 0, gc5035_test_pattern_menu); if (handler->error) { ret = handler->error; dev_err(&gc5035->client->dev, "Failed to init controls(%d)\n", ret); goto err_free_handler; } gc5035->subdev.ctrl_handler = handler; return 0; err_free_handler: v4l2_ctrl_handler_free(handler); return ret; } static int gc5035_check_sensor_id(struct gc5035 *gc5035, struct i2c_client *client) { struct device *dev = &gc5035->client->dev; u16 id; u8 pid = 0; u8 ver = 0; int ret; ret = gc5035_read_reg(gc5035, GC5035_REG_CHIP_ID_H, &pid); if (ret) return ret; ret = gc5035_read_reg(gc5035, GC5035_REG_CHIP_ID_L, &ver); if (ret) return ret; id = GC5035_ID(pid, ver); if (id != GC5035_CHIP_ID) { dev_err(dev, "Unexpected sensor id(%04x)\n", id); return -EINVAL; } dev_dbg(dev, "Detected GC%04x sensor\n", id); return 0; } static int __maybe_unused gc5035_get_hwcfg(struct gc5035 *gc5035) { struct device *dev = &gc5035->client->dev; struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned long link_freq_mask = 0; unsigned int i, j; int ret; if (!fwnode) return -ENODEV; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -ENODEV; ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); if (ret) goto out; dev_dbg(dev, "num of link freqs: %d", 
bus_cfg.nr_of_link_frequencies); if (!bus_cfg.nr_of_link_frequencies) { dev_warn(dev, "no link frequencies defined"); goto out; } for (i = 0; i < ARRAY_SIZE(gc5035_link_freqs); ++i) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (bus_cfg.link_frequencies[j] == gc5035_link_freqs[i]) { link_freq_mask |= BIT(i); dev_dbg(dev, "Link frequency %lld supported\n", gc5035_link_freqs[i]); break; } } } if (!link_freq_mask) { dev_err(dev, "No supported link frequencies found\n"); ret = -EINVAL; goto out; } out: v4l2_fwnode_endpoint_free(&bus_cfg); fwnode_handle_put(ep); return ret; } static int gc5035_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct gc5035 *gc5035; struct v4l2_subdev *sd; int ret, i; u32 freq = 192000000UL; gc5035 = devm_kzalloc(dev, sizeof(*gc5035), GFP_KERNEL); if (!gc5035) return -ENOMEM; gc5035->client = client; gc5035_init_power_ctrl(gc5035); gc5035_set_power(gc5035, 1); gc5035->cur_mode = &gc5035_modes[0]; ret = clk_set_rate(gc5035->mclk, freq); if (ret < 0) return dev_err_probe(dev, ret, "Failed to set mclk rate\n"); gc5035->mclk_rate = clk_get_rate(gc5035->mclk); if (gc5035->mclk_rate != freq) dev_warn(dev, "mclk rate set to %lu instead of requested %u\n", gc5035->mclk_rate, freq); gc5035->iovdd_supply = devm_regulator_get(dev, "iovdd"); if (IS_ERR(gc5035->iovdd_supply)) return dev_err_probe(dev, PTR_ERR(gc5035->iovdd_supply), "Failed to get iovdd regulator\n"); for (i = 0; i < ARRAY_SIZE(gc5035_supplies); i++) gc5035->supplies[i].supply = gc5035_supplies[i]; ret = devm_regulator_bulk_get(&gc5035->client->dev, ARRAY_SIZE(gc5035_supplies), gc5035->supplies); if (ret) return dev_err_probe(dev, ret, "Failed to get regulators\n"); mutex_init(&gc5035->mutex); sd = &gc5035->subdev; v4l2_i2c_subdev_init(sd, client, &gc5035_subdev_ops); ret = gc5035_initialize_controls(gc5035); if (ret) { dev_err_probe(dev, ret, "Failed to initialize controls\n"); goto err_destroy_mutex; } ret = gc5035_runtime_resume(dev); if (ret) { dev_err_probe(dev, ret, "Failed to power on\n"); goto err_free_handler; } ret = gc5035_check_sensor_id(gc5035, client); if (ret) { dev_err_probe(dev, ret, "Sensor ID check failed\n"); goto err_power_off; } sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; sd->entity.ops = &gc5035_subdev_entity_ops; sd->entity.function = MEDIA_ENT_F_CAM_SENSOR; gc5035->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&sd->entity, 1, &gc5035->pad); if (ret < 0) { dev_err_probe(dev, ret, "Failed to initialize pads\n"); goto err_power_off; } ret = v4l2_async_register_subdev_sensor(sd); if (ret) { dev_err_probe(dev, ret, "v4l2 async register subdev failed\n"); goto err_clean_entity; } pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_idle(dev); gc5035_set_power(gc5035, 0); return 0; err_clean_entity: media_entity_cleanup(&sd->entity); err_power_off: gc5035_runtime_suspend(dev); err_free_handler: v4l2_ctrl_handler_free(&gc5035->ctrl_handler); gc5035_set_power(gc5035, 0); err_destroy_mutex: mutex_destroy(&gc5035->mutex); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int gc5035_remove(struct i2c_client *client) #else static void gc5035_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct gc5035 *gc5035 = to_gc5035(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(&gc5035->ctrl_handler); mutex_destroy(&gc5035->mutex); pm_runtime_disable(&client->dev); if (!pm_runtime_status_suspended(&client->dev)) 
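/* Power the sensor down ourselves if runtime PM has not already done so. */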
gc5035_runtime_suspend(&client->dev); pm_runtime_set_suspended(&client->dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } #ifdef CONFIG_ACPI static const struct acpi_device_id gc5035_acpi_ids[] = { {"GCTI5035"}, {} }; MODULE_DEVICE_TABLE(acpi, gc5035_acpi_ids); #endif static const struct of_device_id gc5035_of_match[] = { { .compatible = "galaxycore,gc5035" }, {}, }; MODULE_DEVICE_TABLE(of, gc5035_of_match); static struct i2c_driver gc5035_i2c_driver = { .driver = { .name = "gc5035", .pm = &gc5035_pm_ops, .acpi_match_table = ACPI_PTR(gc5035_acpi_ids), .of_match_table = gc5035_of_match, }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = gc5035_probe, #else .probe = gc5035_probe, #endif .remove = gc5035_remove, }; module_i2c_driver(gc5035_i2c_driver); MODULE_AUTHOR("Liang Wang "); MODULE_AUTHOR("Hao He "); MODULE_AUTHOR("Xingyu Wu "); MODULE_AUTHOR("Tomasz Figa "); MODULE_DESCRIPTION("GalaxyCore gc5035 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/hi556.c000066400000000000000000001046121471702545500231330ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Intel Corporation. #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include static const struct acpi_device_id cvfd_ids[] = { { "INTC1059", 0 }, { "INTC1095", 0 }, { "INTC100A", 0 }, { "INTC10CF", 0 }, {} }; #endif #define HI556_REG_VALUE_08BIT 1 #define HI556_REG_VALUE_16BIT 2 #define HI556_REG_VALUE_24BIT 3 #define HI556_LINK_FREQ_437MHZ 437000000ULL #define HI556_MCLK 19200000 #define HI556_DATA_LANES 2 #define HI556_RGB_DEPTH 10 #define HI556_REG_CHIP_ID 0x0f16 #define HI556_CHIP_ID 0x0556 #define HI556_REG_MODE_SELECT 0x0a00 #define HI556_MODE_STANDBY 0x0000 #define HI556_MODE_STREAMING 0x0100 /* vertical-timings from sensor */ #define HI556_REG_FLL 0x0006 #define HI556_FLL_30FPS 0x0814 #define HI556_FLL_30FPS_MIN 0x0814 #define HI556_FLL_MAX 0x7fff /* horizontal-timings from sensor */ #define HI556_REG_LLP 0x0008 /* Exposure controls from sensor */ #define HI556_REG_EXPOSURE 0x0074 #define HI556_EXPOSURE_MIN 6 #define HI556_EXPOSURE_MAX_MARGIN 2 #define HI556_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define HI556_REG_ANALOG_GAIN 0x0077 #define HI556_ANAL_GAIN_MIN 0 #define HI556_ANAL_GAIN_MAX 240 #define HI556_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define HI556_REG_MWB_GR_GAIN 0x0078 #define HI556_REG_MWB_GB_GAIN 0x007a #define HI556_REG_MWB_R_GAIN 0x007c #define HI556_REG_MWB_B_GAIN 0x007e #define HI556_DGTL_GAIN_MIN 0 #define HI556_DGTL_GAIN_MAX 2048 #define HI556_DGTL_GAIN_STEP 1 #define HI556_DGTL_GAIN_DEFAULT 256 /* Test Pattern Control */ #define HI556_REG_ISP 0X0a05 #define HI556_REG_ISP_TPG_EN 0x01 #define HI556_REG_TEST_PATTERN 0x0201 enum { HI556_LINK_FREQ_437MHZ_INDEX, }; struct hi556_reg { u16 address; u16 val; }; struct hi556_reg_list { u32 num_of_regs; const struct hi556_reg *regs; }; struct hi556_link_freq_config { const struct hi556_reg_list reg_list; }; struct hi556_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 llp; /* Default vertical timining size */ u32 fll_def; /* Min vertical timining size */ u32 fll_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* 
Sensor register settings for this resolution */ const struct hi556_reg_list reg_list; }; #define to_hi556(_sd) container_of(_sd, struct hi556, sd) //SENSOR_INITIALIZATION static const struct hi556_reg mipi_data_rate_874mbps[] = { {0x0e00, 0x0102}, {0x0e02, 0x0102}, {0x0e0c, 0x0100}, {0x2000, 0x7400}, {0x2002, 0x001c}, {0x2004, 0x0242}, {0x2006, 0x0942}, {0x2008, 0x7007}, {0x200a, 0x0fd9}, {0x200c, 0x0259}, {0x200e, 0x7008}, {0x2010, 0x160e}, {0x2012, 0x0047}, {0x2014, 0x2118}, {0x2016, 0x0041}, {0x2018, 0x00d8}, {0x201a, 0x0145}, {0x201c, 0x0006}, {0x201e, 0x0181}, {0x2020, 0x13cc}, {0x2022, 0x2057}, {0x2024, 0x7001}, {0x2026, 0x0fca}, {0x2028, 0x00cb}, {0x202a, 0x009f}, {0x202c, 0x7002}, {0x202e, 0x13cc}, {0x2030, 0x019b}, {0x2032, 0x014d}, {0x2034, 0x2987}, {0x2036, 0x2766}, {0x2038, 0x0020}, {0x203a, 0x2060}, {0x203c, 0x0e5d}, {0x203e, 0x181d}, {0x2040, 0x2066}, {0x2042, 0x20c4}, {0x2044, 0x5000}, {0x2046, 0x0005}, {0x2048, 0x0000}, {0x204a, 0x01db}, {0x204c, 0x025a}, {0x204e, 0x00c0}, {0x2050, 0x0005}, {0x2052, 0x0006}, {0x2054, 0x0ad9}, {0x2056, 0x0259}, {0x2058, 0x0618}, {0x205a, 0x0258}, {0x205c, 0x2266}, {0x205e, 0x20c8}, {0x2060, 0x2060}, {0x2062, 0x707b}, {0x2064, 0x0fdd}, {0x2066, 0x81b8}, {0x2068, 0x5040}, {0x206a, 0x0020}, {0x206c, 0x5060}, {0x206e, 0x3143}, {0x2070, 0x5081}, {0x2072, 0x025c}, {0x2074, 0x7800}, {0x2076, 0x7400}, {0x2078, 0x001c}, {0x207a, 0x0242}, {0x207c, 0x0942}, {0x207e, 0x0bd9}, {0x2080, 0x0259}, {0x2082, 0x7008}, {0x2084, 0x160e}, {0x2086, 0x0047}, {0x2088, 0x2118}, {0x208a, 0x0041}, {0x208c, 0x00d8}, {0x208e, 0x0145}, {0x2090, 0x0006}, {0x2092, 0x0181}, {0x2094, 0x13cc}, {0x2096, 0x2057}, {0x2098, 0x7001}, {0x209a, 0x0fca}, {0x209c, 0x00cb}, {0x209e, 0x009f}, {0x20a0, 0x7002}, {0x20a2, 0x13cc}, {0x20a4, 0x019b}, {0x20a6, 0x014d}, {0x20a8, 0x2987}, {0x20aa, 0x2766}, {0x20ac, 0x0020}, {0x20ae, 0x2060}, {0x20b0, 0x0e5d}, {0x20b2, 0x181d}, {0x20b4, 0x2066}, {0x20b6, 0x20c4}, {0x20b8, 0x50a0}, {0x20ba, 0x0005}, {0x20bc, 0x0000}, {0x20be, 0x01db}, {0x20c0, 0x025a}, {0x20c2, 0x00c0}, {0x20c4, 0x0005}, {0x20c6, 0x0006}, {0x20c8, 0x0ad9}, {0x20ca, 0x0259}, {0x20cc, 0x0618}, {0x20ce, 0x0258}, {0x20d0, 0x2266}, {0x20d2, 0x20c8}, {0x20d4, 0x2060}, {0x20d6, 0x707b}, {0x20d8, 0x0fdd}, {0x20da, 0x86b8}, {0x20dc, 0x50e0}, {0x20de, 0x0020}, {0x20e0, 0x5100}, {0x20e2, 0x3143}, {0x20e4, 0x5121}, {0x20e6, 0x7800}, {0x20e8, 0x3140}, {0x20ea, 0x01c4}, {0x20ec, 0x01c1}, {0x20ee, 0x01c0}, {0x20f0, 0x01c4}, {0x20f2, 0x2700}, {0x20f4, 0x3d40}, {0x20f6, 0x7800}, {0x20f8, 0xffff}, {0x27fe, 0xe000}, {0x3000, 0x60f8}, {0x3002, 0x187f}, {0x3004, 0x7060}, {0x3006, 0x0114}, {0x3008, 0x60b0}, {0x300a, 0x1473}, {0x300c, 0x0013}, {0x300e, 0x140f}, {0x3010, 0x0040}, {0x3012, 0x100f}, {0x3014, 0x60f8}, {0x3016, 0x187f}, {0x3018, 0x7060}, {0x301a, 0x0114}, {0x301c, 0x60b0}, {0x301e, 0x1473}, {0x3020, 0x0013}, {0x3022, 0x140f}, {0x3024, 0x0040}, {0x3026, 0x000f}, {0x0b00, 0x0000}, {0x0b02, 0x0045}, {0x0b04, 0xb405}, {0x0b06, 0xc403}, {0x0b08, 0x0081}, {0x0b0a, 0x8252}, {0x0b0c, 0xf814}, {0x0b0e, 0xc618}, {0x0b10, 0xa828}, {0x0b12, 0x004c}, {0x0b14, 0x4068}, {0x0b16, 0x0000}, {0x0f30, 0x5b15}, {0x0f32, 0x7067}, {0x0954, 0x0009}, {0x0956, 0x0000}, {0x0958, 0xbb80}, {0x095a, 0x5140}, {0x0c00, 0x1110}, {0x0c02, 0x0011}, {0x0c04, 0x0000}, {0x0c06, 0x0200}, {0x0c10, 0x0040}, {0x0c12, 0x0040}, {0x0c14, 0x0040}, {0x0c16, 0x0040}, {0x0a10, 0x4000}, {0x3068, 0xf800}, {0x306a, 0xf876}, {0x006c, 0x0000}, {0x005e, 0x0200}, {0x000e, 0x0100}, {0x0e0a, 0x0001}, {0x004a, 0x0100}, {0x004c, 0x0000}, {0x004e, 
0x0100}, {0x000c, 0x0022}, {0x0008, 0x0b00}, {0x005a, 0x0202}, {0x0012, 0x000e}, {0x0018, 0x0a33}, {0x0022, 0x0008}, {0x0028, 0x0017}, {0x0024, 0x0028}, {0x002a, 0x002d}, {0x0026, 0x0030}, {0x002c, 0x07c9}, {0x002e, 0x1111}, {0x0030, 0x1111}, {0x0032, 0x1111}, {0x0006, 0x07bc}, {0x0a22, 0x0000}, {0x0a12, 0x0a20}, {0x0a14, 0x0798}, {0x003e, 0x0000}, {0x0074, 0x080e}, {0x0070, 0x0407}, {0x0002, 0x0000}, {0x0a02, 0x0100}, {0x0a24, 0x0100}, {0x0046, 0x0000}, {0x0076, 0x0000}, {0x0060, 0x0000}, {0x0062, 0x0530}, {0x0064, 0x0500}, {0x0066, 0x0530}, {0x0068, 0x0500}, {0x0122, 0x0300}, {0x015a, 0xff08}, {0x0804, 0x0300}, {0x0806, 0x0100}, {0x005c, 0x0102}, {0x0a1a, 0x0800}, }; static const struct hi556_reg mode_2592x1944_regs[] = { {0x0a00, 0x0000}, {0x0b0a, 0x8252}, {0x0f30, 0x5b15}, {0x0f32, 0x7067}, {0x004a, 0x0100}, {0x004c, 0x0000}, {0x004e, 0x0100}, {0x000c, 0x0022}, {0x0008, 0x0b00}, {0x005a, 0x0202}, {0x0012, 0x000e}, {0x0018, 0x0a33}, {0x0022, 0x0008}, {0x0028, 0x0017}, {0x0024, 0x0028}, {0x002a, 0x002d}, {0x0026, 0x0030}, {0x002c, 0x07c9}, {0x002e, 0x1111}, {0x0030, 0x1111}, {0x0032, 0x1111}, {0x0006, 0x0814}, {0x0a22, 0x0000}, {0x0a12, 0x0a20}, {0x0a14, 0x0798}, {0x003e, 0x0000}, {0x0074, 0x0812}, {0x0070, 0x0409}, {0x0804, 0x0300}, {0x0806, 0x0100}, {0x0a04, 0x014a}, {0x090c, 0x0fdc}, {0x090e, 0x002d}, {0x0902, 0x4319}, {0x0914, 0xc10a}, {0x0916, 0x071f}, {0x0918, 0x0408}, {0x091a, 0x0c0d}, {0x091c, 0x0f09}, {0x091e, 0x0a00}, {0x0958, 0xbb80}, }; static const struct hi556_reg mode_1296x972_regs[] = { {0x0a00, 0x0000}, {0x0b0a, 0x8259}, {0x0f30, 0x5b15}, {0x0f32, 0x7167}, {0x004a, 0x0100}, {0x004c, 0x0000}, {0x004e, 0x0100}, {0x000c, 0x0122}, {0x0008, 0x0b00}, {0x005a, 0x0404}, {0x0012, 0x000c}, {0x0018, 0x0a33}, {0x0022, 0x0008}, {0x0028, 0x0017}, {0x0024, 0x0022}, {0x002a, 0x002b}, {0x0026, 0x0030}, {0x002c, 0x07c9}, {0x002e, 0x3311}, {0x0030, 0x3311}, {0x0032, 0x3311}, {0x0006, 0x0814}, {0x0a22, 0x0000}, {0x0a12, 0x0510}, {0x0a14, 0x03cc}, {0x003e, 0x0000}, {0x0074, 0x0812}, {0x0070, 0x0409}, {0x0804, 0x0308}, {0x0806, 0x0100}, {0x0a04, 0x016a}, {0x090e, 0x0010}, {0x090c, 0x09c0}, {0x0902, 0x4319}, {0x0914, 0xc106}, {0x0916, 0x040e}, {0x0918, 0x0304}, {0x091a, 0x0708}, {0x091c, 0x0e06}, {0x091e, 0x0300}, {0x0958, 0xbb80}, }; static const char * const hi556_test_pattern_menu[] = { "Disabled", "Solid Colour", "100% Colour Bars", "Fade To Grey Colour Bars", "PN9", "Gradient Horizontal", "Gradient Vertical", "Check Board", "Slant Pattern", }; static const s64 link_freq_menu_items[] = { HI556_LINK_FREQ_437MHZ, }; static const struct hi556_link_freq_config link_freq_configs[] = { [HI556_LINK_FREQ_437MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_874mbps), .regs = mipi_data_rate_874mbps, } } }; static const struct hi556_mode supported_modes[] = { { .width = 2592, .height = 1944, .fll_def = HI556_FLL_30FPS, .fll_min = HI556_FLL_30FPS_MIN, .llp = 0x0b00, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs), .regs = mode_2592x1944_regs, }, .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX, }, { .width = 1296, .height = 972, .fll_def = HI556_FLL_30FPS, .fll_min = HI556_FLL_30FPS_MIN, .llp = 0x0b00, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1296x972_regs), .regs = mode_1296x972_regs, }, .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX, } }; struct hi556 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl 
*hblank; struct v4l2_ctrl *exposure; /* GPIO for reset */ struct gpio_desc *reset; /* GPIO for Lattice handshake */ struct gpio_desc *handshake; /* regulator */ struct regulator *avdd; /* Clock provider */ struct clk *img_clk; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) struct vsc_mipi_config conf; struct vsc_camera_status status; struct v4l2_ctrl *privacy_status; #endif /* Current mode */ const struct hi556_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; /* True if the device has been identified */ bool identified; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) bool use_intel_vsc; #endif }; static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HI556_DATA_LANES; do_div(pixel_rate, HI556_RGB_DEPTH); return pixel_rate; } static int hi556_read_reg(struct hi556 *hi556, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret; if (len > 4) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[4 - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); u8 buf[6]; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); if (i2c_master_send(client, buf, len + 2) != len + 2) return -EIO; return 0; } static int hi556_write_reg_list(struct hi556 *hi556, const struct hi556_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); unsigned int i; int ret; for (i = 0; i < r_list->num_of_regs; i++) { ret = hi556_write_reg(hi556, r_list->regs[i].address, HI556_REG_VALUE_16BIT, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "failed to write reg 0x%4.4x. 
error = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int hi556_update_digital_gain(struct hi556 *hi556, u32 d_gain) { int ret; ret = hi556_write_reg(hi556, HI556_REG_MWB_GR_GAIN, HI556_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = hi556_write_reg(hi556, HI556_REG_MWB_GB_GAIN, HI556_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = hi556_write_reg(hi556, HI556_REG_MWB_R_GAIN, HI556_REG_VALUE_16BIT, d_gain); if (ret) return ret; return hi556_write_reg(hi556, HI556_REG_MWB_B_GAIN, HI556_REG_VALUE_16BIT, d_gain); } static int hi556_test_pattern(struct hi556 *hi556, u32 pattern) { int ret; u32 val; if (pattern) { ret = hi556_read_reg(hi556, HI556_REG_ISP, HI556_REG_VALUE_08BIT, &val); if (ret) return ret; ret = hi556_write_reg(hi556, HI556_REG_ISP, HI556_REG_VALUE_08BIT, val | HI556_REG_ISP_TPG_EN); if (ret) return ret; } return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN, HI556_REG_VALUE_08BIT, pattern); } static int hi556_set_ctrl(struct v4l2_ctrl *ctrl) { struct hi556 *hi556 = container_of(ctrl->handler, struct hi556, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = hi556->cur_mode->height + ctrl->val - HI556_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(hi556->exposure, hi556->exposure->minimum, exposure_max, hi556->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = hi556_write_reg(hi556, HI556_REG_ANALOG_GAIN, HI556_REG_VALUE_16BIT, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = hi556_update_digital_gain(hi556, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = hi556_write_reg(hi556, HI556_REG_EXPOSURE, HI556_REG_VALUE_16BIT, ctrl->val); break; case V4L2_CID_VBLANK: /* Update FLL that meets expected vertical blanking */ ret = hi556_write_reg(hi556, HI556_REG_FLL, HI556_REG_VALUE_16BIT, hi556->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = hi556_test_pattern(hi556, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops hi556_ctrl_ops = { .s_ctrl = hi556_set_ctrl, }; static int hi556_init_controls(struct hi556 *hi556) { struct v4l2_ctrl_handler *ctrl_hdlr; s64 exposure_max, h_blank; int ret; ctrl_hdlr = &hi556->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &hi556->mutex; hi556->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_LINK_FREQ, ARRAY_SIZE(link_freq_menu_items) - 1, 0, link_freq_menu_items); if (hi556->link_freq) hi556->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; hi556->pixel_rate = v4l2_ctrl_new_std (ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX), 1, to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX)); hi556->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_VBLANK, hi556->cur_mode->fll_min - 
hi556->cur_mode->height, HI556_FLL_MAX - hi556->cur_mode->height, 1, hi556->cur_mode->fll_def - hi556->cur_mode->height); h_blank = hi556->cur_mode->llp - hi556->cur_mode->width; hi556->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (hi556->hblank) hi556->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) hi556->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(hi556->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, HI556_ANAL_GAIN_MIN, HI556_ANAL_GAIN_MAX, HI556_ANAL_GAIN_STEP, HI556_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_DIGITAL_GAIN, HI556_DGTL_GAIN_MIN, HI556_DGTL_GAIN_MAX, HI556_DGTL_GAIN_STEP, HI556_DGTL_GAIN_DEFAULT); exposure_max = hi556->cur_mode->fll_def - HI556_EXPOSURE_MAX_MARGIN; hi556->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_EXPOSURE, HI556_EXPOSURE_MIN, exposure_max, HI556_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(hi556_test_pattern_menu) - 1, 0, 0, hi556_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; hi556->sd.ctrl_handler = ctrl_hdlr; return 0; } static void hi556_assign_pad_format(const struct hi556_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } static int hi556_identify_module(struct hi556 *hi556) { struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); int ret; u32 val; if (hi556->identified) return 0; ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID, HI556_REG_VALUE_16BIT, &val); if (ret) return ret; if (val != HI556_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", HI556_CHIP_ID, val); return -ENXIO; } hi556->identified = true; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void hi556_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct hi556 *hi556 = handle; v4l2_ctrl_s_ctrl(hi556->privacy_status, !status); } #endif static int hi556_start_streaming(struct hi556 *hi556) { struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); const struct hi556_reg_list *reg_list; int link_freq_index, ret; ret = hi556_identify_module(hi556); if (ret) return ret; link_freq_index = hi556->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = hi556_write_reg_list(hi556, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &hi556->cur_mode->reg_list; ret = hi556_write_reg_list(hi556, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(hi556->sd.ctrl_handler); if (ret) return ret; ret = hi556_write_reg(hi556, HI556_REG_MODE_SELECT, HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING); if (ret) { dev_err(&client->dev, "failed to set stream"); return ret; } return 0; } static void hi556_stop_streaming(struct hi556 *hi556) { struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd); if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT, HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY)) dev_err(&client->dev, "failed to set stream"); } static int hi556_set_stream(struct v4l2_subdev *sd, int enable) { struct hi556 *hi556 = to_hi556(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); 
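/*
 * Streaming start resumes the device via runtime PM, then writes the
 * PLL/link-rate table for the current mode's link frequency, the mode
 * register list and the cached V4L2 controls before flipping
 * HI556_REG_MODE_SELECT to streaming; streaming stop simply returns the
 * sensor to standby and drops the PM reference.
 */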
int ret = 0; if (hi556->streaming == enable) return 0; mutex_lock(&hi556->mutex); if (enable) { ret = pm_runtime_resume_and_get(&client->dev); if (ret < 0) { mutex_unlock(&hi556->mutex); return ret; } ret = hi556_start_streaming(hi556); if (ret) { enable = 0; hi556_stop_streaming(hi556); pm_runtime_put(&client->dev); } } else { hi556_stop_streaming(hi556); pm_runtime_put(&client->dev); } hi556->streaming = enable; mutex_unlock(&hi556->mutex); return ret; } static int hi556_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hi556 *hi556 = to_hi556(sd); int ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (hi556->use_intel_vsc) { ret = vsc_release_camera_sensor(&hi556->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); return ret; } #endif gpiod_set_value_cansleep(hi556->reset, 1); gpiod_set_value_cansleep(hi556->handshake, 0); if (hi556->avdd) ret = regulator_disable(hi556->avdd); clk_disable_unprepare(hi556->img_clk); return ret; } static int hi556_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hi556 *hi556 = to_hi556(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (hi556->use_intel_vsc) { hi556->conf.lane_num = HI556_DATA_LANES; /* frequency unit 100k */ hi556->conf.freq = HI556_LINK_FREQ_437MHZ / 100000; ret = vsc_acquire_camera_sensor(&hi556->conf, hi556_vsc_privacy_callback, hi556, &hi556->status); if (ret == -EAGAIN) return -EPROBE_DEFER; if (ret) { dev_err(dev, "Acquire VSC failed"); return ret; } if (hi556->privacy_status) __v4l2_ctrl_s_ctrl(hi556->privacy_status, !(hi556->status.status)); return ret; } #endif ret = clk_prepare_enable(hi556->img_clk); if (ret < 0) { dev_err(dev, "failed to enable imaging clock: %d", ret); return ret; } if (hi556->avdd) { ret = regulator_enable(hi556->avdd); if (ret < 0) { dev_err(dev, "failed to enable avdd: %d", ret); clk_disable_unprepare(hi556->img_clk); return ret; } } gpiod_set_value_cansleep(hi556->handshake, 1); gpiod_set_value_cansleep(hi556->reset, 0); /* Lattice MIPI aggregator with some version FW needs longer delay after handshake triggered. We set 25ms as a safe value and wait for a stable version FW. 
*/ msleep_interruptible(25); return ret; } static int __maybe_unused hi556_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hi556 *hi556 = to_hi556(sd); mutex_lock(&hi556->mutex); if (hi556->streaming) hi556_stop_streaming(hi556); mutex_unlock(&hi556->mutex); return 0; } static int __maybe_unused hi556_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hi556 *hi556 = to_hi556(sd); int ret; mutex_lock(&hi556->mutex); if (hi556->streaming) { ret = hi556_start_streaming(hi556); if (ret) goto error; } mutex_unlock(&hi556->mutex); return 0; error: hi556_stop_streaming(hi556); hi556->streaming = 0; mutex_unlock(&hi556->mutex); return ret; } static int hi556_set_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct hi556 *hi556 = to_hi556(sd); const struct hi556_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&hi556->mutex); hi556_assign_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { hi556->cur_mode = mode; __v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(hi556->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->fll_def - mode->height; __v4l2_ctrl_modify_range(hi556->vblank, mode->fll_min - mode->height, HI556_FLL_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(hi556->vblank, vblank_def); h_blank = hi556->cur_mode->llp - hi556->cur_mode->width; __v4l2_ctrl_modify_range(hi556->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&hi556->mutex); return 0; } static int hi556_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct hi556 *hi556 = to_hi556(sd); mutex_lock(&hi556->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else hi556_assign_pad_format(hi556->cur_mode, &fmt->format); mutex_unlock(&hi556->mutex); return 0; } static int hi556_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int hi556_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if 
(fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct hi556 *hi556 = to_hi556(sd); mutex_lock(&hi556->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) hi556_assign_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) hi556_assign_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->state, 0)); #else hi556_assign_pad_format(&supported_modes[0], v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&hi556->mutex); return 0; } static const struct v4l2_subdev_video_ops hi556_video_ops = { .s_stream = hi556_set_stream, }; static const struct v4l2_subdev_pad_ops hi556_pad_ops = { .set_fmt = hi556_set_format, .get_fmt = hi556_get_format, .enum_mbus_code = hi556_enum_mbus_code, .enum_frame_size = hi556_enum_frame_size, }; static const struct v4l2_subdev_ops hi556_subdev_ops = { .video = &hi556_video_ops, .pad = &hi556_pad_ops, }; static const struct media_entity_operations hi556_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops hi556_internal_ops = { .open = hi556_open, }; static int hi556_get_pm_resources(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hi556 *hi556 = to_hi556(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) acpi_handle handle = ACPI_HANDLE(dev); struct acpi_handle_list dep_devices; acpi_status status; int i = 0; hi556->use_intel_vsc = false; if (!acpi_has_method(handle, "_DEP")) return false; status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return false; } for (i = 0; i < dep_devices.count; i++) { struct acpi_device *dep_device = NULL; if (dep_devices.handles[i]) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) acpi_bus_get_device(dep_devices.handles[i], &dep_device); #else dep_device = acpi_fetch_acpi_dev(dep_devices.handles[i]); #endif if (dep_device && acpi_match_device_ids(dep_device, cvfd_ids) == 0) { hi556->use_intel_vsc = true; return 0; } } #endif hi556->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(hi556->reset)) return dev_err_probe(dev, PTR_ERR(hi556->reset), "failed to get reset gpio\n"); hi556->handshake = devm_gpiod_get_optional(dev, "handshake", GPIOD_OUT_LOW); if (IS_ERR(hi556->handshake)) return dev_err_probe(dev, PTR_ERR(hi556->handshake), "failed to get handshake gpio\n"); hi556->img_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(hi556->img_clk)) return dev_err_probe(dev, PTR_ERR(hi556->img_clk), "failed to get imaging clock\n"); hi556->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(hi556->avdd)) { ret = PTR_ERR(hi556->avdd); hi556->avdd = NULL; if (ret != -ENODEV) return dev_err_probe(dev, ret, "failed to get avdd regulator\n"); } return 0; } static int __maybe_unused hi556_check_hwcfg(struct device *dev) { struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; u32 mclk; int ret = 0; unsigned int i, j; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = 
fwnode_property_read_u32(fwnode, "clock-frequency", &mclk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } if (mclk != HI556_MCLK) { dev_err(dev, "external clock %d is not supported", mclk); return -EINVAL; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto check_hwcfg_error; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto check_hwcfg_error; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto check_hwcfg_error; } } check_hwcfg_error: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int hi556_remove(struct i2c_client *client) #else static void hi556_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct hi556 *hi556 = to_hi556(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&hi556->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int hi556_probe(struct i2c_client *client) { struct hi556 *hi556; bool full_power; int ret; ret = hi556_check_hwcfg(&client->dev); if (ret) return dev_err_probe(&client->dev, ret, "failed to check HW configuration: %d", ret); hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL); if (!hi556) { ret = -ENOMEM; goto probe_error_ret; } v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops); #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) full_power = acpi_dev_state_d0(&client->dev); #else full_power = true; #endif if (full_power) { ret = hi556_get_pm_resources(&client->dev); if (ret) return ret; ret = hi556_power_on(&client->dev); if (ret) { dev_err_probe(&client->dev, ret, "failed to power on\n"); goto probe_error_ret; } ret = hi556_identify_module(hi556); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_ret; } } mutex_init(&hi556->mutex); hi556->cur_mode = &supported_modes[0]; ret = hi556_init_controls(hi556); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } hi556->sd.internal_ops = &hi556_internal_ops; hi556->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; hi556->sd.entity.ops = &hi556_subdev_entity_ops; hi556->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; hi556->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ret = v4l2_async_register_subdev_sensor(&hi556->sd); if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* Set the device's state to active if it's in D0 state. 
*/ if (full_power) pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&hi556->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(hi556->sd.ctrl_handler); mutex_destroy(&hi556->mutex); probe_error_ret: hi556_power_off(&client->dev); return ret; } static const struct dev_pm_ops hi556_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume) SET_RUNTIME_PM_OPS(hi556_power_off, hi556_power_on, NULL) }; #ifdef CONFIG_ACPI static const struct acpi_device_id hi556_acpi_ids[] = { {"INT3537"}, {} }; MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids); #endif static struct i2c_driver hi556_i2c_driver = { .driver = { .name = "hi556", .pm = &hi556_pm_ops, .acpi_match_table = ACPI_PTR(hi556_acpi_ids), }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = hi556_probe, #else .probe = hi556_probe, #endif .remove = hi556_remove, #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE, #endif }; module_i2c_driver(hi556_i2c_driver); MODULE_AUTHOR("Shawn Tu "); MODULE_DESCRIPTION("Hynix HI556 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/hm11b1.c000066400000000000000000000746061471702545500232750ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020-2022 Intel Corporation. #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) #include #include #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) #include "power_ctrl_logic.h" #endif #define HM11B1_LINK_FREQ_384MHZ 384000000ULL #define HM11B1_SCLK 72000000LL #define HM11B1_MCLK 19200000 #define HM11B1_DATA_LANES 1 #define HM11B1_RGB_DEPTH 10 #define HM11B1_REG_CHIP_ID 0x0000 #define HM11B1_CHIP_ID 0x11B1 #define HM11B1_REG_MODE_SELECT 0x0100 #define HM11B1_MODE_STANDBY 0x00 #define HM11B1_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define HM11B1_REG_VTS 0x3402 #define HM11B1_VTS_DEF 0x037d #define HM11B1_VTS_MIN 0x0346 #define HM11B1_VTS_MAX 0xffff /* horizontal-timings from sensor */ #define HM11B1_REG_HTS 0x3404 /* Exposure controls from sensor */ #define HM11B1_REG_EXPOSURE 0x0202 #define HM11B1_EXPOSURE_MIN 2 #define HM11B1_EXPOSURE_MAX_MARGIN 2 #define HM11B1_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define HM11B1_REG_ANALOG_GAIN 0x0205 #define HM11B1_REG_ANALOG_GAIN_IR 0x0206 #define HM11B1_ANAL_GAIN_MIN 0 #define HM11B1_ANAL_GAIN_MAX 0xFF #define HM11B1_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define HM11B1_REG_DGTL_GAIN 0x0207 #define HM11B1_REG_DGTL_GAIN_IR 0x0209 #define HM11B1_DGTL_GAIN_MIN 0x0 #define HM11B1_DGTL_GAIN_MAX 0x3FF #define HM11B1_DGTL_GAIN_STEP 1 #define HM11B1_DGTL_GAIN_DEFAULT 0x100 /* register update control */ #define HM11B1_REG_COMMAND_UPDATE 0x104 /* Test Pattern Control */ #define HM11B1_REG_TEST_PATTERN 0x0601 #define HM11B1_TEST_PATTERN_ENABLE 1 #define HM11B1_TEST_PATTERN_BAR_SHIFT 1 enum { HM11B1_LINK_FREQ_384MHZ_INDEX, }; struct hm11b1_reg { u16 address; u8 val; }; struct hm11b1_reg_list { u32 num_of_regs; const struct hm11b1_reg *regs; }; struct hm11b1_link_freq_config { const struct hm11b1_reg_list reg_list; }; struct hm11b1_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 
vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct hm11b1_reg_list reg_list; }; static const struct hm11b1_reg mipi_data_rate_384mbps[] = { }; //RAW 10bit 1292x800_30fps_MIPI 384Mbps/lane static const struct hm11b1_reg sensor_1292x800_30fps_setting[] = { {0x0103, 0x00}, {0x0102, 0x01}, {0x0202, 0x03}, {0x0203, 0x7C}, {0x0205, 0x20}, {0x0207, 0x01}, {0x0208, 0x00}, {0x0209, 0x01}, {0x020A, 0x00}, {0x0300, 0x91}, {0x0301, 0x0A}, {0x0302, 0x02}, {0x0303, 0x2E}, {0x0304, 0x43}, {0x0306, 0x00}, {0x0307, 0x00}, {0x0340, 0x03}, {0x0341, 0x60}, {0x0342, 0x05}, {0x0343, 0xA0}, {0x0344, 0x00}, {0x0345, 0x00}, {0x0346, 0x03}, {0x0347, 0x2F}, {0x0350, 0xFF}, {0x0351, 0x00}, {0x0352, 0x00}, {0x0370, 0x00}, {0x0371, 0x00}, {0x0380, 0x00}, {0x0381, 0x00}, {0x0382, 0x00}, {0x1000, 0xC3}, {0x1001, 0xD0}, {0x100A, 0x13}, {0x2000, 0x00}, {0x2061, 0x01}, {0x2062, 0x00}, {0x2063, 0xC8}, {0x2100, 0x03}, {0x2101, 0xF0}, {0x2102, 0xF0}, {0x2103, 0x01}, {0x2104, 0x10}, {0x2105, 0x10}, {0x2106, 0x02}, {0x2107, 0x0A}, {0x2108, 0x10}, {0x2109, 0x15}, {0x210A, 0x1A}, {0x210B, 0x20}, {0x210C, 0x08}, {0x210D, 0x0A}, {0x210E, 0x0F}, {0x210F, 0x12}, {0x2110, 0x1C}, {0x2111, 0x20}, {0x2112, 0x23}, {0x2113, 0x2A}, {0x2114, 0x30}, {0x2115, 0x10}, {0x2116, 0x00}, {0x2117, 0x01}, {0x2118, 0x00}, {0x2119, 0x06}, {0x211A, 0x00}, {0x211B, 0x00}, {0x2615, 0x08}, {0x2616, 0x00}, {0x2700, 0x01}, {0x2711, 0x01}, {0x272F, 0x01}, {0x2800, 0x29}, {0x2821, 0xCE}, {0x2839, 0x27}, {0x283A, 0x01}, {0x2842, 0x01}, {0x2843, 0x00}, {0x3022, 0x11}, {0x3024, 0x30}, {0x3025, 0x12}, {0x3026, 0x00}, {0x3027, 0x81}, {0x3028, 0x01}, {0x3029, 0x00}, {0x302A, 0x30}, {0x3030, 0x00}, {0x3032, 0x00}, {0x3035, 0x01}, {0x303E, 0x00}, {0x3051, 0x00}, {0x3082, 0x0E}, {0x3084, 0x0D}, {0x30A8, 0x03}, {0x30C4, 0xA0}, {0x30D5, 0xC1}, {0x30D8, 0x00}, {0x30D9, 0x0D}, {0x30DB, 0xC2}, {0x30DE, 0x25}, {0x30E1, 0xC3}, {0x30E4, 0x25}, {0x30E7, 0xC4}, {0x30EA, 0x25}, {0x30ED, 0xC5}, {0x30F0, 0x25}, {0x30F2, 0x0C}, {0x30F3, 0x85}, {0x30F6, 0x25}, {0x30F8, 0x0C}, {0x30F9, 0x05}, {0x30FB, 0x40}, {0x30FC, 0x25}, {0x30FD, 0x54}, {0x30FE, 0x0C}, {0x3100, 0xC2}, {0x3103, 0x00}, {0x3104, 0x2B}, {0x3106, 0xC3}, {0x3109, 0x25}, {0x310C, 0xC4}, {0x310F, 0x25}, {0x3112, 0xC5}, {0x3115, 0x25}, {0x3117, 0x0C}, {0x3118, 0x85}, {0x311B, 0x25}, {0x311D, 0x0C}, {0x311E, 0x05}, {0x3121, 0x25}, {0x3123, 0x0C}, {0x3124, 0x0D}, {0x3126, 0x40}, {0x3127, 0x25}, {0x3128, 0x54}, {0x3129, 0x0C}, {0x3130, 0x20}, {0x3134, 0x60}, {0x3135, 0xC2}, {0x3139, 0x12}, {0x313A, 0x07}, {0x313F, 0x52}, {0x3140, 0x34}, {0x3141, 0x2E}, {0x314F, 0x07}, {0x3151, 0x47}, {0x3153, 0xB0}, {0x3154, 0x4A}, {0x3155, 0xC0}, {0x3157, 0x55}, {0x3158, 0x01}, {0x3165, 0xFF}, {0x316B, 0x12}, {0x316E, 0x12}, {0x3176, 0x12}, {0x3178, 0x01}, {0x317C, 0x10}, {0x317D, 0x05}, {0x317F, 0x07}, {0x3182, 0x07}, {0x3183, 0x11}, {0x3184, 0x88}, {0x3186, 0x28}, {0x3191, 0x00}, {0x3192, 0x20}, {0x3400, 0x48}, {0x3401, 0x00}, {0x3402, 0x06}, {0x3403, 0xFA}, {0x3404, 0x05}, {0x3405, 0x40}, {0x3406, 0x00}, {0x3407, 0x00}, {0x3408, 0x03}, {0x3409, 0x2F}, {0x340A, 0x00}, {0x340B, 0x00}, {0x340C, 0x00}, {0x340D, 0x00}, {0x340E, 0x00}, {0x340F, 0x00}, {0x3410, 0x00}, {0x3411, 0x01}, {0x3412, 0x00}, {0x3413, 0x03}, {0x3414, 0xB0}, {0x3415, 0x4A}, {0x3416, 0xC0}, {0x3418, 0x55}, {0x3419, 0x03}, {0x341B, 0x7D}, {0x341C, 0x00}, {0x341F, 0x03}, {0x3420, 0x00}, {0x3421, 0x02}, {0x3422, 0x00}, {0x3423, 0x02}, {0x3424, 
0x01}, {0x3425, 0x02}, {0x3426, 0x00}, {0x3427, 0xA2}, {0x3428, 0x01}, {0x3429, 0x06}, {0x342A, 0xF8}, {0x3440, 0x01}, {0x3441, 0xBE}, {0x3442, 0x02}, {0x3443, 0x18}, {0x3444, 0x03}, {0x3445, 0x0C}, {0x3446, 0x06}, {0x3447, 0x18}, {0x3448, 0x09}, {0x3449, 0x24}, {0x344A, 0x08}, {0x344B, 0x08}, {0x345C, 0x00}, {0x345D, 0x44}, {0x345E, 0x02}, {0x345F, 0x43}, {0x3460, 0x04}, {0x3461, 0x3B}, {0x3466, 0xF8}, {0x3467, 0x43}, {0x347D, 0x02}, {0x3483, 0x05}, {0x3484, 0x0C}, {0x3485, 0x03}, {0x3486, 0x20}, {0x3487, 0x00}, {0x3488, 0x00}, {0x3489, 0x00}, {0x348A, 0x09}, {0x348B, 0x00}, {0x348C, 0x00}, {0x348D, 0x02}, {0x348E, 0x01}, {0x348F, 0x40}, {0x3490, 0x00}, {0x3491, 0xC8}, {0x3492, 0x00}, {0x3493, 0x02}, {0x3494, 0x00}, {0x3495, 0x02}, {0x3496, 0x02}, {0x3497, 0x06}, {0x3498, 0x05}, {0x3499, 0x04}, {0x349A, 0x09}, {0x349B, 0x05}, {0x349C, 0x17}, {0x349D, 0x05}, {0x349E, 0x00}, {0x349F, 0x00}, {0x34A0, 0x00}, {0x34A1, 0x00}, {0x34A2, 0x08}, {0x34A3, 0x08}, {0x34A4, 0x00}, {0x34A5, 0x0B}, {0x34A6, 0x0C}, {0x34A7, 0x32}, {0x34A8, 0x10}, {0x34A9, 0xE0}, {0x34AA, 0x52}, {0x34AB, 0x00}, {0x34AC, 0x60}, {0x34AD, 0x2B}, {0x34AE, 0x25}, {0x34AF, 0x48}, {0x34B1, 0x06}, {0x34B2, 0xF8}, {0x34C3, 0xB0}, {0x34C4, 0x4A}, {0x34C5, 0xC0}, {0x34C7, 0x55}, {0x34C8, 0x03}, {0x34CB, 0x00}, {0x353A, 0x00}, {0x355E, 0x48}, {0x3572, 0xB0}, {0x3573, 0x4A}, {0x3574, 0xC0}, {0x3576, 0x55}, {0x3577, 0x03}, {0x357A, 0x00}, {0x35DA, 0x00}, {0x4003, 0x02}, {0x4004, 0x02}, }; static const char * const hm11b1_test_pattern_menu[] = { "Disabled", "Solid Color", "Color Bar", "Color Bar Blending", "PN11", }; static const s64 link_freq_menu_items[] = { HM11B1_LINK_FREQ_384MHZ, }; static const struct hm11b1_link_freq_config link_freq_configs[] = { [HM11B1_LINK_FREQ_384MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_384mbps), .regs = mipi_data_rate_384mbps, } }, }; static const struct hm11b1_mode supported_modes[] = { { .width = 1292, .height = 800, .hts = 1344, .vts_def = HM11B1_VTS_DEF, .vts_min = HM11B1_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(sensor_1292x800_30fps_setting), .regs = sensor_1292x800_30fps_setting, }, .link_freq_index = HM11B1_LINK_FREQ_384MHZ_INDEX, }, }; struct hm11b1 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; /* Current mode */ const struct hm11b1_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* i2c client */ struct i2c_client *client; #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) /* GPIO for reset */ struct gpio_desc *reset_gpio; /* GPIO for powerdown */ struct gpio_desc *powerdown_gpio; /* Clock provider */ struct clk *clk; #endif /* Streaming on/off */ bool streaming; }; static inline struct hm11b1 *to_hm11b1(struct v4l2_subdev *subdev) { return container_of(subdev, struct hm11b1, sd); } static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HM11B1_DATA_LANES; do_div(pixel_rate, HM11B1_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, HM11B1_SCLK); return ppl; } static int hm11b1_read_reg(struct hm11b1 *hm11b1, u16 reg, u16 len, u32 *val) { struct i2c_client *client = hm11b1->client; struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, 
addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int hm11b1_write_reg(struct hm11b1 *hm11b1, u16 reg, u16 len, u32 val) { struct i2c_client *client = hm11b1->client; u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) return ret < 0 ? ret : -EIO; return 0; } static int hm11b1_write_reg_list(struct hm11b1 *hm11b1, const struct hm11b1_reg_list *r_list) { struct i2c_client *client = hm11b1->client; unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = hm11b1_write_reg(hm11b1, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int hm11b1_update_digital_gain(struct hm11b1 *hm11b1, u32 d_gain) { struct i2c_client *client = hm11b1->client; int ret = 0; ret = hm11b1_write_reg(hm11b1, HM11B1_REG_DGTL_GAIN, 2, d_gain); if (ret) { dev_err(&client->dev, "failed to set HM11B1_REG_DGTL_GAIN"); return ret; } ret = hm11b1_write_reg(hm11b1, HM11B1_REG_DGTL_GAIN_IR, 2, d_gain); if (ret) { dev_err(&client->dev, "failed to set HM11B1_REG_DGTL_GAIN_IR"); return ret; } return ret; } static int hm11b1_test_pattern(struct hm11b1 *hm11b1, u32 pattern) { if (pattern) pattern = pattern << HM11B1_TEST_PATTERN_BAR_SHIFT | HM11B1_TEST_PATTERN_ENABLE; return hm11b1_write_reg(hm11b1, HM11B1_REG_TEST_PATTERN, 1, pattern); } static int hm11b1_set_ctrl(struct v4l2_ctrl *ctrl) { struct hm11b1 *hm11b1 = container_of(ctrl->handler, struct hm11b1, ctrl_handler); struct i2c_client *client = hm11b1->client; s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = hm11b1->cur_mode->height + ctrl->val - HM11B1_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(hm11b1->exposure, hm11b1->exposure->minimum, exposure_max, hm11b1->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = hm11b1_write_reg(hm11b1, HM11B1_REG_COMMAND_UPDATE, 1, 1); if (ret) { dev_err(&client->dev, "failed to enable HM11B1_REG_COMMAND_UPDATE"); pm_runtime_put(&client->dev); return ret; } switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = hm11b1_write_reg(hm11b1, HM11B1_REG_ANALOG_GAIN, 1, ctrl->val); ret |= hm11b1_write_reg(hm11b1, HM11B1_REG_ANALOG_GAIN_IR, 1, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = hm11b1_update_digital_gain(hm11b1, ctrl->val); break; case V4L2_CID_EXPOSURE: /* 4 least significant bits of expsoure are fractional part */ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: ret = hm11b1_write_reg(hm11b1, HM11B1_REG_VTS, 2, hm11b1->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = hm11b1_test_pattern(hm11b1, ctrl->val); break; default: ret = -EINVAL; break; } ret |= hm11b1_write_reg(hm11b1, HM11B1_REG_COMMAND_UPDATE, 1, 0); 
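	/* Drop the runtime PM reference taken by pm_runtime_get_if_in_use() above. */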
pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops hm11b1_ctrl_ops = { .s_ctrl = hm11b1_set_ctrl, }; static int hm11b1_init_controls(struct hm11b1 *hm11b1) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct hm11b1_mode *cur_mode; s64 exposure_max, h_blank, pixel_rate; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &hm11b1->ctrl_handler; ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); if (ret) return ret; ctrl_hdlr->lock = &hm11b1->mutex; cur_mode = hm11b1->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); hm11b1->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (hm11b1->link_freq) hm11b1->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate = to_pixel_rate(HM11B1_LINK_FREQ_384MHZ_INDEX); hm11b1->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, pixel_rate, 1, pixel_rate); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = HM11B1_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; hm11b1->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index); h_blank -= cur_mode->width; hm11b1->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (hm11b1->hblank) hm11b1->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, HM11B1_ANAL_GAIN_MIN, HM11B1_ANAL_GAIN_MAX, HM11B1_ANAL_GAIN_STEP, HM11B1_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_DIGITAL_GAIN, HM11B1_DGTL_GAIN_MIN, HM11B1_DGTL_GAIN_MAX, HM11B1_DGTL_GAIN_STEP, HM11B1_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - HM11B1_EXPOSURE_MAX_MARGIN; hm11b1->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_EXPOSURE, HM11B1_EXPOSURE_MIN, exposure_max, HM11B1_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(hm11b1_test_pattern_menu) - 1, 0, 0, hm11b1_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; hm11b1->sd.ctrl_handler = ctrl_hdlr; return 0; } static void hm11b1_update_pad_format(const struct hm11b1_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } static int hm11b1_start_streaming(struct hm11b1 *hm11b1) { struct i2c_client *client = hm11b1->client; const struct hm11b1_reg_list *reg_list; int link_freq_index; int ret = 0; link_freq_index = hm11b1->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = hm11b1_write_reg_list(hm11b1, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &hm11b1->cur_mode->reg_list; ret = hm11b1_write_reg_list(hm11b1, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(hm11b1->sd.ctrl_handler); if (ret) return ret; ret = hm11b1_write_reg(hm11b1, HM11B1_REG_MODE_SELECT, 1, HM11B1_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void hm11b1_stop_streaming(struct hm11b1 *hm11b1) { struct i2c_client *client = hm11b1->client; if (hm11b1_write_reg(hm11b1, HM11B1_REG_MODE_SELECT, 1, HM11B1_MODE_STANDBY)) dev_err(&client->dev, "failed to 
stop streaming"); } static int hm11b1_set_stream(struct v4l2_subdev *sd, int enable) { struct hm11b1 *hm11b1 = to_hm11b1(sd); struct i2c_client *client = hm11b1->client; int ret = 0; if (hm11b1->streaming == enable) return 0; mutex_lock(&hm11b1->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&hm11b1->mutex); return ret; } ret = hm11b1_start_streaming(hm11b1); if (ret) { enable = 0; hm11b1_stop_streaming(hm11b1); pm_runtime_put(&client->dev); } } else { hm11b1_stop_streaming(hm11b1); pm_runtime_put(&client->dev); } hm11b1->streaming = enable; mutex_unlock(&hm11b1->mutex); return ret; } static int hm11b1_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm11b1 *hm11b1 = to_hm11b1(sd); int ret = 0; #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) gpiod_set_value_cansleep(hm11b1->reset_gpio, 1); gpiod_set_value_cansleep(hm11b1->powerdown_gpio, 1); clk_disable_unprepare(hm11b1->clk); msleep(20); #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) ret = power_ctrl_logic_set_power(0); #endif return ret; } static int hm11b1_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm11b1 *hm11b1 = to_hm11b1(sd); int ret = 0; #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) ret = clk_prepare_enable(hm11b1->clk); gpiod_set_value_cansleep(hm11b1->powerdown_gpio, 0); gpiod_set_value_cansleep(hm11b1->reset_gpio, 0); msleep(20); #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) ret = power_ctrl_logic_set_power(1); #endif return ret; } static int __maybe_unused hm11b1_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct hm11b1 *hm11b1 = to_hm11b1(sd); mutex_lock(&hm11b1->mutex); if (hm11b1->streaming) hm11b1_stop_streaming(hm11b1); mutex_unlock(&hm11b1->mutex); return 0; } static int __maybe_unused hm11b1_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct hm11b1 *hm11b1 = to_hm11b1(sd); int ret = 0; mutex_lock(&hm11b1->mutex); if (!hm11b1->streaming) goto exit; ret = hm11b1_start_streaming(hm11b1); if (ret) { hm11b1->streaming = false; hm11b1_stop_streaming(hm11b1); } exit: mutex_unlock(&hm11b1->mutex); return ret; } static int hm11b1_set_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct hm11b1 *hm11b1 = to_hm11b1(sd); const struct hm11b1_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&hm11b1->mutex); hm11b1_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { hm11b1->cur_mode = mode; __v4l2_ctrl_s_ctrl(hm11b1->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(hm11b1->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(hm11b1->vblank, mode->vts_min - mode->height, HM11B1_VTS_MAX - 
mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(hm11b1->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(hm11b1->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&hm11b1->mutex); return 0; } static int hm11b1_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct hm11b1 *hm11b1 = to_hm11b1(sd); mutex_lock(&hm11b1->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&hm11b1->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&hm11b1->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else hm11b1_update_pad_format(hm11b1->cur_mode, &fmt->format); mutex_unlock(&hm11b1->mutex); return 0; } static int hm11b1_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int hm11b1_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int hm11b1_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct hm11b1 *hm11b1 = to_hm11b1(sd); mutex_lock(&hm11b1->mutex); hm11b1_update_pad_format(&supported_modes[0], #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) v4l2_subdev_get_try_format(sd, fh->state, 0)); #else v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&hm11b1->mutex); return 0; } static const struct v4l2_subdev_video_ops hm11b1_video_ops = { .s_stream = hm11b1_set_stream, }; static const struct v4l2_subdev_pad_ops hm11b1_pad_ops = { .set_fmt = hm11b1_set_format, .get_fmt = hm11b1_get_format, .enum_mbus_code = hm11b1_enum_mbus_code, .enum_frame_size = hm11b1_enum_frame_size, }; static const struct v4l2_subdev_ops hm11b1_subdev_ops = { .video = &hm11b1_video_ops, .pad = &hm11b1_pad_ops, }; static const struct media_entity_operations hm11b1_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops hm11b1_internal_ops = { .open = hm11b1_open, }; static int hm11b1_identify_module(struct hm11b1 *hm11b1) { struct i2c_client *client = hm11b1->client; int ret; u32 val; ret = hm11b1_read_reg(hm11b1, HM11B1_REG_CHIP_ID, 2, &val); if (ret) return ret; if (val != HM11B1_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", HM11B1_CHIP_ID, val); return -ENXIO; } return 0; } static int hm11b1_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); 
unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != HM11B1_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int hm11b1_remove(struct i2c_client *client) #else static void hm11b1_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct hm11b1 *hm11b1 = to_hm11b1(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&hm11b1->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) static int hm11b1_parse_power(struct hm11b1 *hm11b1) { struct device *dev = &hm11b1->client->dev; long ret; hm11b1->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(hm11b1->reset_gpio)) { ret = PTR_ERR(hm11b1->reset_gpio); dev_err(dev, "error while getting reset gpio: %ld\n", ret); hm11b1->reset_gpio = NULL; return (int)ret; } hm11b1->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_LOW); if (IS_ERR(hm11b1->powerdown_gpio)) { ret = PTR_ERR(hm11b1->powerdown_gpio); dev_err(dev, "error while getting powerdown gpio: %ld\n", ret); hm11b1->powerdown_gpio = NULL; return ret; } hm11b1->clk = devm_clk_get_optional(dev, "clk"); if (IS_ERR(hm11b1->clk)) { ret = PTR_ERR(hm11b1->clk); dev_err(dev, "error while getting clk: %ld\n", ret); hm11b1->clk = NULL; return ret; } return 0; } #endif static int hm11b1_probe(struct i2c_client *client) { struct hm11b1 *hm11b1; int ret = 0; /* Check HW config */ ret = hm11b1_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } hm11b1 = devm_kzalloc(&client->dev, sizeof(*hm11b1), GFP_KERNEL); if (!hm11b1) return -ENOMEM; hm11b1->client = client; v4l2_i2c_subdev_init(&hm11b1->sd, client, &hm11b1_subdev_ops); #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) ret = hm11b1_parse_power(hm11b1); if (ret) return ret; ret = hm11b1_power_on(&client->dev); if (ret) return ret; #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) ret = power_ctrl_logic_set_power(1); if (ret) return ret; #endif ret = hm11b1_identify_module(hm11b1); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_power_off; } mutex_init(&hm11b1->mutex); hm11b1->cur_mode = &supported_modes[0]; ret = hm11b1_init_controls(hm11b1); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } hm11b1->sd.internal_ops = &hm11b1_internal_ops; 
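	/* Expose a v4l-subdev device node and register the sensor as a media entity with a single source pad. */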
hm11b1->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; hm11b1->sd.entity.ops = &hm11b1_subdev_entity_ops; hm11b1->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; hm11b1->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&hm11b1->sd.entity, 1, &hm11b1->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) ret = v4l2_async_register_subdev_sensor_common(&hm11b1->sd); #else ret = v4l2_async_register_subdev_sensor(&hm11b1->sd); #endif if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&hm11b1->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(hm11b1->sd.ctrl_handler); mutex_destroy(&hm11b1->mutex); probe_error_power_off: hm11b1_power_off(&client->dev); return ret; } static const struct dev_pm_ops hm11b1_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(hm11b1_suspend, hm11b1_resume) SET_RUNTIME_PM_OPS(hm11b1_power_off, hm11b1_power_on, NULL) }; #ifdef CONFIG_ACPI static const struct acpi_device_id hm11b1_acpi_ids[] = { {"HIMX11B1"}, {} }; MODULE_DEVICE_TABLE(acpi, hm11b1_acpi_ids); #endif static struct i2c_driver hm11b1_i2c_driver = { .driver = { .name = "hm11b1", .pm = &hm11b1_pm_ops, .acpi_match_table = ACPI_PTR(hm11b1_acpi_ids), }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = hm11b1_probe, #else .probe = hm11b1_probe, #endif .remove = hm11b1_remove, }; module_i2c_driver(hm11b1_i2c_driver); MODULE_AUTHOR("Qiu, Tianshu "); MODULE_AUTHOR("Shawn Tu "); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Lai, Jim "); MODULE_DESCRIPTION("Himax HM11B1 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/hm2170.c000066400000000000000000000774021471702545500232170ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. 
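/*
 * Himax HM2170 camera sensor driver: 2-lane MIPI CSI-2, 10-bit SGRBG Bayer
 * output, one 1928x1088 mode with separate register tables for silicon
 * revisions B and D (the revision is selected at probe from the chip ID read).
 */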
#include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include #endif #define HM2170_LINK_FREQ_384MHZ 384000000ULL #define HM2170_SCLK 76000000LL #define HM2170_MCLK 19200000 #define HM2170_DATA_LANES 2 #define HM2170_RGB_DEPTH 10 #define HM2170_REG_DELAY 0xffff #define HM2170_REG_CHIP_ID 0x0000 #define HM2170_CHIP_ID 0x2170 #define HM2170_REG_SILICON_REV 0x0002 #define HM2170_REG_MODE_SELECT 0x0100 #define HM2170_MODE_STANDBY 0x00 #define HM2170_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define HM2170_REG_VTS 0x4809 #define HM2170_VTS_DEF 0x0484 #define HM2170_VTS_MIN 0x0484 #define HM2170_VTS_MAX 0x7fff /* horizontal-timings from sensor */ #define HM2170_REG_HTS 0x480B /* Exposure controls from sensor */ #define HM2170_REG_EXPOSURE 0x0202 #define HM2170_EXPOSURE_MIN 2 #define HM2170_EXPOSURE_MAX_MARGIN 2 #define HM2170_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define HM2170_REG_ANALOG_GAIN 0x0208 #define HM2170_ANAL_GAIN_MIN 0 #define HM2170_ANAL_GAIN_MAX 80 #define HM2170_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define HM2170_REG_DIGITAL_GAIN 0x020A #define HM2170_DGTL_GAIN_MIN 256 #define HM2170_DGTL_GAIN_MAX 1020 #define HM2170_DGTL_GAIN_STEP 1 #define HM2170_DGTL_GAIN_DEFAULT 256 /* Register update control */ #define HM2170_REG_COMMAND_UPDATE 0x0104 #define HM2170_COMMAND_UPDATE 0x00 #define HM2170_COMMAND_HOLD 0x01 /* Test Pattern Control */ #define HM2170_REG_TEST_PATTERN 0x0601 #define HM2170_TEST_PATTERN_ENABLE BIT(0) #define HM2170_TEST_PATTERN_BAR_SHIFT 1 enum { HM2170_LINK_FREQ_384MHZ_INDEX, }; enum hm2170_silicon_revision { HM2170_SILICON_REV_B = 0, HM2170_SILICON_REV_D = 1, }; struct hm2170_reg { u16 address; u8 val; }; struct hm2170_reg_list { u32 num_of_regs; const struct hm2170_reg *regs; }; struct hm2170_link_freq_config { const struct hm2170_reg_list reg_list; }; struct hm2170_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct hm2170_reg_list reg_list; }; static const struct hm2170_reg mode_1928x1088_ver_b_regs[] = { {0x0103, 0x00}, {0xffff, 0x10}, {0x0202, 0x03}, {0x0203, 0x60}, {0x0300, 0x5E}, {0x0301, 0x3F}, {0x0302, 0x07}, {0x0303, 0x04}, {0x0350, 0x61}, {0x1000, 0xC3}, {0x1001, 0xC0}, {0x2000, 0x00}, {0x2088, 0x01}, {0x2089, 0x00}, {0x208A, 0xC8}, {0x2700, 0x00}, {0x2711, 0x01}, {0x2713, 0x04}, {0x272F, 0x01}, {0x2800, 0x01}, {0x2821, 0x8E}, {0x2823, 0x01}, {0x282E, 0x01}, {0x282F, 0xC0}, {0x2839, 0x13}, {0x283A, 0x01}, {0x283B, 0x0F}, {0x2842, 0x0C}, {0x2846, 0x01}, {0x2847, 0x94}, {0x3001, 0x00}, {0x3002, 0x88}, {0x3004, 0x02}, {0x3024, 0x20}, {0x3025, 0x12}, {0x3026, 0x00}, {0x3027, 0x81}, {0x3028, 0x01}, {0x3029, 0x00}, {0x302A, 0x30}, {0x3042, 0x00}, {0x3070, 0x01}, {0x307B, 0x08}, {0x30C4, 0x20}, {0x30D0, 0x01}, {0x30D2, 0x8E}, {0x30D4, 0x14}, {0x30D7, 0x21}, {0x30D9, 0x9E}, {0x30DA, 0x14}, {0x30DE, 0x41}, {0x30E0, 0x9E}, {0x30E2, 0x14}, {0x30E5, 0x61}, {0x30E7, 0x9F}, {0x30E9, 0x14}, {0x30EC, 0x43}, {0x30EE, 0x9F}, {0x30F0, 0x14}, {0x30F3, 0x63}, {0x30F5, 0x9F}, {0x30F6, 0x14}, {0x30F8, 0x00}, {0x3101, 0x02}, 
{0x3103, 0x9E}, {0x3104, 0x14}, {0x3108, 0x22}, {0x310A, 0x9E}, {0x310B, 0x14}, {0x310F, 0x42}, {0x3111, 0x9E}, {0x3112, 0x14}, {0x3116, 0x62}, {0x3118, 0x9F}, {0x3119, 0x14}, {0x311D, 0x44}, {0x311F, 0x9F}, {0x3120, 0x14}, {0x3124, 0x64}, {0x3126, 0x9F}, {0x3127, 0x14}, {0x3135, 0x01}, {0x3137, 0x03}, {0x313C, 0x52}, {0x313E, 0x68}, {0x3144, 0x3E}, {0x3145, 0x24}, {0x3146, 0x08}, {0x3147, 0x13}, {0x3148, 0x13}, {0x3149, 0x6C}, {0x314A, 0x13}, {0x314B, 0x03}, {0x314C, 0xF8}, {0x314D, 0x04}, {0x314E, 0x10}, {0x3161, 0x11}, {0x3171, 0x05}, {0x317A, 0x21}, {0x317B, 0xF0}, {0x317C, 0x07}, {0x317D, 0x09}, {0x3183, 0x18}, {0x3184, 0x4A}, {0x318E, 0x88}, {0x318F, 0x00}, {0x3190, 0x00}, {0x4003, 0x02}, {0x4004, 0x02}, {0x4800, 0x26}, {0x4801, 0x10}, {0x4802, 0x00}, {0x4803, 0x00}, {0x4804, 0x3F}, {0x4805, 0x7F}, {0x4806, 0x3F}, {0x4807, 0x1F}, {0x4809, 0x04}, {0x480A, 0x84}, {0x480B, 0x08}, {0x480C, 0x90}, {0x480D, 0x00}, {0x480E, 0x00}, {0x480F, 0x04}, {0x4810, 0x3F}, {0x4811, 0x00}, {0x4812, 0x00}, {0x4813, 0x00}, {0x4814, 0x00}, {0x4815, 0x00}, {0x4816, 0x00}, {0x4817, 0x00}, {0x4818, 0x00}, {0x4819, 0x03}, {0x481F, 0x00}, {0x4820, 0x0E}, {0x4821, 0x0E}, {0x4840, 0x00}, {0x4844, 0x00}, {0x4845, 0x00}, {0x4846, 0x00}, {0x4847, 0x00}, {0x4848, 0x00}, {0x4849, 0xF1}, {0x484A, 0x00}, {0x484B, 0x88}, {0x484C, 0x01}, {0x484D, 0x04}, {0x484E, 0x64}, {0x484F, 0x50}, {0x4850, 0x04}, {0x4851, 0x00}, {0x4852, 0x01}, {0x4853, 0x19}, {0x4854, 0x50}, {0x4855, 0x04}, {0x4856, 0x00}, {0x4863, 0x02}, {0x4864, 0x3D}, {0x4865, 0x02}, {0x4866, 0xB0}, {0x4880, 0x00}, {0x48A0, 0x00}, {0x48A1, 0x04}, {0x48A2, 0x01}, {0x48A3, 0xDD}, {0x48A4, 0x0C}, {0x48A5, 0x3B}, {0x48A6, 0x20}, {0x48A7, 0x20}, {0x48A8, 0x20}, {0x48A9, 0x20}, {0x48AA, 0x00}, {0x48C0, 0x3F}, {0x48C1, 0x29}, {0x48C3, 0x14}, {0x48C4, 0x00}, {0x48C5, 0x07}, {0x48C6, 0x88}, {0x48C7, 0x04}, {0x48C8, 0x40}, {0x48C9, 0x00}, {0x48CA, 0x00}, {0x48CB, 0x00}, {0x48CC, 0x1F}, {0x48F0, 0x00}, {0x48F1, 0x00}, {0x48F2, 0x04}, {0x48F3, 0x01}, {0x48F4, 0xE0}, {0x48F5, 0x01}, {0x48F6, 0x10}, {0x48F7, 0x00}, {0x48F8, 0x00}, {0x48F9, 0x00}, {0x48FA, 0x00}, {0x48FB, 0x01}, {0x4931, 0x2B}, {0x4932, 0x01}, {0x4933, 0x01}, {0x4934, 0x00}, {0x4935, 0x0F}, {0x4980, 0x00}, {0x4A72, 0x01}, {0x4A73, 0x01}, {0x4C30, 0x00}, {0x4CF2, 0x01}, {0x4CF3, 0x01}, {0x0104, 0x00}, }; static const struct hm2170_reg mode_1928x1088_ver_d_regs[] = { {0x0103, 0x00}, {0xffff, 0x10}, {0x0202, 0x03}, {0x0203, 0x60}, {0x0300, 0x5E}, {0x0301, 0x3F}, {0x0302, 0x07}, {0x0303, 0x04}, {0x0350, 0x61}, {0x1000, 0xC3}, {0x1001, 0xC0}, {0x2000, 0x00}, {0x2088, 0x01}, {0x2089, 0x00}, {0x208A, 0xC8}, {0x2700, 0x00}, {0x2711, 0x01}, {0x2713, 0x04}, {0x272F, 0x01}, {0x2800, 0x01}, {0x2821, 0x8E}, {0x2823, 0x01}, {0x282E, 0x01}, {0x282F, 0xC0}, {0x2839, 0x13}, {0x283A, 0x01}, {0x283B, 0x0F}, {0x2842, 0x0C}, {0x2846, 0x01}, {0x2847, 0x94}, {0x3001, 0x00}, {0x3002, 0x88}, {0x3004, 0x02}, {0x3024, 0x20}, {0x3025, 0x12}, {0x3026, 0x00}, {0x3027, 0x81}, {0x3028, 0x01}, {0x3029, 0x00}, {0x302A, 0x30}, {0x3042, 0x00}, {0x3070, 0x01}, {0x307B, 0x08}, {0x30C4, 0x20}, {0x30D0, 0x02}, {0x30D1, 0x03}, {0x30D2, 0x3F}, {0x30D3, 0x15}, {0x30D7, 0x03}, {0x30D8, 0x03}, {0x30D9, 0x3F}, {0x30DA, 0x15}, {0x30DE, 0x04}, {0x30DF, 0x03}, {0x30E0, 0x3F}, {0x30E1, 0x15}, {0x30E5, 0x24}, {0x30E6, 0x03}, {0x30E7, 0x3F}, {0x30E8, 0x15}, {0x30EC, 0x2C}, {0x30ED, 0x03}, {0x30EE, 0x3F}, {0x30EF, 0x15}, {0x30F3, 0x2C}, {0x30F4, 0x03}, {0x30F5, 0x3F}, {0x30F6, 0x15}, {0x30F8, 0x01}, {0x3101, 0x02}, {0x3102, 0x01}, {0x3103, 0x1F}, {0x3104, 0x15}, 
{0x3108, 0x03}, {0x3109, 0x01}, {0x310A, 0x1F}, {0x310B, 0x14}, {0x310F, 0x04}, {0x3110, 0x01}, {0x3111, 0x1F}, {0x3112, 0x13}, {0x3116, 0x24}, {0x3117, 0x01}, {0x3118, 0x3F}, {0x3119, 0x13}, {0x311D, 0x2C}, {0x311E, 0x01}, {0x311F, 0x3F}, {0x3120, 0x13}, {0x3121, 0x94}, {0x3124, 0x2C}, {0x3125, 0x01}, {0x3126, 0x3F}, {0x3127, 0x13}, {0x3129, 0x01}, {0x3135, 0x01}, {0x3137, 0x03}, {0x3139, 0x37}, {0x313C, 0x52}, {0x313E, 0x68}, {0x3144, 0x3E}, {0x3145, 0xE4}, {0x3146, 0x58}, {0x3147, 0x13}, {0x3148, 0x11}, {0x3149, 0x27}, {0x314A, 0x13}, {0x314B, 0x03}, {0x314C, 0x0C}, {0x314D, 0x00}, {0x314E, 0x10}, {0x3158, 0x01}, {0x3161, 0x11}, {0x3171, 0x05}, {0x317A, 0x21}, {0x317B, 0xF0}, {0x317C, 0x0C}, {0x317D, 0x09}, {0x3182, 0x88}, {0x3183, 0x18}, {0x3184, 0x40}, {0x318E, 0x88}, {0x318F, 0x00}, {0x3190, 0x00}, {0x4003, 0x02}, {0x4004, 0x02}, {0x4800, 0x26}, {0x4801, 0x21}, {0x4802, 0x10}, {0x4803, 0x00}, {0x4804, 0x3F}, {0x4805, 0x7F}, {0x4806, 0x3F}, {0x4807, 0x1F}, {0x4809, 0x04}, {0x480A, 0x84}, {0x480B, 0x04}, {0x480C, 0x48}, {0x480D, 0x00}, {0x480E, 0x00}, {0x480F, 0x04}, {0x4810, 0x3F}, {0x4811, 0x00}, {0x4812, 0x00}, {0x4813, 0x00}, {0x4814, 0x00}, {0x4815, 0x00}, {0x4816, 0x00}, {0x4817, 0x00}, {0x4818, 0x00}, {0x4819, 0x02}, {0x481F, 0x00}, {0x4820, 0x0E}, {0x4821, 0x0E}, {0x4840, 0x00}, {0x4844, 0x00}, {0x4845, 0x00}, {0x4846, 0x00}, {0x4847, 0x00}, {0x4848, 0x00}, {0x4849, 0xF1}, {0x484A, 0x00}, {0x484B, 0x88}, {0x484C, 0x01}, {0x484D, 0x04}, {0x484E, 0x64}, {0x484F, 0x50}, {0x4850, 0x04}, {0x4851, 0x00}, {0x4852, 0x01}, {0x4853, 0x19}, {0x4854, 0x50}, {0x4855, 0x04}, {0x4856, 0x00}, {0x4863, 0x02}, {0x4864, 0x3D}, {0x4865, 0x02}, {0x4866, 0xB0}, {0x4880, 0x00}, {0x48A0, 0x00}, {0x48A1, 0x04}, {0x48A2, 0x01}, {0x48A3, 0xDD}, {0x48A4, 0x0C}, {0x48A5, 0x3B}, {0x48A6, 0x20}, {0x48A7, 0x20}, {0x48A8, 0x20}, {0x48A9, 0x20}, {0x48AA, 0x00}, {0x48C0, 0x3F}, {0x48C1, 0x29}, {0x48C3, 0x14}, {0x48C4, 0x00}, {0x48C5, 0x07}, {0x48C6, 0x88}, {0x48C7, 0x04}, {0x48C8, 0x40}, {0x48C9, 0x00}, {0x48CA, 0x00}, {0x48CB, 0x00}, {0x48CC, 0x1F}, {0x48F0, 0x00}, {0x48F1, 0x00}, {0x48F2, 0x04}, {0x48F3, 0x01}, {0x48F4, 0xE0}, {0x48F5, 0x01}, {0x48F6, 0x10}, {0x48F7, 0x00}, {0x48F8, 0x00}, {0x48F9, 0x00}, {0x48FA, 0x00}, {0x48FB, 0x01}, {0x4931, 0x2B}, {0x4932, 0x01}, {0x4933, 0x01}, {0x4934, 0x00}, {0x4935, 0x0F}, {0x4980, 0x00}, {0x4A72, 0x01}, {0x4A73, 0x01}, {0x4C30, 0x00}, {0x4CF2, 0x01}, {0x4CF3, 0x01}, {0x0104, 0x00}, }; static const char * const hm2170_test_pattern_menu[] = { "Disabled", "Solid Color", "Color Bar", "Color Bar With Blending", "PN11", }; static const s64 link_freq_menu_items[] = { HM2170_LINK_FREQ_384MHZ, }; static const struct hm2170_mode supported_modes[][1] = { { { .width = 1928, .height = 1088, .hts = 2192, .vts_def = HM2170_VTS_DEF, .vts_min = HM2170_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1088_ver_b_regs), .regs = mode_1928x1088_ver_b_regs, }, .link_freq_index = HM2170_LINK_FREQ_384MHZ_INDEX, }, }, { { .width = 1928, .height = 1088, .hts = 2192, .vts_def = HM2170_VTS_DEF, .vts_min = HM2170_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1088_ver_d_regs), .regs = mode_1928x1088_ver_d_regs, }, .link_freq_index = HM2170_LINK_FREQ_384MHZ_INDEX, }, }, }; struct hm2170 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) 
&& \ IS_ENABLED(CONFIG_INTEL_VSC) struct vsc_mipi_config conf; struct vsc_camera_status status; struct v4l2_ctrl *privacy_status; #endif /* Current mode */ const struct hm2170_mode *cur_mode; /* HM2170 silicon revision, B or D */ enum hm2170_silicon_revision rev; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; }; static inline struct hm2170 *to_hm2170(struct v4l2_subdev *subdev) { return container_of(subdev, struct hm2170, sd); } static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HM2170_DATA_LANES; do_div(pixel_rate, HM2170_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, HM2170_SCLK); return ppl; } static int hm2170_read_reg(struct hm2170 *hm2170, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int hm2170_write_reg(struct hm2170 *hm2170, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; if (reg == HM2170_REG_DELAY) { msleep(val); return 0; } put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) return ret < 0 ? 
ret : -EIO; return 0; } static int hm2170_write_reg_list(struct hm2170 *hm2170, const struct hm2170_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = hm2170_write_reg(hm2170, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int hm2170_test_pattern(struct hm2170 *hm2170, u32 pattern) { if (pattern) pattern = pattern << HM2170_TEST_PATTERN_BAR_SHIFT | HM2170_TEST_PATTERN_ENABLE; return hm2170_write_reg(hm2170, HM2170_REG_TEST_PATTERN, 1, pattern); } static int hm2170_set_ctrl(struct v4l2_ctrl *ctrl) { struct hm2170 *hm2170 = container_of(ctrl->handler, struct hm2170, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = hm2170->cur_mode->height + ctrl->val - HM2170_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(hm2170->exposure, hm2170->exposure->minimum, exposure_max, hm2170->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = hm2170_write_reg(hm2170, HM2170_REG_COMMAND_UPDATE, 1, HM2170_COMMAND_HOLD); if (ret) dev_dbg(&client->dev, "failed to hold command"); switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = hm2170_write_reg(hm2170, HM2170_REG_ANALOG_GAIN, 1, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = hm2170_write_reg(hm2170, HM2170_REG_DIGITAL_GAIN, 2, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = hm2170_write_reg(hm2170, HM2170_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: ret = hm2170_write_reg(hm2170, HM2170_REG_VTS, 2, hm2170->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = hm2170_test_pattern(hm2170, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } ret |= hm2170_write_reg(hm2170, HM2170_REG_COMMAND_UPDATE, 1, HM2170_COMMAND_UPDATE); pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops hm2170_ctrl_ops = { .s_ctrl = hm2170_set_ctrl, }; static int hm2170_init_controls(struct hm2170 *hm2170) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct hm2170_mode *cur_mode; s64 exposure_max, h_blank, pixel_rate; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &hm2170->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &hm2170->mutex; cur_mode = hm2170->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); hm2170->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (hm2170->link_freq) hm2170->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate = to_pixel_rate(HM2170_LINK_FREQ_384MHZ_INDEX); hm2170->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, pixel_rate, 1, pixel_rate); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = 
HM2170_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; hm2170->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index); h_blank -= cur_mode->width; hm2170->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (hm2170->hblank) hm2170->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) hm2170->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(hm2170->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, HM2170_ANAL_GAIN_MIN, HM2170_ANAL_GAIN_MAX, HM2170_ANAL_GAIN_STEP, HM2170_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_DIGITAL_GAIN, HM2170_DGTL_GAIN_MIN, HM2170_DGTL_GAIN_MAX, HM2170_DGTL_GAIN_STEP, HM2170_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - HM2170_EXPOSURE_MAX_MARGIN; hm2170->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_EXPOSURE, HM2170_EXPOSURE_MIN, exposure_max, HM2170_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hm2170_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(hm2170_test_pattern_menu) - 1, 0, 0, hm2170_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; hm2170->sd.ctrl_handler = ctrl_hdlr; return 0; } static void hm2170_update_pad_format(const struct hm2170_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void hm2170_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct hm2170 *hm2170 = handle; v4l2_ctrl_s_ctrl(hm2170->privacy_status, !status); } #endif static int hm2170_start_streaming(struct hm2170 *hm2170) { struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); const struct hm2170_reg_list *reg_list; int ret = 0; reg_list = &hm2170->cur_mode->reg_list; ret = hm2170_write_reg_list(hm2170, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(hm2170->sd.ctrl_handler); if (ret) return ret; ret = hm2170_write_reg(hm2170, HM2170_REG_MODE_SELECT, 1, HM2170_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void hm2170_stop_streaming(struct hm2170 *hm2170) { struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); if (hm2170_write_reg(hm2170, HM2170_REG_MODE_SELECT, 1, HM2170_MODE_STANDBY)) dev_err(&client->dev, "failed to stop streaming"); } static int hm2170_set_stream(struct v4l2_subdev *sd, int enable) { struct hm2170 *hm2170 = to_hm2170(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (hm2170->streaming == enable) return 0; mutex_lock(&hm2170->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&hm2170->mutex); return ret; } ret = hm2170_start_streaming(hm2170); if (ret) { enable = 0; hm2170_stop_streaming(hm2170); pm_runtime_put(&client->dev); } } else { hm2170_stop_streaming(hm2170); pm_runtime_put(&client->dev); } hm2170->streaming = enable; mutex_unlock(&hm2170->mutex); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ 
IS_ENABLED(CONFIG_INTEL_VSC) static int hm2170_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2170 *hm2170 = to_hm2170(sd); int ret; ret = vsc_release_camera_sensor(&hm2170->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); return ret; } static int hm2170_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2170 *hm2170 = to_hm2170(sd); int ret; hm2170->conf.lane_num = HM2170_DATA_LANES; /* frequency unit 100k */ hm2170->conf.freq = HM2170_LINK_FREQ_384MHZ / 100000; ret = vsc_acquire_camera_sensor(&hm2170->conf, hm2170_vsc_privacy_callback, hm2170, &hm2170->status); if (ret && ret != -EAGAIN) { dev_err(dev, "Acquire VSC failed"); return ret; } __v4l2_ctrl_s_ctrl(hm2170->privacy_status, !(hm2170->status.status)); return ret; } #endif static int __maybe_unused hm2170_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2170 *hm2170 = to_hm2170(sd); mutex_lock(&hm2170->mutex); if (hm2170->streaming) hm2170_stop_streaming(hm2170); mutex_unlock(&hm2170->mutex); return 0; } static int __maybe_unused hm2170_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2170 *hm2170 = to_hm2170(sd); int ret = 0; mutex_lock(&hm2170->mutex); if (!hm2170->streaming) goto exit; ret = hm2170_start_streaming(hm2170); if (ret) { hm2170->streaming = false; hm2170_stop_streaming(hm2170); } exit: mutex_unlock(&hm2170->mutex); return ret; } static int hm2170_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct hm2170 *hm2170 = to_hm2170(sd); const struct hm2170_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes[hm2170->rev], ARRAY_SIZE(supported_modes[hm2170->rev]), width, height, fmt->format.width, fmt->format.height); mutex_lock(&hm2170->mutex); hm2170_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { hm2170->cur_mode = mode; __v4l2_ctrl_s_ctrl(hm2170->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(hm2170->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(hm2170->vblank, mode->vts_min - mode->height, HM2170_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(hm2170->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(hm2170->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&hm2170->mutex); return 0; } static int hm2170_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct hm2170 *hm2170 = to_hm2170(sd); mutex_lock(&hm2170->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&hm2170->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else hm2170_update_pad_format(hm2170->cur_mode, &fmt->format); mutex_unlock(&hm2170->mutex); return 0; } static int hm2170_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = 
MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int hm2170_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { struct hm2170 *hm2170 = to_hm2170(sd); if (fse->index >= ARRAY_SIZE(supported_modes[hm2170->rev])) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[hm2170->rev][fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[hm2170->rev][fse->index].height; fse->max_height = fse->min_height; return 0; } static int hm2170_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct hm2170 *hm2170 = to_hm2170(sd); mutex_lock(&hm2170->mutex); hm2170_update_pad_format(&supported_modes[hm2170->rev][0], #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) v4l2_subdev_get_try_format(sd, fh->state, 0)); #else v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&hm2170->mutex); return 0; } static const struct v4l2_subdev_video_ops hm2170_video_ops = { .s_stream = hm2170_set_stream, }; static const struct v4l2_subdev_pad_ops hm2170_pad_ops = { .set_fmt = hm2170_set_format, .get_fmt = hm2170_get_format, .enum_mbus_code = hm2170_enum_mbus_code, .enum_frame_size = hm2170_enum_frame_size, }; static const struct v4l2_subdev_ops hm2170_subdev_ops = { .video = &hm2170_video_ops, .pad = &hm2170_pad_ops, }; static const struct media_entity_operations hm2170_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops hm2170_internal_ops = { .open = hm2170_open, }; static int hm2170_identify_module(struct hm2170 *hm2170) { struct i2c_client *client = v4l2_get_subdevdata(&hm2170->sd); int ret; u32 val; char rev; ret = hm2170_read_reg(hm2170, HM2170_REG_CHIP_ID, 3, &val); if (ret) return ret; rev = val & 0xff; val = val >> 8; if (val != HM2170_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", HM2170_CHIP_ID, val); return -ENXIO; } hm2170->rev = rev < 0x4 ? 
HM2170_SILICON_REV_B : HM2170_SILICON_REV_D; return 0; } static int hm2170_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != HM2170_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int hm2170_remove(struct i2c_client *client) #else static void hm2170_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct hm2170 *hm2170 = to_hm2170(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&hm2170->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int hm2170_probe(struct i2c_client *client) { struct hm2170 *hm2170; int ret = 0; /* Check HW config */ ret = hm2170_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } hm2170 = devm_kzalloc(&client->dev, sizeof(*hm2170), GFP_KERNEL); if (!hm2170) { ret = -ENOMEM; goto probe_error_ret; } v4l2_i2c_subdev_init(&hm2170->sd, client, &hm2170_subdev_ops); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) hm2170->conf.lane_num = HM2170_DATA_LANES; /* frequency unit 100k */ hm2170->conf.freq = HM2170_LINK_FREQ_384MHZ / 100000; ret = vsc_acquire_camera_sensor(&hm2170->conf, hm2170_vsc_privacy_callback, hm2170, &hm2170->status); if (ret == -EAGAIN) { return -EPROBE_DEFER; } else if (ret) { dev_err(&client->dev, "Acquire VSC failed"); return ret; } #endif ret = hm2170_identify_module(hm2170); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_ret; } mutex_init(&hm2170->mutex); hm2170->cur_mode = &supported_modes[hm2170->rev][0]; ret = hm2170_init_controls(hm2170); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } hm2170->sd.internal_ops = &hm2170_internal_ops; hm2170->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; hm2170->sd.entity.ops = &hm2170_subdev_entity_ops; hm2170->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; hm2170->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&hm2170->sd.entity, 1, &hm2170->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ret = v4l2_async_register_subdev_sensor(&hm2170->sd); if 
(ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&hm2170->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(hm2170->sd.ctrl_handler); mutex_destroy(&hm2170->mutex); probe_error_ret: #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) hm2170_power_off(&client->dev); #endif return ret; } static const struct dev_pm_ops hm2170_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(hm2170_suspend, hm2170_resume) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) SET_RUNTIME_PM_OPS(hm2170_power_off, hm2170_power_on, NULL) #endif }; static const struct acpi_device_id hm2170_acpi_ids[] = { {"HIMX2170"}, {} }; MODULE_DEVICE_TABLE(acpi, hm2170_acpi_ids); static struct i2c_driver hm2170_i2c_driver = { .driver = { .name = "hm2170", .pm = &hm2170_pm_ops, .acpi_match_table = hm2170_acpi_ids, }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = hm2170_probe, #else .probe = hm2170_probe, #endif .remove = hm2170_remove, }; module_i2c_driver(hm2170_i2c_driver); MODULE_AUTHOR("Shawn Tu "); MODULE_DESCRIPTION("Himax HM2170 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/hm2172.c000066400000000000000000001171131471702545500232130ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. #include <linux/version.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include <asm/unaligned.h> #else #include <linux/unaligned.h> #endif #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include <linux/vsc.h> static const struct acpi_device_id cvfd_ids[] = { { "INTC1059", 0 }, { "INTC1095", 0 }, { "INTC100A", 0 }, { "INTC10CF", 0 }, {} }; #endif #define HM2172_LINK_FREQ_384MHZ 384000000ULL #define HM2172_SCLK 76000000LL #define HM2172_MCLK 19200000 #define HM2172_DATA_LANES 2 #define HM2172_RGB_DEPTH 10 #define HM2172_REG_DELAY 0xffff #define HM2172_REG_CHIP_ID 0x0000 #define HM2172_CHIP_ID 0x2172 #define HM2172_REG_MODE_SELECT 0x0100 #define HM2172_MODE_STANDBY 0x00 #define HM2172_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define HM2172_REG_VTS 0x4809 #define HM2172_VTS_DEF 0x0484 #define HM2172_VTS_MIN 0x0484 #define HM2172_VTS_MAX 0x7fff /* horizontal-timings from sensor */ #define HM2172_REG_HTS 0x480B /* Exposure controls from sensor */ #define HM2172_REG_EXPOSURE 0x0202 #define HM2172_EXPOSURE_MIN 2 #define HM2172_EXPOSURE_MAX_MARGIN 2 #define HM2172_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define HM2172_REG_ANALOG_GAIN 0x0208 #define HM2172_ANAL_GAIN_MIN 0 #define HM2172_ANAL_GAIN_MAX 80 #define HM2172_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define HM2172_REG_DIGITAL_GAIN 0x020A #define HM2172_DGTL_GAIN_MIN 256 #define HM2172_DGTL_GAIN_MAX 1020 #define HM2172_DGTL_GAIN_STEP 1 #define HM2172_DGTL_GAIN_DEFAULT 256 /* Register update control */ #define HM2172_REG_COMMAND_UPDATE 0x0104 #define HM2172_COMMAND_UPDATE 0x00 #define HM2172_COMMAND_HOLD 0x01 /* Test Pattern Control */ #define HM2172_REG_TEST_PATTERN 0x0601 #define HM2172_TEST_PATTERN_ENABLE BIT(0) #define
HM2172_TEST_PATTERN_BAR_SHIFT 1 enum { HM2172_LINK_FREQ_384MHZ_INDEX, }; struct hm2172_reg { u16 address; u8 val; }; struct hm2172_reg_list { u32 num_of_regs; const struct hm2172_reg *regs; }; struct hm2172_link_freq_config { const struct hm2172_reg_list reg_list; }; struct hm2172_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct hm2172_reg_list reg_list; }; static const struct hm2172_reg mode_1928x1088_30fps_2lane[] = { {0x0103, 0x00}, {0xffff, 0x0A}, {0x0203, 0xEE}, {0x0300, 0x6A}, {0x0202, 0x08}, {0x0301, 0x19}, {0x0302, 0x08}, {0x0303, 0x04}, {0x030F, 0x04}, {0x0350, 0x61}, {0x1000, 0xC3}, {0x1001, 0xC0}, {0x2000, 0x00}, {0x2088, 0x01}, {0x2089, 0x00}, {0x208A, 0xC8}, {0x2700, 0x00}, {0x2711, 0x01}, {0x2713, 0x04}, {0x272F, 0x01}, {0x2800, 0x01}, {0x281B, 0x02}, {0x2821, 0x8E}, {0x2823, 0x01}, {0x282E, 0x01}, {0x282F, 0xC0}, {0x2839, 0x59}, {0x283A, 0x08}, {0x283B, 0x09}, {0x2842, 0x0C}, {0x2846, 0x01}, {0x2847, 0x94}, {0x2848, 0x31}, {0x3001, 0x00}, {0x3002, 0x88}, {0x3004, 0x02}, {0x3024, 0x20}, {0x3025, 0x12}, {0x3026, 0x00}, {0x3027, 0x81}, {0x3028, 0x01}, {0x3029, 0x00}, {0x302A, 0x30}, {0x3030, 0x68}, {0x3042, 0x00}, {0x3070, 0x01}, {0x307B, 0x08}, {0x30C4, 0x20}, {0x30D0, 0x02}, {0x30D1, 0x00}, {0x30D2, 0x1E}, {0x30D3, 0x31}, {0x30D4, 0x14}, {0x30D7, 0x22}, {0x30D8, 0x00}, {0x30D9, 0x1E}, {0x30DA, 0x31}, {0x30DB, 0x14}, {0x30DE, 0x04}, {0x30DF, 0x00}, {0x30E0, 0x1E}, {0x30E1, 0x31}, {0x30E2, 0x14}, {0x30E5, 0x24}, {0x30E6, 0x00}, {0x30E7, 0x1E}, {0x30E8, 0x31}, {0x30E9, 0x14}, {0x30EC, 0x2C}, {0x30ED, 0x00}, {0x30EE, 0x1E}, {0x30EF, 0x31}, {0x30F0, 0x14}, {0x30F3, 0x2C}, {0x30F4, 0x00}, {0x30F5, 0x1E}, {0x30F6, 0x31}, {0x30F7, 0x14}, {0x30F8, 0x01}, {0x3101, 0x02}, {0x3102, 0x00}, {0x3103, 0x1E}, {0x3104, 0x31}, {0x3105, 0x14}, {0x3108, 0x22}, {0x3109, 0x00}, {0x310A, 0x1E}, {0x310B, 0x31}, {0x310C, 0x14}, {0x310F, 0x04}, {0x3110, 0x00}, {0x3111, 0x1E}, {0x3112, 0x31}, {0x3113, 0x14}, {0x3116, 0x24}, {0x3117, 0x00}, {0x3118, 0x1E}, {0x3119, 0x31}, {0x311A, 0x14}, {0x311D, 0x2C}, {0x311E, 0x00}, {0x311F, 0x1E}, {0x3120, 0x31}, {0x3121, 0x14}, {0x3124, 0x2C}, {0x3125, 0x00}, {0x3126, 0x1E}, {0x3127, 0x31}, {0x3128, 0x14}, {0x3129, 0x01}, {0x3135, 0x01}, {0x3137, 0x03}, {0x3138, 0x10}, {0x3139, 0x25}, {0x313C, 0x6A}, {0x313D, 0x3A}, {0x313E, 0xE8}, {0x3140, 0x0C}, {0x3144, 0x3E}, {0x3145, 0xF4}, {0x3146, 0x32}, {0x3147, 0x11}, {0x3148, 0x10}, {0x3149, 0xA7}, {0x314A, 0x80}, {0x314B, 0x33}, {0x314C, 0x1C}, {0x314D, 0x34}, {0x314E, 0x13}, {0x314F, 0x00}, {0x3151, 0x08}, {0x3152, 0x00}, {0x3158, 0x00}, {0x315B, 0x80}, {0x3161, 0x11}, {0x3166, 0x4F}, {0x316B, 0x0A}, {0x3171, 0x05}, {0x3172, 0x1F}, {0x3179, 0x0F}, {0x317A, 0x21}, {0x317B, 0x84}, {0x317C, 0x1C}, {0x317D, 0x00}, {0x317E, 0x04}, {0x3182, 0x88}, {0x3183, 0x18}, {0x3184, 0x40}, {0x318E, 0x88}, {0x318F, 0x00}, {0x3190, 0x00}, {0x3191, 0x84}, {0x4003, 0x02}, {0x4004, 0x02}, {0x4800, 0x26}, {0x4801, 0x10}, {0x4802, 0x00}, {0x4803, 0x00}, {0x4804, 0x3F}, {0x4805, 0x7F}, {0x4806, 0x3F}, {0x4807, 0x1F}, {0x4809, 0x08}, {0x480A, 0xF0}, {0x480B, 0x04}, {0x480C, 0x54}, {0x480D, 0x00}, {0x480E, 0x00}, {0x480F, 0x04}, {0x4810, 0x3F}, {0x4811, 0x00}, {0x4812, 0x00}, {0x4813, 0x00}, {0x4814, 0x00}, {0x4815, 0x00}, {0x4816, 0x00}, 
{0x4817, 0x00}, {0x4818, 0x00}, {0x4819, 0x02}, {0x481E, 0x10}, {0x481F, 0x00}, {0x4820, 0x0E}, {0x4821, 0x0E}, {0x4822, 0xA7}, {0x4823, 0x33}, {0x4824, 0x1C}, {0x4840, 0x00}, {0x4844, 0x00}, {0x4845, 0x00}, {0x4846, 0x00}, {0x4847, 0x00}, {0x4848, 0x00}, {0x4849, 0xF1}, {0x484A, 0x00}, {0x484B, 0x88}, {0x484C, 0x01}, {0x484D, 0x04}, {0x484E, 0x64}, {0x484F, 0x50}, {0x4850, 0x04}, {0x4851, 0x00}, {0x4852, 0x01}, {0x4853, 0x19}, {0x4854, 0x50}, {0x4855, 0x04}, {0x4856, 0x00}, {0x4863, 0x02}, {0x4864, 0x3C}, {0x4865, 0x02}, {0x4866, 0xAE}, {0x4880, 0x00}, {0x48A0, 0x00}, {0x48A1, 0x04}, {0x48A2, 0x01}, {0x48A3, 0xDD}, {0x48A4, 0x0C}, {0x48A5, 0x3B}, {0x48A6, 0x20}, {0x48A7, 0x20}, {0x48A8, 0x20}, {0x48A9, 0x20}, {0x48AA, 0x00}, {0x48C0, 0x3F}, {0x48C1, 0x29}, {0x48C3, 0x14}, {0x48C4, 0x00}, {0x48C5, 0x07}, {0x48C6, 0x88}, {0x48C7, 0x04}, {0x48C8, 0x40}, {0x48C9, 0x00}, {0x48CA, 0x00}, {0x48CB, 0x00}, {0x48CC, 0x1F}, {0x48F0, 0x00}, {0x48F1, 0x00}, {0x48F2, 0x04}, {0x48F3, 0x01}, {0x48F4, 0xE0}, {0x48F5, 0x01}, {0x48F6, 0x10}, {0x48F7, 0x00}, {0x48F8, 0x00}, {0x48F9, 0x00}, {0x48FA, 0x00}, {0x48FB, 0x01}, {0x4931, 0x2B}, {0x4932, 0x01}, {0x4933, 0x01}, {0x4934, 0x04}, {0x4935, 0xBA}, {0x495E, 0x10}, {0x4962, 0xA7}, {0x4963, 0x33}, {0x4964, 0x1C}, {0x4980, 0x00}, {0x4A72, 0x01}, {0x4A73, 0x01}, {0x4C1E, 0x10}, {0x4C22, 0xA7}, {0x4C23, 0x33}, {0x4C24, 0x1C}, {0x4C30, 0x00}, {0x4CF2, 0x01}, {0x4CF3, 0x01}, {0x0104, 0x01}, }; static const struct hm2172_reg mode_1928x1088_60fps_2lane[] = { {0x0103, 0x00}, {0xffff, 0x10}, {0x0202, 0x03}, {0x0203, 0x60}, {0x0300, 0x5E},//ver:0513 5E {0x0301, 0x3F}, {0x0302, 0x07},//ver:0513 07 {0x0303, 0x04}, {0x1000, 0xC3}, {0x1001, 0xC0}, {0x2000, 0x00}, {0x2088, 0x01}, {0x2089, 0x00}, {0x208A, 0xC8}, {0x2700, 0x00}, {0x2711, 0x01}, {0x2713, 0x04}, {0x272F, 0x01}, {0x2800, 0x01}, {0x2821, 0x8E}, {0x2823, 0x01}, {0x282E, 0x01}, {0x282F, 0xC0}, {0x2839, 0x13}, {0x283A, 0x01}, {0x283B, 0x0F}, {0x2842, 0x0C}, {0x2846, 0x01}, {0x2847, 0x94}, {0x3001, 0x00}, {0x3002, 0x88}, {0x3004, 0x02}, {0x3024, 0x20}, {0x3025, 0x12}, {0x3026, 0x00}, {0x3027, 0x81}, {0x3028, 0x01}, {0x3029, 0x00}, {0x302A, 0x30}, {0x3042, 0x00}, {0x3070, 0x01}, {0x30C4, 0x20}, {0x30D0, 0x01}, {0x30D2, 0x8E}, {0x30D7, 0x02}, {0x30D9, 0x9E}, {0x30DE, 0x03}, {0x30E0, 0x9E}, {0x30E5, 0x04}, {0x30E7, 0x9F}, {0x30EC, 0x24}, {0x30EE, 0x9F}, {0x30F3, 0x44}, {0x30F5, 0x9F}, {0x30F8, 0x00}, {0x3101, 0x02}, {0x3103, 0x9E}, {0x3108, 0x03}, {0x310A, 0x9E}, {0x310F, 0x04}, {0x3111, 0x9E}, {0x3116, 0x24}, {0x3118, 0x9F}, {0x311D, 0x44}, {0x311F, 0x9F}, {0x3124, 0x64}, {0x3126, 0x9F}, {0x3135, 0x01}, {0x3137, 0x03}, {0x313C, 0x52}, {0x313E, 0x68}, {0x3144, 0x3E}, {0x3145, 0x68}, {0x3146, 0x08}, {0x3147, 0x03}, {0x3148, 0x0F}, {0x3149, 0xFF}, {0x314A, 0x13}, {0x314B, 0x0F}, {0x314C, 0xF8}, {0x314D, 0x04}, {0x314E, 0x10}, {0x3161, 0x11}, {0x3171, 0x05}, {0x317A, 0x21}, {0x317B, 0xF0}, {0x317C, 0x07}, {0x317D, 0x09}, {0x3183, 0x18}, {0x3184, 0x4A}, {0x318E, 0x88}, {0x318F, 0x00}, {0x3190, 0x00}, {0x4003, 0x02}, {0x4004, 0x02}, {0x4800, 0x26}, {0x4801, 0x10}, {0x4802, 0x00}, {0x4803, 0x00}, {0x4804, 0x7F}, {0x4805, 0x7F}, {0x4806, 0x3F}, {0x4807, 0x1F}, {0x4809, 0x04}, {0x480A, 0x7A}, {0x480B, 0x04}, {0x480C, 0x50}, {0x480D, 0x00}, {0x480E, 0x01},//00 {0x480F, 0x04}, {0x4810, 0x40},//3F {0x4811, 0x00}, {0x4812, 0x00}, {0x4813, 0x00}, {0x4814, 0x00}, {0x4815, 0x00}, {0x4816, 0x00}, {0x4817, 0x00}, {0x4818, 0x00}, {0x4819, 0x03}, {0x481F, 0x00}, {0x4820, 0x0E}, {0x4821, 0x0E}, {0x4840, 0x00}, {0x4844, 0x00}, {0x4845, 
0x00}, {0x4846, 0x00}, {0x4847, 0x00}, {0x4848, 0x00}, {0x4849, 0xF1}, {0x484A, 0x00}, {0x484B, 0x88}, {0x484C, 0x01}, {0x484D, 0x04}, {0x484E, 0x64}, {0x484F, 0x50}, {0x4850, 0x04}, {0x4851, 0x00}, {0x4852, 0x01}, {0x4853, 0x19}, {0x4854, 0x50}, {0x4855, 0x04}, {0x4856, 0x00}, {0x4863, 0x02}, {0x4864, 0x3D}, {0x4865, 0x02}, {0x4866, 0xB0}, {0x4880, 0x00}, {0x48A0, 0x00}, {0x48A1, 0x04}, {0x48A2, 0x01}, {0x48A3, 0xDD}, {0x48A4, 0x0C}, {0x48A5, 0x3B}, {0x48A6, 0x20}, {0x48A7, 0x20}, {0x48A8, 0x20}, {0x48A9, 0x20}, {0x48AA, 0x00}, {0x48C0, 0x3F}, {0x48C1, 0x29}, {0x48C3, 0x14}, {0x48C4, 0x00}, {0x48C5, 0x07}, {0x48C6, 0x88}, {0x48C7, 0x04}, {0x48C8, 0x40}, {0x48C9, 0x00}, {0x48CA, 0x00}, {0x48CB, 0x00}, {0x48CC, 0x00}, {0x48F0, 0x00}, {0x48F1, 0x00}, {0x48F2, 0x04}, {0x48F3, 0x01}, {0x48F4, 0xE0}, {0x48F5, 0x01}, {0x48F6, 0x10}, {0x48F7, 0x00}, {0x48F8, 0x00}, {0x48F9, 0x00}, {0x48FA, 0x00}, {0x48FB, 0x01}, {0x4931, 0x2B}, {0x4932, 0x01}, {0x4933, 0x01}, {0x4934, 0x00}, {0x4935, 0x0F}, {0x4980, 0x00}, {0x4A72, 0x01}, {0x4A73, 0x01}, {0x4C30, 0x00}, {0x4CF2, 0x01}, {0x4CF3, 0x01}, {0x0104, 0x00} }; static const struct hm2172_reg mode_1928x1088_HDR_60fps[] = { {0x0103, 0x00}, {0xffff, 0x10}, {0x0202, 0x08}, {0x0203, 0xEE}, {0x0300, 0x6A}, {0x0301, 0x19}, {0x0302, 0x08}, {0x0303, 0x04}, {0x030F, 0x04}, {0x0350, 0x61}, {0x1000, 0xC3}, {0x1001, 0xC0}, {0x2000, 0x00}, {0x2088, 0x01}, {0x2089, 0x00}, {0x208A, 0xC8}, {0x2700, 0x00}, {0x2711, 0x01}, {0x2713, 0x04}, {0x272F, 0x01}, {0x2800, 0x01}, {0x281B, 0x02}, {0x2821, 0x8E}, {0x2823, 0x01}, {0x282E, 0x01}, {0x282F, 0xC0}, {0x2839, 0x59}, {0x283A, 0x08}, {0x283B, 0x09}, {0x2842, 0x0C}, {0x2846, 0x01}, {0x2847, 0x94}, {0x2848, 0x31},//LS_OFFSET 1/4 of FLINE:0x121, 07/25; orig:0x0088 {0x3001, 0x00}, {0x3002, 0x88}, {0x3004, 0x02}, {0x3024, 0x20}, {0x3025, 0x12}, {0x3026, 0x00}, {0x3027, 0x81}, {0x3028, 0x01}, {0x3029, 0x00}, {0x302A, 0x30}, {0x3030, 0x68}, {0x3042, 0x00}, {0x3070, 0x01}, {0x307B, 0x08}, {0x30C4, 0x20}, {0x30D0, 0x02}, {0x30D1, 0x00}, {0x30D2, 0x1E}, {0x30D3, 0x31}, {0x30D4, 0x14}, {0x30D7, 0x22}, {0x30D8, 0x00}, {0x30D9, 0x1E}, {0x30DA, 0x31}, {0x30DB, 0x14}, {0x30DE, 0x04}, {0x30DF, 0x00}, {0x30E0, 0x1E}, {0x30E1, 0x31}, {0x30E2, 0x14}, {0x30E5, 0x24}, {0x30E6, 0x00}, {0x30E7, 0x1E}, {0x30E8, 0x31}, {0x30E9, 0x14}, {0x30EC, 0x2C}, {0x30ED, 0x00}, {0x30EE, 0x1E}, {0x30EF, 0x31}, {0x30F0, 0x14}, {0x30F3, 0x2C}, {0x30F4, 0x00}, {0x30F5, 0x1E}, {0x30F6, 0x31}, {0x30F7, 0x14}, {0x30F8, 0x01}, {0x3101, 0x02}, {0x3102, 0x00}, {0x3103, 0x1E}, {0x3104, 0x31}, {0x3105, 0x14}, {0x3108, 0x22}, {0x3109, 0x00}, {0x310A, 0x1E}, {0x310B, 0x31}, {0x310C, 0x14}, {0x310F, 0x04}, {0x3110, 0x00}, {0x3111, 0x1E}, {0x3112, 0x31}, {0x3113, 0x14}, {0x3116, 0x24}, {0x3117, 0x00}, {0x3118, 0x1E}, {0x3119, 0x31}, {0x311A, 0x14}, {0x311D, 0x2C}, {0x311E, 0x00}, {0x311F, 0x1E}, {0x3120, 0x31}, {0x3121, 0x14}, {0x3124, 0x2C}, {0x3125, 0x00}, {0x3126, 0x1E}, {0x3127, 0x31}, {0x3128, 0x14}, {0x3129, 0x01}, {0x3135, 0x01}, {0x3137, 0x03}, {0x3138, 0x10}, {0x3139, 0x25}, {0x313C, 0x6A}, {0x313D, 0x3A}, {0x313E, 0xE8}, {0x3140, 0x0C}, {0x3144, 0x3E}, {0x3145, 0xF4}, {0x3146, 0x32}, {0x3147, 0x11}, {0x3148, 0x10}, {0x3149, 0xA7}, {0x314A, 0x80}, {0x314B, 0x33}, {0x314C, 0x1C}, {0x314D, 0x34}, {0x314E, 0x13}, {0x314F, 0x00}, {0x3151, 0x08}, {0x3152, 0x00}, {0x3158, 0x00}, {0x315B, 0x80}, {0x3161, 0x11}, {0x3166, 0x4F}, {0x316B, 0x0A}, {0x3171, 0x05}, {0x3172, 0x1F}, {0x3179, 0x0F}, {0x317A, 0x21}, {0x317B, 0x84}, {0x317C, 0x1C}, {0x317D, 0x00}, {0x317E, 0x04}, {0x3182, 
0x88}, {0x3183, 0x18}, {0x3184, 0x40}, {0x318E, 0x88}, {0x318F, 0x00}, {0x3190, 0x00}, {0x3191, 0x84}, {0x4003, 0x02}, {0x4004, 0x02}, {0x4800, 0x26}, {0x4801, 0x10}, {0x4802, 0x00}, {0x4803, 0x00}, {0x4804, 0x3F}, {0x4805, 0x7F}, {0x4806, 0x3F}, {0x4807, 0x1F}, {0x4809, 0x08}, {0x480A, 0xF0}, {0x480B, 0x04}, {0x480C, 0x54}, {0x480D, 0x00}, {0x480E, 0x00}, {0x480F, 0x04}, {0x4810, 0x3F}, {0x4811, 0x00}, {0x4812, 0x00}, {0x4813, 0x00}, {0x4814, 0x00}, {0x4815, 0x00}, {0x4816, 0x00}, {0x4817, 0x00}, {0x4818, 0x00}, {0x4819, 0x02}, {0x481E, 0x10}, {0x481F, 0x00}, {0x4820, 0x0E}, {0x4821, 0x0E}, {0x4822, 0xA7}, {0x4823, 0x33}, {0x4824, 0x1C}, {0x4840, 0x00}, {0x4844, 0x00}, {0x4845, 0x00}, {0x4846, 0x00}, {0x4847, 0x00}, {0x4848, 0x00}, {0x4849, 0xF1}, {0x484A, 0x00}, {0x484B, 0x88}, {0x484C, 0x01}, {0x484D, 0x04}, {0x484E, 0x64}, {0x484F, 0x50}, {0x4850, 0x04}, {0x4851, 0x00}, {0x4852, 0x01}, {0x4853, 0x19}, {0x4854, 0x50}, {0x4855, 0x04}, {0x4856, 0x00}, {0x4863, 0x02}, {0x4864, 0x3C}, {0x4865, 0x02}, {0x4866, 0xAE}, {0x4880, 0x00}, {0x48A0, 0x00}, {0x48A1, 0x04}, {0x48A2, 0x01}, {0x48A3, 0xDD}, {0x48A4, 0x0C}, {0x48A5, 0x3B}, {0x48A6, 0x20}, {0x48A7, 0x20}, {0x48A8, 0x20}, {0x48A9, 0x20}, {0x48AA, 0x00}, {0x48C0, 0x3F}, {0x48C1, 0x29}, {0x48C3, 0x14}, {0x48C4, 0x00}, {0x48C5, 0x07}, {0x48C6, 0x88}, {0x48C7, 0x04}, {0x48C8, 0x40}, {0x48C9, 0x00}, {0x48CA, 0x00}, {0x48CB, 0x00}, {0x48CC, 0x1F}, {0x48F0, 0x00}, {0x48F1, 0x00}, {0x48F2, 0x04}, {0x48F3, 0x01}, {0x48F4, 0xE0}, {0x48F5, 0x01}, {0x48F6, 0x10}, {0x48F7, 0x00}, {0x48F8, 0x00}, {0x48F9, 0x00}, {0x48FA, 0x00}, {0x48FB, 0x01}, {0x4931, 0x2B}, {0x4932, 0x01}, {0x4933, 0x01}, {0x4934, 0x04}, {0x4935, 0xBA}, {0x495E, 0x10}, {0x4962, 0xA7}, {0x4963, 0x33}, {0x4964, 0x1C}, {0x4980, 0x00}, {0x4A72, 0x01}, {0x4A73, 0x01}, {0x4C1E, 0x10}, {0x4C22, 0xA7}, {0x4C23, 0x33}, {0x4C24, 0x1C}, {0x4C30, 0x00}, {0x4CF2, 0x01}, {0x4CF3, 0x01}, {0x0104, 0x00}, }; static const char * const hm2172_test_pattern_menu[] = { "Disabled", "Solid Color", "Color Bar", "Color Bar With Blending", "PN11", }; static const s64 link_freq_menu_items[] = { HM2172_LINK_FREQ_384MHZ, }; static const struct hm2172_mode supported_modes[] = { { .width = 1928, .height = 1088, .hts = 2192, .vts_def = HM2172_VTS_DEF, .vts_min = HM2172_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1088_30fps_2lane), .regs = mode_1928x1088_30fps_2lane, }, .link_freq_index = HM2172_LINK_FREQ_384MHZ_INDEX, }, { .width = 1928, .height = 1088, .hts = 2192, .vts_def = HM2172_VTS_DEF, .vts_min = HM2172_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1088_60fps_2lane), .regs = mode_1928x1088_60fps_2lane, }, .link_freq_index = HM2172_LINK_FREQ_384MHZ_INDEX, }, { .width = 1928, .height = 1088, .hts = 2192, .vts_def = HM2172_VTS_DEF, .vts_min = HM2172_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1088_HDR_60fps), .regs = mode_1928x1088_HDR_60fps, }, .link_freq_index = HM2172_LINK_FREQ_384MHZ_INDEX, }, }; struct hm2172 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; struct clk *img_clk; struct regulator *avdd; struct gpio_desc *reset; struct gpio_desc *handshake; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) struct vsc_mipi_config conf; struct vsc_camera_status status; struct v4l2_ctrl *privacy_status; #endif /* Current mode */ const struct 
hm2172_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) bool use_intel_vsc; #endif }; static inline struct hm2172 *to_hm2172(struct v4l2_subdev *subdev) { return container_of(subdev, struct hm2172, sd); } static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HM2172_DATA_LANES; do_div(pixel_rate, HM2172_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, HM2172_SCLK); return ppl; } static int hm2172_read_reg(struct hm2172 *hm2172, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int hm2172_write_reg(struct hm2172 *hm2172, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; if (reg == HM2172_REG_DELAY) { msleep(val); return 0; } dev_dbg(&client->dev, "%s, reg %x len %x, val %x\n", __func__, reg, len, val); put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) { dev_err(&client->dev, "failed to write reg %d val %d", reg, val); return ret < 0 ? 
ret : -EIO; } return 0; } static int hm2172_write_reg_list(struct hm2172 *hm2172, const struct hm2172_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = hm2172_write_reg(hm2172, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int hm2172_test_pattern(struct hm2172 *hm2172, u32 pattern) { if (pattern) pattern = pattern << HM2172_TEST_PATTERN_BAR_SHIFT | HM2172_TEST_PATTERN_ENABLE; return hm2172_write_reg(hm2172, HM2172_REG_TEST_PATTERN, 1, pattern); } static int hm2172_set_ctrl(struct v4l2_ctrl *ctrl) { struct hm2172 *hm2172 = container_of(ctrl->handler, struct hm2172, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = hm2172->cur_mode->height + ctrl->val - HM2172_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(hm2172->exposure, hm2172->exposure->minimum, exposure_max, hm2172->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = hm2172_write_reg(hm2172, HM2172_REG_COMMAND_UPDATE, 1, HM2172_COMMAND_HOLD); if (ret) dev_dbg(&client->dev, "failed to hold command"); switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = hm2172_write_reg(hm2172, HM2172_REG_ANALOG_GAIN, 1, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = hm2172_write_reg(hm2172, HM2172_REG_DIGITAL_GAIN, 2, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = hm2172_write_reg(hm2172, HM2172_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: ret = hm2172_write_reg(hm2172, HM2172_REG_VTS, 2, hm2172->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = hm2172_test_pattern(hm2172, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } ret |= hm2172_write_reg(hm2172, HM2172_REG_COMMAND_UPDATE, 1, HM2172_COMMAND_UPDATE); pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops hm2172_ctrl_ops = { .s_ctrl = hm2172_set_ctrl, }; static int hm2172_init_controls(struct hm2172 *hm2172) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct hm2172_mode *cur_mode; s64 exposure_max, h_blank, pixel_rate; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &hm2172->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &hm2172->mutex; cur_mode = hm2172->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); hm2172->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (hm2172->link_freq) hm2172->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate = to_pixel_rate(HM2172_LINK_FREQ_384MHZ_INDEX); hm2172->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, pixel_rate, 1, pixel_rate); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max 
= HM2172_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; hm2172->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index); h_blank -= cur_mode->width; hm2172->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (hm2172->hblank) hm2172->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) hm2172->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(hm2172->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, HM2172_ANAL_GAIN_MIN, HM2172_ANAL_GAIN_MAX, HM2172_ANAL_GAIN_STEP, HM2172_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_DIGITAL_GAIN, HM2172_DGTL_GAIN_MIN, HM2172_DGTL_GAIN_MAX, HM2172_DGTL_GAIN_STEP, HM2172_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - HM2172_EXPOSURE_MAX_MARGIN; hm2172->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_EXPOSURE, HM2172_EXPOSURE_MIN, exposure_max, HM2172_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hm2172_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(hm2172_test_pattern_menu) - 1, 0, 0, hm2172_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; hm2172->sd.ctrl_handler = ctrl_hdlr; return 0; } static void hm2172_update_pad_format(const struct hm2172_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void hm2172_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct hm2172 *hm2172 = handle; v4l2_ctrl_s_ctrl(hm2172->privacy_status, !status); } #endif static int hm2172_start_streaming(struct hm2172 *hm2172) { struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); const struct hm2172_reg_list *reg_list; int ret = 0; reg_list = &hm2172->cur_mode->reg_list; ret = hm2172_write_reg_list(hm2172, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(hm2172->sd.ctrl_handler); if (ret) return ret; ret = hm2172_write_reg(hm2172, HM2172_REG_MODE_SELECT, 1, HM2172_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void hm2172_stop_streaming(struct hm2172 *hm2172) { struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); if (hm2172_write_reg(hm2172, HM2172_REG_MODE_SELECT, 1, HM2172_MODE_STANDBY)) dev_err(&client->dev, "failed to stop streaming"); } static int hm2172_set_stream(struct v4l2_subdev *sd, int enable) { struct hm2172 *hm2172 = to_hm2172(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (hm2172->streaming == enable) return 0; mutex_lock(&hm2172->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&hm2172->mutex); return ret; } ret = hm2172_start_streaming(hm2172); if (ret) { enable = 0; hm2172_stop_streaming(hm2172); pm_runtime_put(&client->dev); } } else { hm2172_stop_streaming(hm2172); pm_runtime_put(&client->dev); } hm2172->streaming = enable; mutex_unlock(&hm2172->mutex); return ret; } static int hm2172_power_off(struct device *dev) { struct v4l2_subdev 
*sd = dev_get_drvdata(dev); struct hm2172 *hm2172 = to_hm2172(sd); int ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (hm2172->use_intel_vsc) { ret = vsc_release_camera_sensor(&hm2172->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); return ret; } #endif gpiod_set_value_cansleep(hm2172->reset, 1); gpiod_set_value_cansleep(hm2172->handshake, 0); if (hm2172->avdd) ret = regulator_disable(hm2172->avdd); clk_disable_unprepare(hm2172->img_clk); return ret; } static int hm2172_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2172 *hm2172 = to_hm2172(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (hm2172->use_intel_vsc) { hm2172->conf.lane_num = HM2172_DATA_LANES; /* frequency unit 100k */ hm2172->conf.freq = HM2172_LINK_FREQ_384MHZ / 100000; ret = vsc_acquire_camera_sensor(&hm2172->conf, hm2172_vsc_privacy_callback, hm2172, &hm2172->status); if (ret == -EAGAIN) return -EPROBE_DEFER; if (ret) { dev_err(dev, "Acquire VSC failed"); return ret; } if (hm2172->privacy_status) __v4l2_ctrl_s_ctrl(hm2172->privacy_status, !(hm2172->status.status)); return ret; } #endif ret = clk_prepare_enable(hm2172->img_clk); if (ret < 0) { dev_err(dev, "failed to enable imaging clock: %d", ret); return ret; } if (hm2172->avdd) { ret = regulator_enable(hm2172->avdd); if (ret < 0) { dev_err(dev, "failed to enable avdd: %d", ret); clk_disable_unprepare(hm2172->img_clk); return ret; } } gpiod_set_value_cansleep(hm2172->handshake, 1); gpiod_set_value_cansleep(hm2172->reset, 0); /* Lattice MIPI aggregator with some version FW needs longer delay after handshake triggered. We set 25ms as a safe value and wait for a stable version FW. 
*/ msleep_interruptible(25); return ret; } // This function tries to get power control resources static int hm2172_get_pm_resources(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2172 *hm2172 = to_hm2172(sd); int ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) acpi_handle handle = ACPI_HANDLE(dev); struct acpi_handle_list dep_devices; acpi_status status; int i = 0; hm2172->use_intel_vsc = false; if (!acpi_has_method(handle, "_DEP")) return false; status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return false; } for (i = 0; i < dep_devices.count; i++) { struct acpi_device *dep_device = NULL; if (dep_devices.handles[i]) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) acpi_bus_get_device(dep_devices.handles[i], &dep_device); #else dep_device = acpi_fetch_acpi_dev(dep_devices.handles[i]); #endif if (dep_device && acpi_match_device_ids(dep_device, cvfd_ids) == 0) { hm2172->use_intel_vsc = true; return 0; } } #endif hm2172->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(hm2172->reset)) return dev_err_probe(dev, PTR_ERR(hm2172->reset), "failed to get reset gpio\n"); hm2172->handshake = devm_gpiod_get_optional(dev, "handshake", GPIOD_OUT_LOW); if (IS_ERR(hm2172->handshake)) return dev_err_probe(dev, PTR_ERR(hm2172->handshake), "failed to get handshake gpio\n"); hm2172->img_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(hm2172->img_clk)) return dev_err_probe(dev, PTR_ERR(hm2172->img_clk), "failed to get imaging clock\n"); hm2172->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(hm2172->avdd)) { ret = PTR_ERR(hm2172->avdd); hm2172->avdd = NULL; if (ret != -ENODEV) return dev_err_probe(dev, ret, "failed to get avdd regulator\n"); } return ret; } static int __maybe_unused hm2172_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2172 *hm2172 = to_hm2172(sd); mutex_lock(&hm2172->mutex); if (hm2172->streaming) hm2172_stop_streaming(hm2172); mutex_unlock(&hm2172->mutex); return 0; } static int __maybe_unused hm2172_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct hm2172 *hm2172 = to_hm2172(sd); int ret = 0; mutex_lock(&hm2172->mutex); if (!hm2172->streaming) goto exit; ret = hm2172_start_streaming(hm2172); if (ret) { hm2172->streaming = false; hm2172_stop_streaming(hm2172); } exit: mutex_unlock(&hm2172->mutex); return ret; } static int hm2172_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct hm2172 *hm2172 = to_hm2172(sd); const struct hm2172_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&hm2172->mutex); hm2172_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { hm2172->cur_mode = mode; __v4l2_ctrl_s_ctrl(hm2172->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(hm2172->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(hm2172->vblank, mode->vts_min - mode->height, 
HM2172_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(hm2172->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(hm2172->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&hm2172->mutex); return 0; } static int hm2172_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct hm2172 *hm2172 = to_hm2172(sd); mutex_lock(&hm2172->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&hm2172->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else hm2172_update_pad_format(hm2172->cur_mode, &fmt->format); mutex_unlock(&hm2172->mutex); return 0; } static int hm2172_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int hm2172_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int hm2172_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct hm2172 *hm2172 = to_hm2172(sd); mutex_lock(&hm2172->mutex); hm2172_update_pad_format(&supported_modes[0], #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) v4l2_subdev_get_try_format(sd, fh->state, 0)); #else v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&hm2172->mutex); return 0; } static const struct v4l2_subdev_video_ops hm2172_video_ops = { .s_stream = hm2172_set_stream, }; static const struct v4l2_subdev_pad_ops hm2172_pad_ops = { .set_fmt = hm2172_set_format, .get_fmt = hm2172_get_format, .enum_mbus_code = hm2172_enum_mbus_code, .enum_frame_size = hm2172_enum_frame_size, }; static const struct v4l2_subdev_ops hm2172_subdev_ops = { .video = &hm2172_video_ops, .pad = &hm2172_pad_ops, }; static const struct media_entity_operations hm2172_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops hm2172_internal_ops = { .open = hm2172_open, }; static int hm2172_identify_module(struct hm2172 *hm2172) { struct i2c_client *client = v4l2_get_subdevdata(&hm2172->sd); int ret; u32 val; char rev; ret = hm2172_read_reg(hm2172, HM2172_REG_CHIP_ID, 3, &val); if (ret) return ret; rev = val & 0xff; val = val >> 8; if (val != HM2172_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", HM2172_CHIP_ID, val); return -ENXIO; } return 0; } static int hm2172_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != 
HM2172_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int hm2172_remove(struct i2c_client *client) #else static void hm2172_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct hm2172 *hm2172 = to_hm2172(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&hm2172->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int hm2172_probe(struct i2c_client *client) { struct hm2172 *hm217; int ret; /* Check HW config */ ret = hm2172_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } hm217 = devm_kzalloc(&client->dev, sizeof(*hm217), GFP_KERNEL); if (!hm217) return -ENOMEM; /* Initialize subdev */ v4l2_i2c_subdev_init(&hm217->sd, client, &hm2172_subdev_ops); hm2172_get_pm_resources(&client->dev); ret = hm2172_power_on(&client->dev); if (ret) { dev_err_probe(&client->dev, ret, "failed to power on\n"); goto error_power_off; } /* Check module identity */ ret = hm2172_identify_module(hm217); if (ret) { dev_err(&client->dev, "failed to find sensor: %d\n", ret); goto error_power_off; } /* Set default mode to max resolution */ hm217->cur_mode = &supported_modes[0]; ret = hm2172_init_controls(hm217); if (ret) return ret; /* Initialize subdev */ hm217->sd.internal_ops = &hm2172_internal_ops; hm217->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; hm217->sd.entity.ops = &hm2172_subdev_entity_ops; hm217->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; /* Initialize source pad */ hm217->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&hm217->sd.entity, 1, &hm217->pad); if (ret) { dev_err(&client->dev, "%s failed:%d\n", __func__, ret); goto error_handler_free; } ret = v4l2_async_register_subdev_sensor(&hm217->sd); if (ret < 0) goto error_media_entity; /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. 
*/ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; error_media_entity: media_entity_cleanup(&hm217->sd.entity); error_handler_free: v4l2_ctrl_handler_free(hm217->sd.ctrl_handler); mutex_destroy(&hm217->mutex); dev_err(&client->dev, "%s failed:%d\n", __func__, ret); error_power_off: hm2172_power_off(&client->dev); return ret; } static const struct dev_pm_ops hm2172_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(hm2172_suspend, hm2172_resume) SET_RUNTIME_PM_OPS(hm2172_power_off, hm2172_power_on, NULL) }; static const struct acpi_device_id hm2172_acpi_ids[] = { {"HIMX2172"}, {} }; MODULE_DEVICE_TABLE(acpi, hm2172_acpi_ids); static struct i2c_driver hm2172_i2c_driver = { .driver = { .name = "hm2172", .pm = &hm2172_pm_ops, .acpi_match_table = hm2172_acpi_ids, }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = hm2172_probe, #else .probe = hm2172_probe, #endif .remove = hm2172_remove, }; module_i2c_driver(hm2172_i2c_driver); MODULE_AUTHOR("Jiabin He "); MODULE_DESCRIPTION("Himax HM2172 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov01a10.c000066400000000000000000000665771471702545500234030ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020-2022 Intel Corporation. #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include #endif #define OV01A10_LINK_FREQ_400MHZ 400000000ULL #define OV01A10_SCLK 40000000LL #define OV01A10_MCLK 19200000 #define OV01A10_DATA_LANES 1 #define OV01A10_RGB_DEPTH 10 #define OV01A10_REG_CHIP_ID 0x300a #define OV01A10_CHIP_ID 0x560141 #define OV01A10_REG_MODE_SELECT 0x0100 #define OV01A10_MODE_STANDBY 0x00 #define OV01A10_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define OV01A10_REG_VTS 0x380e #define OV01A10_VTS_DEF 0x0380 #define OV01A10_VTS_MIN 0x0380 #define OV01A10_VTS_MAX 0xffff /* Exposure controls from sensor */ #define OV01A10_REG_EXPOSURE 0x3501 #define OV01A10_EXPOSURE_MIN 4 #define OV01A10_EXPOSURE_MAX_MARGIN 8 #define OV01A10_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV01A10_REG_ANALOG_GAIN 0x3508 #define OV01A10_ANAL_GAIN_MIN 0x100 #define OV01A10_ANAL_GAIN_MAX 0xffff #define OV01A10_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define OV01A10_REG_DIGILAL_GAIN_B 0x350A #define OV01A10_REG_DIGITAL_GAIN_GB 0x3510 #define OV01A10_REG_DIGITAL_GAIN_GR 0x3513 #define OV01A10_REG_DIGITAL_GAIN_R 0x3516 #define OV01A10_DGTL_GAIN_MIN 0 #define OV01A10_DGTL_GAIN_MAX 0x3ffff #define OV01A10_DGTL_GAIN_STEP 1 #define OV01A10_DGTL_GAIN_DEFAULT 1024 /* Test Pattern Control */ #define OV01A10_REG_TEST_PATTERN 0x4503 #define OV01A10_TEST_PATTERN_ENABLE BIT(7) #define OV01A10_TEST_PATTERN_BAR_SHIFT 0 enum { OV01A10_LINK_FREQ_400MHZ_INDEX, }; struct ov01a10_reg { u16 address; u8 val; }; struct ov01a10_reg_list { u32 num_of_regs; const struct ov01a10_reg *regs; }; struct ov01a10_link_freq_config { const struct ov01a10_reg_list reg_list; }; struct ov01a10_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ 
const struct ov01a10_reg_list reg_list; }; static const struct ov01a10_reg mipi_data_rate_720mbps[] = { }; static const struct ov01a10_reg sensor_1280x800_setting[] = { {0x0103, 0x01}, {0x0302, 0x00}, {0x0303, 0x06}, {0x0304, 0x01}, {0x0305, 0xe0}, {0x0306, 0x00}, {0x0308, 0x01}, {0x0309, 0x00}, {0x030c, 0x01}, {0x0322, 0x01}, {0x0323, 0x06}, {0x0324, 0x01}, {0x0325, 0x68}, {0x3002, 0xa1}, {0x301e, 0xf0}, {0x3022, 0x01}, {0x3501, 0x03}, {0x3502, 0x78}, {0x3504, 0x0c}, {0x3508, 0x01}, {0x3509, 0x00}, {0x3601, 0xc0}, {0x3603, 0x71}, {0x3610, 0x68}, {0x3611, 0x86}, {0x3640, 0x10}, {0x3641, 0x80}, {0x3642, 0xdc}, {0x3646, 0x55}, {0x3647, 0x57}, {0x364b, 0x00}, {0x3653, 0x10}, {0x3655, 0x00}, {0x3656, 0x00}, {0x365f, 0x0f}, {0x3661, 0x45}, {0x3662, 0x24}, {0x3663, 0x11}, {0x3664, 0x07}, {0x3709, 0x34}, {0x370b, 0x6f}, {0x3714, 0x22}, {0x371b, 0x27}, {0x371c, 0x67}, {0x371d, 0xa7}, {0x371e, 0xe7}, {0x3730, 0x81}, {0x3733, 0x10}, {0x3734, 0x40}, {0x3737, 0x04}, {0x3739, 0x1c}, {0x3767, 0x00}, {0x376c, 0x81}, {0x3772, 0x14}, {0x37c2, 0x04}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37e0, 0x00}, {0x37e1, 0x08}, {0x37e2, 0x10}, {0x37e3, 0x04}, {0x37e4, 0x04}, {0x37e5, 0x03}, {0x37e6, 0x04}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x05}, {0x3805, 0x0f}, {0x3806, 0x03}, {0x3807, 0x2f}, {0x3808, 0x05}, {0x3809, 0x00}, {0x380a, 0x03}, {0x380b, 0x20}, {0x380c, 0x02}, {0x380d, 0xe8}, {0x380e, 0x03}, {0x380f, 0x80}, {0x3810, 0x00}, {0x3811, 0x09}, {0x3812, 0x00}, {0x3813, 0x08}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3822, 0x13}, {0x3832, 0x28}, {0x3833, 0x10}, {0x3b00, 0x00}, {0x3c80, 0x00}, {0x3c88, 0x02}, {0x3c8c, 0x07}, {0x3c8d, 0x40}, {0x3cc7, 0x80}, {0x4000, 0xc3}, {0x4001, 0xe0}, {0x4003, 0x40}, {0x4008, 0x02}, {0x4009, 0x19}, {0x400a, 0x01}, {0x400b, 0x6c}, {0x4011, 0x00}, {0x4041, 0x00}, {0x4300, 0xff}, {0x4301, 0x00}, {0x4302, 0x0f}, {0x4503, 0x00}, {0x4601, 0x50}, {0x4800, 0x64}, {0x481f, 0x34}, {0x4825, 0x33}, {0x4837, 0x11}, {0x4881, 0x40}, {0x4883, 0x01}, {0x4890, 0x00}, {0x4901, 0x00}, {0x4902, 0x00}, {0x4b00, 0x2a}, {0x4b0d, 0x00}, {0x450a, 0x04}, {0x450b, 0x00}, {0x5000, 0x65}, {0x5200, 0x18}, {0x5004, 0x00}, {0x5080, 0x40}, {0x0305, 0xf4}, {0x0325, 0xc2}, {0x380c, 0x05}, {0x380d, 0xd0}, }; static const char * const ov01a10_test_pattern_menu[] = { "Disabled", "Color Bar", "Top-Bottom Darker Color Bar", "Right-Left Darker Color Bar", "Color Bar type 4", }; static const s64 link_freq_menu_items[] = { OV01A10_LINK_FREQ_400MHZ, }; static const struct ov01a10_link_freq_config link_freq_configs[] = { [OV01A10_LINK_FREQ_400MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps), .regs = mipi_data_rate_720mbps, } }, }; static const struct ov01a10_mode supported_modes[] = { { .width = 1280, .height = 800, .hts = 1488, .vts_def = OV01A10_VTS_DEF, .vts_min = OV01A10_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(sensor_1280x800_setting), .regs = sensor_1280x800_setting, }, .link_freq_index = OV01A10_LINK_FREQ_400MHZ_INDEX, }, }; struct ov01a10 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; /* Current mode */ const struct ov01a10_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) struct vsc_mipi_config 
conf; struct vsc_camera_status status; struct v4l2_ctrl *privacy_status; #endif /* Streaming on/off */ bool streaming; }; static inline struct ov01a10 *to_ov01a10(struct v4l2_subdev *subdev) { return container_of(subdev, struct ov01a10, sd); } static int ov01a10_read_reg(struct ov01a10 *ov01a10, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov01a10_write_reg(struct ov01a10 *ov01a10, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) return ret < 0 ? ret : -EIO; return 0; } static int ov01a10_write_reg_list(struct ov01a10 *ov01a10, const struct ov01a10_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov01a10_write_reg(ov01a10, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov01a10_update_digital_gain(struct ov01a10 *ov01a10, u32 d_gain) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); u32 real = d_gain << 6; int ret = 0; ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGILAL_GAIN_B, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_B"); return ret; } ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_GB, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_GB"); return ret; } ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_GR, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_GR"); return ret; } ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_R, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_R"); return ret; } return ret; } static int ov01a10_test_pattern(struct ov01a10 *ov01a10, u32 pattern) { if (pattern) pattern = (pattern - 1) << OV01A10_TEST_PATTERN_BAR_SHIFT | OV01A10_TEST_PATTERN_ENABLE; return ov01a10_write_reg(ov01a10, OV01A10_REG_TEST_PATTERN, 1, pattern); } static int ov01a10_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov01a10 *ov01a10 = container_of(ctrl->handler, struct ov01a10, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov01a10->cur_mode->height + ctrl->val - OV01A10_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov01a10->exposure, ov01a10->exposure->minimum, exposure_max, ov01a10->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already 
up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov01a10_write_reg(ov01a10, OV01A10_REG_ANALOG_GAIN, 2, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = ov01a10_update_digital_gain(ov01a10, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = ov01a10_write_reg(ov01a10, OV01A10_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: ret = ov01a10_write_reg(ov01a10, OV01A10_REG_VTS, 2, ov01a10->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov01a10_test_pattern(ov01a10, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov01a10_ctrl_ops = { .s_ctrl = ov01a10_set_ctrl, }; static int ov01a10_init_controls(struct ov01a10 *ov01a10) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct ov01a10_mode *cur_mode; s64 exposure_max, h_blank; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &ov01a10->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &ov01a10->mutex; cur_mode = ov01a10->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); ov01a10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (ov01a10->link_freq) ov01a10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; ov01a10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, OV01A10_SCLK, 1, OV01A10_SCLK); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = OV01A10_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; ov01a10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = cur_mode->hts - cur_mode->width; ov01a10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov01a10->hblank) ov01a10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ov01a10->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(ov01a10->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV01A10_ANAL_GAIN_MIN, OV01A10_ANAL_GAIN_MAX, OV01A10_ANAL_GAIN_STEP, OV01A10_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV01A10_DGTL_GAIN_MIN, OV01A10_DGTL_GAIN_MAX, OV01A10_DGTL_GAIN_STEP, OV01A10_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - OV01A10_EXPOSURE_MAX_MARGIN; ov01a10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_EXPOSURE, OV01A10_EXPOSURE_MIN, exposure_max, OV01A10_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov01a10_test_pattern_menu) - 1, 0, 0, ov01a10_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov01a10->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov01a10_update_pad_format(const struct ov01a10_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = 
MEDIA_BUS_FMT_SBGGR10_1X10; fmt->field = V4L2_FIELD_NONE; } static int ov01a10_start_streaming(struct ov01a10 *ov01a10) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); const struct ov01a10_reg_list *reg_list; int link_freq_index; int ret = 0; link_freq_index = ov01a10->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov01a10_write_reg_list(ov01a10, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &ov01a10->cur_mode->reg_list; ret = ov01a10_write_reg_list(ov01a10, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(ov01a10->sd.ctrl_handler); if (ret) return ret; ret = ov01a10_write_reg(ov01a10, OV01A10_REG_MODE_SELECT, 1, OV01A10_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void ov01a10_stop_streaming(struct ov01a10 *ov01a10) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); int ret = 0; ret = ov01a10_write_reg(ov01a10, OV01A10_REG_MODE_SELECT, 1, OV01A10_MODE_STANDBY); if (ret) dev_err(&client->dev, "failed to stop streaming"); } static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable) { struct ov01a10 *ov01a10 = to_ov01a10(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (ov01a10->streaming == enable) return 0; mutex_lock(&ov01a10->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&ov01a10->mutex); return ret; } ret = ov01a10_start_streaming(ov01a10); if (ret) { enable = 0; ov01a10_stop_streaming(ov01a10); pm_runtime_put(&client->dev); } } else { ov01a10_stop_streaming(ov01a10); pm_runtime_put(&client->dev); } ov01a10->streaming = enable; mutex_unlock(&ov01a10->mutex); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void ov01a10_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct ov01a10 *ov01a10 = handle; v4l2_ctrl_s_ctrl(ov01a10->privacy_status, !status); } static int ov01a10_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov01a10 *ov01a10 = to_ov01a10(sd); int ret; ret = vsc_release_camera_sensor(&ov01a10->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); return ret; } static int ov01a10_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov01a10 *ov01a10 = to_ov01a10(sd); int ret; ret = vsc_acquire_camera_sensor(&ov01a10->conf, NULL, ov01a10, &ov01a10->status); if (ret && ret != -EAGAIN) dev_err(dev, "Acquire VSC failed"); return ret; } #endif static int __maybe_unused ov01a10_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov01a10 *ov01a10 = to_ov01a10(sd); mutex_lock(&ov01a10->mutex); if (ov01a10->streaming) ov01a10_stop_streaming(ov01a10); mutex_unlock(&ov01a10->mutex); return 0; } static int __maybe_unused ov01a10_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov01a10 *ov01a10 = to_ov01a10(sd); int ret = 0; mutex_lock(&ov01a10->mutex); if (!ov01a10->streaming) goto exit; ret = ov01a10_start_streaming(ov01a10); if (ret) { ov01a10->streaming = false; ov01a10_stop_streaming(ov01a10); } exit: mutex_unlock(&ov01a10->mutex); return ret; } static int ov01a10_set_format(struct v4l2_subdev *sd, #if 
LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov01a10 *ov01a10 = to_ov01a10(sd); const struct ov01a10_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&ov01a10->mutex); ov01a10_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov01a10->cur_mode = mode; __v4l2_ctrl_s_ctrl(ov01a10->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov01a10->pixel_rate, OV01A10_SCLK); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov01a10->vblank, mode->vts_min - mode->height, OV01A10_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov01a10->vblank, vblank_def); h_blank = mode->hts - mode->width; __v4l2_ctrl_modify_range(ov01a10->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov01a10->mutex); return 0; } static int ov01a10_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov01a10 *ov01a10 = to_ov01a10(sd); mutex_lock(&ov01a10->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&ov01a10->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov01a10->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov01a10_update_pad_format(ov01a10->cur_mode, &fmt->format); mutex_unlock(&ov01a10->mutex); return 0; } static int ov01a10_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int ov01a10_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SBGGR10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov01a10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov01a10 *ov01a10 = to_ov01a10(sd); mutex_lock(&ov01a10->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ov01a10_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) ov01a10_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->state, 0)); #else ov01a10_update_pad_format(&supported_modes[0], 
v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov01a10->mutex); return 0; } static const struct v4l2_subdev_video_ops ov01a10_video_ops = { .s_stream = ov01a10_set_stream, }; static const struct v4l2_subdev_pad_ops ov01a10_pad_ops = { .set_fmt = ov01a10_set_format, .get_fmt = ov01a10_get_format, .enum_mbus_code = ov01a10_enum_mbus_code, .enum_frame_size = ov01a10_enum_frame_size, }; static const struct v4l2_subdev_ops ov01a10_subdev_ops = { .video = &ov01a10_video_ops, .pad = &ov01a10_pad_ops, }; static const struct media_entity_operations ov01a10_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov01a10_internal_ops = { .open = ov01a10_open, }; static int ov01a10_identify_module(struct ov01a10 *ov01a10) { struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd); int ret; u32 val; ret = ov01a10_read_reg(ov01a10, OV01A10_REG_CHIP_ID, 3, &val); if (ret) return ret; if (val != OV01A10_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV01A10_CHIP_ID, val); return -ENXIO; } return 0; } static int ov01a10_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV01A10_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov01a10_remove(struct i2c_client *client) #else static void ov01a10_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov01a10 *ov01a10 = to_ov01a10(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&ov01a10->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int ov01a10_probe(struct i2c_client *client) { struct ov01a10 *ov01a10; int ret = 0; /* Check HW config */ ret = ov01a10_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } ov01a10 = devm_kzalloc(&client->dev, sizeof(*ov01a10), GFP_KERNEL); if (!ov01a10) return -ENOMEM; v4l2_i2c_subdev_init(&ov01a10->sd, client, &ov01a10_subdev_ops); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ov01a10->conf.lane_num = OV01A10_DATA_LANES; /* frequency unit 100k */ ov01a10->conf.freq = OV01A10_LINK_FREQ_400MHZ / 100000; ret = 
vsc_acquire_camera_sensor(&ov01a10->conf, ov01a10_vsc_privacy_callback, ov01a10, &ov01a10->status); if (ret == -EAGAIN) { return -EPROBE_DEFER; } else if (ret) { dev_err(&client->dev, "Acquire VSC failed"); return ret; } #endif ret = ov01a10_identify_module(ov01a10); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_ret; } mutex_init(&ov01a10->mutex); ov01a10->cur_mode = &supported_modes[0]; ret = ov01a10_init_controls(ov01a10); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov01a10->sd.internal_ops = &ov01a10_internal_ops; ov01a10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov01a10->sd.entity.ops = &ov01a10_subdev_entity_ops; ov01a10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov01a10->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov01a10->sd.entity, 1, &ov01a10->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ret = v4l2_async_register_subdev_sensor(&ov01a10->sd); if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&ov01a10->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov01a10->sd.ctrl_handler); mutex_destroy(&ov01a10->mutex); probe_error_ret: #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ov01a10_power_off(&client->dev); #endif return ret; } static const struct dev_pm_ops ov01a10_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov01a10_suspend, ov01a10_resume) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) SET_RUNTIME_PM_OPS(ov01a10_power_off, ov01a10_power_on, NULL) #endif }; #ifdef CONFIG_ACPI static const struct acpi_device_id ov01a10_acpi_ids[] = { {"OVTI01A0"}, {} }; MODULE_DEVICE_TABLE(acpi, ov01a10_acpi_ids); #endif static struct i2c_driver ov01a10_i2c_driver = { .driver = { .name = "ov01a10", .pm = &ov01a10_pm_ops, .acpi_match_table = ACPI_PTR(ov01a10_acpi_ids), }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov01a10_probe, #else .probe = ov01a10_probe, #endif .remove = ov01a10_remove, }; module_i2c_driver(ov01a10_i2c_driver); MODULE_AUTHOR("Wang Yating "); MODULE_DESCRIPTION("OmniVision OV01A10 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov01a1s.c000066400000000000000000000770151471702545500234730ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020-2022 Intel Corporation. 
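/*
 * ov01a1s.c — OmniVision OV01A1S camera sensor driver (descriptive summary
 * added as a comment; the code below remains authoritative).
 *
 * The sensor streams 10-bit SGRBG Bayer data over a single MIPI CSI-2 lane
 * at a 400 MHz link frequency and exposes one 1296x798 mode. Power control
 * is chosen at probe time: the Intel VSC companion (kernels before 6.6 with
 * CONFIG_INTEL_VSC), INT3472-provided reset/powerdown GPIOs plus an avdd
 * regulator and clock, or the legacy power_ctrl_logic helper.
 *
 * Register access uses big-endian packing: ov01a1s_read_reg() places the
 * transferred bytes in the low end of a zeroed 4-byte buffer, so a 3-byte
 * read of the chip-ID register 0x300a yields 0x00560141 from
 * get_unaligned_be32(), matching OV01A1S_CHIP_ID.
 */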
#include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) #include #include #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) #include "power_ctrl_logic.h" #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include static const struct acpi_device_id cvfd_ids[] = { { "INTC1059", 0 }, { "INTC1095", 0 }, { "INTC100A", 0 }, { "INTC10CF", 0 }, { } }; #endif #define OV01A1S_LINK_FREQ_400MHZ 400000000ULL #define OV01A1S_SCLK 40000000LL #define OV01A1S_MCLK 19200000 #define OV01A1S_DATA_LANES 1 #define OV01A1S_RGB_DEPTH 10 #define OV01A1S_REG_CHIP_ID 0x300a #define OV01A1S_CHIP_ID 0x560141 #define OV01A1S_REG_MODE_SELECT 0x0100 #define OV01A1S_MODE_STANDBY 0x00 #define OV01A1S_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define OV01A1S_REG_VTS 0x380e #define OV01A1S_VTS_DEF 0x0380 #define OV01A1S_VTS_MIN 0x0380 #define OV01A1S_VTS_MAX 0xffff /* Exposure controls from sensor */ #define OV01A1S_REG_EXPOSURE 0x3501 #define OV01A1S_EXPOSURE_MIN 4 #define OV01A1S_EXPOSURE_MAX_MARGIN 8 #define OV01A1S_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV01A1S_REG_ANALOG_GAIN 0x3508 #define OV01A1S_ANAL_GAIN_MIN 0x100 #define OV01A1S_ANAL_GAIN_MAX 0xffff #define OV01A1S_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define OV01A1S_REG_DIGILAL_GAIN_B 0x350A #define OV01A1S_REG_DIGITAL_GAIN_GB 0x3510 #define OV01A1S_REG_DIGITAL_GAIN_GR 0x3513 #define OV01A1S_REG_DIGITAL_GAIN_R 0x3516 #define OV01A1S_DGTL_GAIN_MIN 0 #define OV01A1S_DGTL_GAIN_MAX 0x3ffff #define OV01A1S_DGTL_GAIN_STEP 1 #define OV01A1S_DGTL_GAIN_DEFAULT 1024 /* Test Pattern Control */ #define OV01A1S_REG_TEST_PATTERN 0x4503 #define OV01A1S_TEST_PATTERN_ENABLE BIT(7) #define OV01A1S_TEST_PATTERN_BAR_SHIFT 0 enum { OV01A1S_LINK_FREQ_400MHZ_INDEX, }; struct ov01a1s_reg { u16 address; u8 val; }; struct ov01a1s_reg_list { u32 num_of_regs; const struct ov01a1s_reg *regs; }; struct ov01a1s_link_freq_config { const struct ov01a1s_reg_list reg_list; }; struct ov01a1s_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct ov01a1s_reg_list reg_list; }; static const struct ov01a1s_reg mipi_data_rate_720mbps[] = { }; static const struct ov01a1s_reg sensor_1296x800_setting[] = { {0x0103, 0x01}, {0x0302, 0x00}, {0x0303, 0x06}, {0x0304, 0x01}, {0x0305, 0x90}, {0x0306, 0x00}, {0x0308, 0x01}, {0x0309, 0x00}, {0x030c, 0x01}, {0x0322, 0x01}, {0x0323, 0x06}, {0x0324, 0x01}, {0x0325, 0x68}, {0x3002, 0xa1}, {0x301e, 0xf0}, {0x3022, 0x01}, {0x3501, 0x03}, {0x3502, 0x78}, {0x3504, 0x0c}, {0x3508, 0x01}, {0x3509, 0x00}, {0x3601, 0xc0}, {0x3603, 0x71}, {0x3610, 0x68}, {0x3611, 0x86}, {0x3640, 0x10}, {0x3641, 0x80}, {0x3642, 0xdc}, {0x3646, 0x55}, {0x3647, 0x57}, {0x364b, 0x00}, {0x3653, 0x10}, {0x3655, 0x00}, {0x3656, 0x00}, {0x365f, 0x0f}, {0x3661, 0x45}, {0x3662, 0x24}, {0x3663, 0x11}, {0x3664, 0x07}, {0x3709, 0x34}, {0x370b, 0x6f}, {0x3714, 0x22}, {0x371b, 0x27}, {0x371c, 0x67}, {0x371d, 0xa7}, {0x371e, 0xe7}, {0x3730, 0x81}, {0x3733, 0x10}, {0x3734, 0x40}, {0x3737, 0x04}, {0x3739, 0x1c}, {0x3767, 0x00}, {0x376c, 0x81}, {0x3772, 0x14}, {0x37c2, 0x04}, {0x37d8, 
0x03}, {0x37d9, 0x0c}, {0x37e0, 0x00}, {0x37e1, 0x08}, {0x37e2, 0x10}, {0x37e3, 0x04}, {0x37e4, 0x04}, {0x37e5, 0x03}, {0x37e6, 0x04}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x05}, {0x3805, 0x0f}, {0x3806, 0x03}, {0x3807, 0x2f}, {0x3808, 0x05}, {0x3809, 0x00}, {0x380a, 0x03}, {0x380b, 0x1e}, {0x380c, 0x05}, {0x380d, 0xd0}, {0x380e, 0x03}, {0x380f, 0x80}, {0x3810, 0x00}, {0x3811, 0x09}, {0x3812, 0x00}, {0x3813, 0x08}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3822, 0x03}, {0x3832, 0x28}, {0x3833, 0x10}, {0x3b00, 0x00}, {0x3c80, 0x00}, {0x3c88, 0x02}, {0x3c8c, 0x07}, {0x3c8d, 0x40}, {0x3cc7, 0x80}, {0x4000, 0xc3}, {0x4001, 0xe0}, {0x4003, 0x40}, {0x4008, 0x02}, {0x4009, 0x19}, {0x400a, 0x01}, {0x400b, 0x6c}, {0x4011, 0x00}, {0x4041, 0x00}, {0x4300, 0xff}, {0x4301, 0x00}, {0x4302, 0x0f}, {0x4503, 0x00}, {0x4601, 0x50}, {0x481f, 0x34}, {0x4825, 0x33}, {0x4837, 0x14}, {0x4881, 0x40}, {0x4883, 0x01}, {0x4890, 0x00}, {0x4901, 0x00}, {0x4902, 0x00}, {0x4b00, 0x2a}, {0x4b0d, 0x00}, {0x450a, 0x04}, {0x450b, 0x00}, {0x5000, 0x65}, {0x5004, 0x00}, {0x5080, 0x40}, {0x5200, 0x18}, {0x4837, 0x14}, {0x0305, 0xf4}, {0x0325, 0xc2}, {0x3808, 0x05}, {0x3809, 0x10}, {0x380a, 0x03}, {0x380b, 0x1e}, {0x3810, 0x00}, {0x3811, 0x00}, {0x3812, 0x00}, {0x3813, 0x09}, {0x3820, 0x88}, {0x373d, 0x24}, }; static const char * const ov01a1s_test_pattern_menu[] = { "Disabled", "Color Bar", "Top-Bottom Darker Color Bar", "Right-Left Darker Color Bar", "Color Bar type 4", }; static const s64 link_freq_menu_items[] = { OV01A1S_LINK_FREQ_400MHZ, }; static const struct ov01a1s_link_freq_config link_freq_configs[] = { [OV01A1S_LINK_FREQ_400MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps), .regs = mipi_data_rate_720mbps, } }, }; static const struct ov01a1s_mode supported_modes[] = { { .width = 1296, .height = 798, .hts = 1488, .vts_def = OV01A1S_VTS_DEF, .vts_min = OV01A1S_VTS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(sensor_1296x800_setting), .regs = sensor_1296x800_setting, }, .link_freq_index = OV01A1S_LINK_FREQ_400MHZ_INDEX, }, }; struct ov01a1s { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) struct v4l2_ctrl *privacy_status; /* VSC settings */ struct vsc_mipi_config conf; struct vsc_camera_status status; #endif /* Current mode */ const struct ov01a1s_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* i2c client */ struct i2c_client *client; #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) /* GPIO for reset */ struct gpio_desc *reset_gpio; /* GPIO for powerdown */ struct gpio_desc *powerdown_gpio; /* Power enable */ struct regulator *avdd; /* Clock provider */ struct clk *clk; #endif enum { OV01A1S_USE_DEFAULT = 0, #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) || IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) OV01A1S_USE_INT3472 = 1, #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) OV01A1S_USE_INTEL_VSC = 2, #endif } power_type; /* Streaming on/off */ bool streaming; }; static inline struct ov01a1s *to_ov01a1s(struct v4l2_subdev *subdev) { return container_of(subdev, struct ov01a1s, sd); } static int ov01a1s_read_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 *val) { struct i2c_client *client = 
ov01a1s->client; struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov01a1s_write_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 val) { struct i2c_client *client = ov01a1s->client; u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) return ret < 0 ? ret : -EIO; return 0; } static int ov01a1s_write_reg_list(struct ov01a1s *ov01a1s, const struct ov01a1s_reg_list *r_list) { struct i2c_client *client = ov01a1s->client; unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov01a1s_write_reg(ov01a1s, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov01a1s_update_digital_gain(struct ov01a1s *ov01a1s, u32 d_gain) { struct i2c_client *client = ov01a1s->client; u32 real = d_gain << 6; int ret = 0; ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGILAL_GAIN_B, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_B"); return ret; } ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_GB, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_GB"); return ret; } ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_GR, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_GR"); return ret; } ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_R, 3, real); if (ret) { dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_R"); return ret; } return ret; } static int ov01a1s_test_pattern(struct ov01a1s *ov01a1s, u32 pattern) { if (pattern) pattern = (pattern - 1) << OV01A1S_TEST_PATTERN_BAR_SHIFT | OV01A1S_TEST_PATTERN_ENABLE; return ov01a1s_write_reg(ov01a1s, OV01A1S_REG_TEST_PATTERN, 1, pattern); } static int ov01a1s_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov01a1s *ov01a1s = container_of(ctrl->handler, struct ov01a1s, ctrl_handler); struct i2c_client *client = ov01a1s->client; s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov01a1s->cur_mode->height + ctrl->val - OV01A1S_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov01a1s->exposure, ov01a1s->exposure->minimum, exposure_max, ov01a1s->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_ANALOG_GAIN, 2, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = ov01a1s_update_digital_gain(ov01a1s, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: ret = ov01a1s_write_reg(ov01a1s, 
OV01A1S_REG_VTS, 2, ov01a1s->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov01a1s_test_pattern(ov01a1s, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov01a1s_ctrl_ops = { .s_ctrl = ov01a1s_set_ctrl, }; static int ov01a1s_init_controls(struct ov01a1s *ov01a1s) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct ov01a1s_mode *cur_mode; s64 exposure_max, h_blank; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &ov01a1s->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &ov01a1s->mutex; cur_mode = ov01a1s->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); ov01a1s->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (ov01a1s->link_freq) ov01a1s->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; ov01a1s->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, OV01A1S_SCLK, 1, OV01A1S_SCLK); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = OV01A1S_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; ov01a1s->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = cur_mode->hts - cur_mode->width; ov01a1s->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov01a1s->hblank) ov01a1s->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ov01a1s->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(ov01a1s->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV01A1S_ANAL_GAIN_MIN, OV01A1S_ANAL_GAIN_MAX, OV01A1S_ANAL_GAIN_STEP, OV01A1S_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV01A1S_DGTL_GAIN_MIN, OV01A1S_DGTL_GAIN_MAX, OV01A1S_DGTL_GAIN_STEP, OV01A1S_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - OV01A1S_EXPOSURE_MAX_MARGIN; ov01a1s->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_EXPOSURE, OV01A1S_EXPOSURE_MIN, exposure_max, OV01A1S_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov01a1s_test_pattern_menu) - 1, 0, 0, ov01a1s_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov01a1s->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov01a1s_update_pad_format(const struct ov01a1s_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void ov01a1s_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct ov01a1s *ov01a1s = handle; v4l2_ctrl_s_ctrl(ov01a1s->privacy_status, !status); } #endif static int ov01a1s_start_streaming(struct ov01a1s *ov01a1s) { struct i2c_client *client = ov01a1s->client; 
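	/*
	 * Descriptive note: this helper writes the PLL register list for the
	 * selected link frequency, then the per-mode register list, re-applies
	 * the cached V4L2 controls, and finally writes OV01A1S_REG_MODE_SELECT
	 * to put the sensor into streaming mode.
	 */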
const struct ov01a1s_reg_list *reg_list; int link_freq_index; int ret = 0; link_freq_index = ov01a1s->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov01a1s_write_reg_list(ov01a1s, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &ov01a1s->cur_mode->reg_list; ret = ov01a1s_write_reg_list(ov01a1s, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(ov01a1s->sd.ctrl_handler); if (ret) return ret; ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1, OV01A1S_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void ov01a1s_stop_streaming(struct ov01a1s *ov01a1s) { struct i2c_client *client = ov01a1s->client; int ret = 0; ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1, OV01A1S_MODE_STANDBY); if (ret) dev_err(&client->dev, "failed to stop streaming"); } static int ov01a1s_set_stream(struct v4l2_subdev *sd, int enable) { struct ov01a1s *ov01a1s = to_ov01a1s(sd); struct i2c_client *client = ov01a1s->client; int ret = 0; if (ov01a1s->streaming == enable) return 0; mutex_lock(&ov01a1s->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&ov01a1s->mutex); return ret; } ret = ov01a1s_start_streaming(ov01a1s); if (ret) { enable = 0; ov01a1s_stop_streaming(ov01a1s); pm_runtime_put(&client->dev); } } else { ov01a1s_stop_streaming(ov01a1s); pm_runtime_put(&client->dev); } ov01a1s->streaming = enable; mutex_unlock(&ov01a1s->mutex); return ret; } static int ov01a1s_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov01a1s *ov01a1s = to_ov01a1s(sd); int ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (ov01a1s->power_type == OV01A1S_USE_INTEL_VSC) { ret = vsc_release_camera_sensor(&ov01a1s->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); } #endif #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) if (ov01a1s->power_type == OV01A1S_USE_INT3472) { gpiod_set_value_cansleep(ov01a1s->reset_gpio, 1); gpiod_set_value_cansleep(ov01a1s->powerdown_gpio, 1); if (ov01a1s->avdd) ret = regulator_disable(ov01a1s->avdd); clk_disable_unprepare(ov01a1s->clk); msleep(20); } #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) if (ov01a1s->power_type == OV01A1S_USE_INT3472) ret = power_ctrl_logic_set_power(0); #endif return ret; } static int ov01a1s_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov01a1s *ov01a1s = to_ov01a1s(sd); int ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (ov01a1s->power_type == OV01A1S_USE_INTEL_VSC) { ov01a1s->conf.lane_num = OV01A1S_DATA_LANES; /* frequency unit 100k */ ov01a1s->conf.freq = OV01A1S_LINK_FREQ_400MHZ / 100000; ret = vsc_acquire_camera_sensor(&ov01a1s->conf, ov01a1s_vsc_privacy_callback, ov01a1s, &ov01a1s->status); if (ret == -EAGAIN) return -EPROBE_DEFER; if (ret) { dev_err(dev, "Acquire VSC failed"); return ret; } if (ov01a1s->privacy_status) __v4l2_ctrl_s_ctrl(ov01a1s->privacy_status, !(ov01a1s->status.status)); return ret; } #endif #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) if (ov01a1s->power_type == OV01A1S_USE_INT3472) { ret = clk_prepare_enable(ov01a1s->clk); if (ret) return ret; if (ov01a1s->avdd) ret = regulator_enable(ov01a1s->avdd); if (ret) return ret; gpiod_set_value_cansleep(ov01a1s->powerdown_gpio, 0); 
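		/*
		 * The reset GPIO is released immediately after power-down
		 * below, followed by a 20 ms settle delay before the first
		 * I2C access to the sensor.
		 */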
gpiod_set_value_cansleep(ov01a1s->reset_gpio, 0); msleep(20); } #elif IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) if (ov01a1s->power_type == OV01A1S_USE_INT3472) ret = power_ctrl_logic_set_power(1); #endif return ret; } static int __maybe_unused ov01a1s_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov01a1s *ov01a1s = to_ov01a1s(sd); mutex_lock(&ov01a1s->mutex); if (ov01a1s->streaming) ov01a1s_stop_streaming(ov01a1s); mutex_unlock(&ov01a1s->mutex); return 0; } static int __maybe_unused ov01a1s_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov01a1s *ov01a1s = to_ov01a1s(sd); int ret = 0; mutex_lock(&ov01a1s->mutex); if (!ov01a1s->streaming) goto exit; ret = ov01a1s_start_streaming(ov01a1s); if (ret) { ov01a1s->streaming = false; ov01a1s_stop_streaming(ov01a1s); } exit: mutex_unlock(&ov01a1s->mutex); return ret; } static int ov01a1s_set_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov01a1s *ov01a1s = to_ov01a1s(sd); const struct ov01a1s_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&ov01a1s->mutex); ov01a1s_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov01a1s->cur_mode = mode; __v4l2_ctrl_s_ctrl(ov01a1s->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov01a1s->pixel_rate, OV01A1S_SCLK); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov01a1s->vblank, mode->vts_min - mode->height, OV01A1S_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov01a1s->vblank, vblank_def); h_blank = mode->hts - mode->width; __v4l2_ctrl_modify_range(ov01a1s->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov01a1s->mutex); return 0; } static int ov01a1s_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov01a1s *ov01a1s = to_ov01a1s(sd); mutex_lock(&ov01a1s->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&ov01a1s->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov01a1s->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov01a1s_update_pad_format(ov01a1s->cur_mode, &fmt->format); mutex_unlock(&ov01a1s->mutex); return 0; } static int ov01a1s_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static 
int ov01a1s_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov01a1s_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov01a1s *ov01a1s = to_ov01a1s(sd); mutex_lock(&ov01a1s->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ov01a1s_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) ov01a1s_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->state, 0)); #else ov01a1s_update_pad_format(&supported_modes[0], v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov01a1s->mutex); return 0; } static const struct v4l2_subdev_video_ops ov01a1s_video_ops = { .s_stream = ov01a1s_set_stream, }; static const struct v4l2_subdev_pad_ops ov01a1s_pad_ops = { .set_fmt = ov01a1s_set_format, .get_fmt = ov01a1s_get_format, .enum_mbus_code = ov01a1s_enum_mbus_code, .enum_frame_size = ov01a1s_enum_frame_size, }; static const struct v4l2_subdev_ops ov01a1s_subdev_ops = { .video = &ov01a1s_video_ops, .pad = &ov01a1s_pad_ops, }; static const struct media_entity_operations ov01a1s_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov01a1s_internal_ops = { .open = ov01a1s_open, }; static int ov01a1s_identify_module(struct ov01a1s *ov01a1s) { struct i2c_client *client = ov01a1s->client; int ret; u32 val; ret = ov01a1s_read_reg(ov01a1s, OV01A1S_REG_CHIP_ID, 3, &val); if (ret) return ret; if (val != OV01A1S_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV01A1S_CHIP_ID, val); return -ENXIO; } return 0; } static int ov01a1s_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV01A1S_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov01a1s_remove(struct i2c_client *client) #else static void ov01a1s_remove(struct 
i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov01a1s *ov01a1s = to_ov01a1s(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&ov01a1s->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } /* This function tries to get power control resources */ static int ov01a1s_get_pm_resources(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov01a1s *ov01a1s = to_ov01a1s(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) acpi_handle handle = ACPI_HANDLE(dev); struct acpi_handle_list deps; acpi_status status; int i = 0; if (!acpi_has_method(handle, "_DEP")) return false; status = acpi_evaluate_reference(handle, "_DEP", NULL, &deps); if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return false; } for (i = 0; i < deps.count; i++) { struct acpi_device *dep = NULL; if (deps.handles[i]) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) acpi_bus_get_device(deps.handles[i], &dep); #else dep = acpi_fetch_acpi_dev(deps.handles[i]); #endif if (dep && !acpi_match_device_ids(dep, cvfd_ids)) { ov01a1s->power_type = OV01A1S_USE_INTEL_VSC; return 0; } } #endif #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) ov01a1s->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ov01a1s->reset_gpio)) return dev_err_probe(dev, PTR_ERR(ov01a1s->reset_gpio), "failed to get reset gpio\n"); ov01a1s->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(ov01a1s->clk)) return dev_err_probe(dev, PTR_ERR(ov01a1s->clk), "failed to get imaging clock\n"); ov01a1s->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(ov01a1s->avdd)) { ret = PTR_ERR(ov01a1s->avdd); ov01a1s->avdd = NULL; if (ret != -ENODEV) return dev_err_probe(dev, ret, "failed to get avdd regulator\n"); } #endif #if IS_ENABLED(CONFIG_INTEL_SKL_INT3472) || IS_ENABLED(CONFIG_POWER_CTRL_LOGIC) ov01a1s->power_type = OV01A1S_USE_INT3472; #endif return 0; } static int ov01a1s_probe(struct i2c_client *client) { struct ov01a1s *ov01a1s; int ret = 0; /* Check HW config */ ret = ov01a1s_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } ov01a1s = devm_kzalloc(&client->dev, sizeof(*ov01a1s), GFP_KERNEL); if (!ov01a1s) return -ENOMEM; ov01a1s->client = client; v4l2_i2c_subdev_init(&ov01a1s->sd, client, &ov01a1s_subdev_ops); ov01a1s_get_pm_resources(&client->dev); ret = ov01a1s_power_on(&client->dev); if (ret) { dev_err_probe(&client->dev, ret, "failed to power on\n"); return ret; } ret = ov01a1s_identify_module(ov01a1s); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_power_off; } mutex_init(&ov01a1s->mutex); ov01a1s->cur_mode = &supported_modes[0]; ret = ov01a1s_init_controls(ov01a1s); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov01a1s->sd.internal_ops = &ov01a1s_internal_ops; ov01a1s->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov01a1s->sd.entity.ops = &ov01a1s_subdev_entity_ops; ov01a1s->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov01a1s->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov01a1s->sd.entity, 1, &ov01a1s->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) ret = 
v4l2_async_register_subdev_sensor_common(&ov01a1s->sd); #else ret = v4l2_async_register_subdev_sensor(&ov01a1s->sd); #endif if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&ov01a1s->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov01a1s->sd.ctrl_handler); mutex_destroy(&ov01a1s->mutex); probe_error_power_off: ov01a1s_power_off(&client->dev); return ret; } static const struct dev_pm_ops ov01a1s_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov01a1s_suspend, ov01a1s_resume) SET_RUNTIME_PM_OPS(ov01a1s_power_off, ov01a1s_power_on, NULL) }; #ifdef CONFIG_ACPI static const struct acpi_device_id ov01a1s_acpi_ids[] = { { "OVTI01AS" }, {} }; MODULE_DEVICE_TABLE(acpi, ov01a1s_acpi_ids); #endif static struct i2c_driver ov01a1s_i2c_driver = { .driver = { .name = "ov01a1s", .pm = &ov01a1s_pm_ops, .acpi_match_table = ACPI_PTR(ov01a1s_acpi_ids), }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov01a1s_probe, #else .probe = ov01a1s_probe, #endif .remove = ov01a1s_remove, }; module_i2c_driver(ov01a1s_i2c_driver); MODULE_AUTHOR("Xu, Chongyang "); MODULE_AUTHOR("Lai, Jim "); MODULE_AUTHOR("Qiu, Tianshu "); MODULE_AUTHOR("Shawn Tu "); MODULE_AUTHOR("Bingbu Cao "); MODULE_DESCRIPTION("OmniVision OV01A1S sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov02c10.c000066400000000000000000001167271471702545500233770ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. 
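/*
 * ov02c10.c — OmniVision OV02C10 camera sensor driver (descriptive summary
 * added as a comment; the code below remains authoritative).
 *
 * The sensor outputs 10-bit SGRBG Bayer data at 1928x1092 over either one
 * or two MIPI CSI-2 lanes, both configurations using a 400 MHz link
 * frequency. The camera module name is obtained through the ACPI _DSM with
 * GUID 822ace8f-2814-4174-a56b-5f029fe079ee; for the 2BG203N3, CJFME32 and
 * KBFC645 modules the driver flips the sensor rotate control and adjusts
 * the ISP window offsets before streaming starts.
 *
 * Illustrative sketch only (not part of the original file): one way such a
 * _DSM string can be queried with the standard ACPI helpers. The function
 * name, its placement ahead of the includes, and the revision/function
 * arguments are assumptions for illustration, which is why the block is
 * compiled out with #if 0; the driver's real lookup lives further down in
 * this file.
 */
#if 0
static int ov02c10_example_read_module_name(struct device *dev, char *buf,
					    size_t len)
{
	union acpi_object *obj;

	/* Ask the sensor's ACPI node for its module-identifier string. */
	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev),
				      &cio2_sensor_module_guid, 0x00, 0x01,
				      NULL, ACPI_TYPE_STRING);
	if (!obj)
		return -ENODEV;

	strscpy(buf, obj->string.pointer, len);
	ACPI_FREE(obj);

	return 0;
}
#endif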
#include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include static const struct acpi_device_id cvfd_ids[] = { { "INTC1059", 0 }, { "INTC1095", 0 }, { "INTC100A", 0 }, { "INTC10CF", 0 }, {} }; #endif #define OV02C10_LINK_FREQ_400MHZ 400000000ULL #define OV02C10_SCLK 80000000LL #define OV02C10_MCLK 19200000 #define OV02C10_DATA_LANES 1 #define OV02C10_RGB_DEPTH 10 #define OV02C10_REG_CHIP_ID 0x300a #define OV02C10_CHIP_ID 0x560243 #define OV02C10_REG_MODE_SELECT 0x0100 #define OV02C10_MODE_STANDBY 0x00 #define OV02C10_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define OV02C10_REG_VTS 0x380e #define OV02C10_VTS_MAX 0xffff /* Exposure controls from sensor */ #define OV02C10_REG_EXPOSURE 0x3501 #define OV02C10_EXPOSURE_MIN 4 #define OV02C10_EXPOSURE_MAX_MARGIN 8 #define OV02C10_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV02C10_REG_ANALOG_GAIN 0x3508 #define OV02C10_ANAL_GAIN_MIN 0x10 #define OV02C10_ANAL_GAIN_MAX 0xf8 #define OV02C10_ANAL_GAIN_STEP 1 #define OV02C10_ANAL_GAIN_DEFAULT 0x10 /* Digital gain controls from sensor */ #define OV02C10_REG_DIGILAL_GAIN 0x350a #define OV02C10_DGTL_GAIN_MIN 0x0400 #define OV02C10_DGTL_GAIN_MAX 0x3fff #define OV02C10_DGTL_GAIN_STEP 1 #define OV02C10_DGTL_GAIN_DEFAULT 0x0400 /* Rotate */ #define OV02C10_ROTATE_CONTROL 0x3820 #define OV02C10_ISP_X_WIN_CONTROL 0x3811 #define OV02C10_ISP_Y_WIN_CONTROL 0x3813 #define OV02C10_CONFIG_ROTATE 0x18 /* Test Pattern Control */ #define OV02C10_REG_TEST_PATTERN 0x4503 #define OV02C10_TEST_PATTERN_ENABLE BIT(7) #define OV02C10_TEST_PATTERN_BAR_SHIFT 0 enum { OV02C10_LINK_FREQ_400MHZ_INDEX, }; enum module_names { MODULE_OTHERS = 0, MODULE_2BG203N3, MODULE_CJFME32, MODULE_KBFC645, }; struct ov02c10_reg { u16 address; u8 val; }; struct ov02c10_reg_list { u32 num_of_regs; const struct ov02c10_reg *regs; }; struct ov02c10_link_freq_config { const struct ov02c10_reg_list reg_list; }; struct ov02c10_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* MIPI lanes used */ u8 mipi_lanes; /* Sensor register settings for this resolution */ const struct ov02c10_reg_list reg_list; }; struct mipi_camera_link_ssdb { u8 version; u8 sku; u8 guid_csi2[16]; u8 devfunction; u8 bus; u32 dphylinkenfuses; u32 clockdiv; u8 link; u8 lanes; u32 csiparams[10]; u32 maxlanespeed; u8 sensorcalibfileidx; u8 sensorcalibfileidxInMBZ[3]; u8 romtype; u8 vcmtype; u8 platforminfo; u8 platformsubinfo; u8 flash; u8 privacyled; u8 degree; u8 mipilinkdefined; u32 mclkspeed; u8 controllogicid; u8 reserved1[3]; u8 mclkport; u8 reserved2[13]; } __packed; /* * 822ace8f-2814-4174-a56b-5f029fe079ee * This _DSM GUID returns a string from the sensor device, which acts as a * module identifier. 
*/ static const guid_t cio2_sensor_module_guid = GUID_INIT(0x822ace8f, 0x2814, 0x4174, 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee); static const struct ov02c10_reg mipi_data_rate_960mbps[] = { }; static const struct ov02c10_reg sensor_1928x1092_1lane_30fps_setting[] = { {0x0301, 0x08}, {0x0303, 0x06}, {0x0304, 0x01}, {0x0305, 0xe0}, {0x0313, 0x40}, {0x031c, 0x4f}, {0x301b, 0xd2}, {0x3020, 0x97}, {0x3022, 0x01}, {0x3026, 0xb4}, {0x3027, 0xe1}, {0x303b, 0x00}, {0x303c, 0x4f}, {0x303d, 0xe6}, {0x303e, 0x00}, {0x303f, 0x03}, {0x3021, 0x23}, {0x3501, 0x04}, {0x3502, 0x6c}, {0x3504, 0x0c}, {0x3507, 0x00}, {0x3508, 0x08}, {0x3509, 0x00}, {0x350a, 0x01}, {0x350b, 0x00}, {0x350c, 0x41}, {0x3600, 0x84}, {0x3603, 0x08}, {0x3610, 0x57}, {0x3611, 0x1b}, {0x3613, 0x78}, {0x3623, 0x00}, {0x3632, 0xa0}, {0x3642, 0xe8}, {0x364c, 0x70}, {0x365f, 0x0f}, {0x3708, 0x30}, {0x3714, 0x24}, {0x3725, 0x02}, {0x3737, 0x08}, {0x3739, 0x28}, {0x3749, 0x32}, {0x374a, 0x32}, {0x374b, 0x32}, {0x374c, 0x32}, {0x374d, 0x81}, {0x374e, 0x81}, {0x374f, 0x81}, {0x3752, 0x36}, {0x3753, 0x36}, {0x3754, 0x36}, {0x3761, 0x00}, {0x376c, 0x81}, {0x3774, 0x18}, {0x3776, 0x08}, {0x377c, 0x81}, {0x377d, 0x81}, {0x377e, 0x81}, {0x37a0, 0x44}, {0x37a6, 0x44}, {0x37aa, 0x0d}, {0x37ae, 0x00}, {0x37cb, 0x03}, {0x37cc, 0x01}, {0x37d8, 0x02}, {0x37d9, 0x10}, {0x37e1, 0x10}, {0x37e2, 0x18}, {0x37e3, 0x08}, {0x37e4, 0x08}, {0x37e5, 0x02}, {0x37e6, 0x08}, // 1928x1092 {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x07}, {0x3805, 0x8f}, {0x3806, 0x04}, {0x3807, 0x47}, {0x3808, 0x07}, {0x3809, 0x88}, {0x380a, 0x04}, {0x380b, 0x44}, {0x380c, 0x08}, {0x380d, 0xe8}, {0x380e, 0x04}, {0x380f, 0x8c}, {0x3810, 0x00}, {0x3811, 0x03}, {0x3812, 0x00}, {0x3813, 0x03}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3821, 0x00}, {0x3822, 0x80}, {0x3823, 0x08}, {0x3824, 0x00}, {0x3825, 0x20}, {0x3826, 0x00}, {0x3827, 0x08}, {0x382a, 0x00}, {0x382b, 0x08}, {0x382d, 0x00}, {0x382e, 0x00}, {0x382f, 0x23}, {0x3834, 0x00}, {0x3839, 0x00}, {0x383a, 0xd1}, {0x383e, 0x03}, {0x393d, 0x29}, {0x393f, 0x6e}, {0x394b, 0x06}, {0x394c, 0x06}, {0x394d, 0x08}, {0x394e, 0x0b}, {0x394f, 0x01}, {0x3950, 0x01}, {0x3951, 0x01}, {0x3952, 0x01}, {0x3953, 0x01}, {0x3954, 0x01}, {0x3955, 0x01}, {0x3956, 0x01}, {0x3957, 0x0e}, {0x3958, 0x08}, {0x3959, 0x08}, {0x395a, 0x08}, {0x395b, 0x13}, {0x395c, 0x09}, {0x395d, 0x05}, {0x395e, 0x02}, {0x395f, 0x00}, {0x395f, 0x00}, {0x3960, 0x00}, {0x3961, 0x00}, {0x3962, 0x00}, {0x3963, 0x00}, {0x3964, 0x00}, {0x3965, 0x00}, {0x3966, 0x00}, {0x3967, 0x00}, {0x3968, 0x01}, {0x3969, 0x01}, {0x396a, 0x01}, {0x396b, 0x01}, {0x396c, 0x10}, {0x396d, 0xf0}, {0x396e, 0x11}, {0x396f, 0x00}, {0x3970, 0x37}, {0x3971, 0x37}, {0x3972, 0x37}, {0x3973, 0x37}, {0x3974, 0x00}, {0x3975, 0x3c}, {0x3976, 0x3c}, {0x3977, 0x3c}, {0x3978, 0x3c}, {0x3c00, 0x0f}, {0x3c20, 0x01}, {0x3c21, 0x08}, {0x3f00, 0x8b}, {0x3f02, 0x0f}, {0x4000, 0xc3}, {0x4001, 0xe0}, {0x4002, 0x00}, {0x4003, 0x40}, {0x4008, 0x04}, {0x4009, 0x23}, {0x400a, 0x04}, {0x400b, 0x01}, {0x4077, 0x06}, {0x4078, 0x00}, {0x4079, 0x1a}, {0x407a, 0x7f}, {0x407b, 0x01}, {0x4080, 0x03}, {0x4081, 0x84}, {0x4308, 0x03}, {0x4309, 0xff}, {0x430d, 0x00}, {0x4806, 0x00}, {0x4813, 0x00}, {0x4837, 0x10}, {0x4857, 0x05}, {0x4500, 0x07}, {0x4501, 0x00}, {0x4503, 0x00}, {0x450a, 0x04}, {0x450e, 0x00}, {0x450f, 0x00}, {0x4800, 0x24}, {0x4900, 0x00}, {0x4901, 0x00}, {0x4902, 0x01}, {0x5000, 0xf5}, {0x5001, 0x50}, {0x5006, 0x00}, {0x5080, 0x40}, {0x5181, 0x2b}, 
{0x5202, 0xa3}, {0x5206, 0x01}, {0x5207, 0x00}, {0x520a, 0x01}, {0x520b, 0x00}, {0x365d, 0x00}, {0x4815, 0x40}, {0x4816, 0x12}, {0x4f00, 0x01}, // plls {0x0303, 0x05}, {0x0305, 0x90}, {0x0316, 0x90}, {0x3016, 0x12}, }; static const struct ov02c10_reg sensor_1928x1092_2lane_30fps_setting[] = { {0x0301, 0x08}, {0x0303, 0x06}, {0x0304, 0x01}, {0x0305, 0xe0}, {0x0313, 0x40}, {0x031c, 0x4f}, {0x301b, 0xf0}, {0x3020, 0x97}, {0x3022, 0x01}, {0x3026, 0xb4}, {0x3027, 0xf1}, {0x303b, 0x00}, {0x303c, 0x4f}, {0x303d, 0xe6}, {0x303e, 0x00}, {0x303f, 0x03}, {0x3021, 0x23}, {0x3501, 0x04}, {0x3502, 0x6c}, {0x3504, 0x0c}, {0x3507, 0x00}, {0x3508, 0x08}, {0x3509, 0x00}, {0x350a, 0x01}, {0x350b, 0x00}, {0x350c, 0x41}, {0x3600, 0x84}, {0x3603, 0x08}, {0x3610, 0x57}, {0x3611, 0x1b}, {0x3613, 0x78}, {0x3623, 0x00}, {0x3632, 0xa0}, {0x3642, 0xe8}, {0x364c, 0x70}, {0x365f, 0x0f}, {0x3708, 0x30}, {0x3714, 0x24}, {0x3725, 0x02}, {0x3737, 0x08}, {0x3739, 0x28}, {0x3749, 0x32}, {0x374a, 0x32}, {0x374b, 0x32}, {0x374c, 0x32}, {0x374d, 0x81}, {0x374e, 0x81}, {0x374f, 0x81}, {0x3752, 0x36}, {0x3753, 0x36}, {0x3754, 0x36}, {0x3761, 0x00}, {0x376c, 0x81}, {0x3774, 0x18}, {0x3776, 0x08}, {0x377c, 0x81}, {0x377d, 0x81}, {0x377e, 0x81}, {0x37a0, 0x44}, {0x37a6, 0x44}, {0x37aa, 0x0d}, {0x37ae, 0x00}, {0x37cb, 0x03}, {0x37cc, 0x01}, {0x37d8, 0x02}, {0x37d9, 0x10}, {0x37e1, 0x10}, {0x37e2, 0x18}, {0x37e3, 0x08}, {0x37e4, 0x08}, {0x37e5, 0x02}, {0x37e6, 0x08}, // 1928x1092 {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x07}, {0x3805, 0x8f}, {0x3806, 0x04}, {0x3807, 0x47}, {0x3808, 0x07}, {0x3809, 0x88}, {0x380a, 0x04}, {0x380b, 0x44}, {0x380c, 0x04}, {0x380d, 0x74}, {0x380e, 0x09}, {0x380f, 0x18}, {0x3810, 0x00}, {0x3811, 0x03}, {0x3812, 0x00}, {0x3813, 0x03}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3821, 0x00}, {0x3822, 0x80}, {0x3823, 0x08}, {0x3824, 0x00}, {0x3825, 0x20}, {0x3826, 0x00}, {0x3827, 0x08}, {0x382a, 0x00}, {0x382b, 0x08}, {0x382d, 0x00}, {0x382e, 0x00}, {0x382f, 0x23}, {0x3834, 0x00}, {0x3839, 0x00}, {0x383a, 0xd1}, {0x383e, 0x03}, {0x393d, 0x29}, {0x393f, 0x6e}, {0x394b, 0x06}, {0x394c, 0x06}, {0x394d, 0x08}, {0x394e, 0x0a}, {0x394f, 0x01}, {0x3950, 0x01}, {0x3951, 0x01}, {0x3952, 0x01}, {0x3953, 0x01}, {0x3954, 0x01}, {0x3955, 0x01}, {0x3956, 0x01}, {0x3957, 0x0e}, {0x3958, 0x08}, {0x3959, 0x08}, {0x395a, 0x08}, {0x395b, 0x13}, {0x395c, 0x09}, {0x395d, 0x05}, {0x395e, 0x02}, {0x395f, 0x00}, {0x395f, 0x00}, {0x3960, 0x00}, {0x3961, 0x00}, {0x3962, 0x00}, {0x3963, 0x00}, {0x3964, 0x00}, {0x3965, 0x00}, {0x3966, 0x00}, {0x3967, 0x00}, {0x3968, 0x01}, {0x3969, 0x01}, {0x396a, 0x01}, {0x396b, 0x01}, {0x396c, 0x10}, {0x396d, 0xf0}, {0x396e, 0x11}, {0x396f, 0x00}, {0x3970, 0x37}, {0x3971, 0x37}, {0x3972, 0x37}, {0x3973, 0x37}, {0x3974, 0x00}, {0x3975, 0x3c}, {0x3976, 0x3c}, {0x3977, 0x3c}, {0x3978, 0x3c}, {0x3c00, 0x0f}, {0x3c20, 0x01}, {0x3c21, 0x08}, {0x3f00, 0x8b}, {0x3f02, 0x0f}, {0x4000, 0xc3}, {0x4001, 0xe0}, {0x4002, 0x00}, {0x4003, 0x40}, {0x4008, 0x04}, {0x4009, 0x23}, {0x400a, 0x04}, {0x400b, 0x01}, {0x4041, 0x20}, {0x4077, 0x06}, {0x4078, 0x00}, {0x4079, 0x1a}, {0x407a, 0x7f}, {0x407b, 0x01}, {0x4080, 0x03}, {0x4081, 0x84}, {0x4308, 0x03}, {0x4309, 0xff}, {0x430d, 0x00}, {0x4806, 0x00}, {0x4813, 0x00}, {0x4837, 0x10}, {0x4857, 0x05}, {0x4884, 0x04}, {0x4500, 0x07}, {0x4501, 0x00}, {0x4503, 0x00}, {0x450a, 0x04}, {0x450e, 0x00}, {0x450f, 0x00}, {0x4800, 0x64}, {0x4900, 0x00}, {0x4901, 0x00}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xd8}, 
{0x4d02, 0xba}, {0x4d03, 0xa0}, {0x4d04, 0xb7}, {0x4d05, 0x34}, {0x4d0d, 0x00}, {0x5000, 0xfd}, {0x5001, 0x50}, {0x5006, 0x00}, {0x5080, 0x40}, {0x5181, 0x2b}, {0x5202, 0xa3}, {0x5206, 0x01}, {0x5207, 0x00}, {0x520a, 0x01}, {0x520b, 0x00}, {0x365d, 0x00}, {0x4815, 0x40}, {0x4816, 0x12}, {0x481f, 0x30}, {0x4f00, 0x01}, // plls {0x0303, 0x05}, {0x0305, 0x90}, {0x0316, 0x90}, {0x3016, 0x32}, }; static const char * const ov02c10_test_pattern_menu[] = { "Disabled", "Color Bar", "Top-Bottom Darker Color Bar", "Right-Left Darker Color Bar", "Color Bar type 4", }; static const char * const ov02c10_module_names[] = { [MODULE_OTHERS] = "", [MODULE_2BG203N3] = "2BG203N3", [MODULE_CJFME32] = "CJFME32", [MODULE_KBFC645] = "KBFC645", }; static const s64 link_freq_menu_items[] = { OV02C10_LINK_FREQ_400MHZ, }; static const struct ov02c10_link_freq_config link_freq_configs[] = { [OV02C10_LINK_FREQ_400MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_960mbps), .regs = mipi_data_rate_960mbps, } }, }; static const struct ov02c10_mode supported_modes[] = { { .width = 1928, .height = 1092, .hts = 2280, .vts_def = 1164, .vts_min = 1164, .mipi_lanes = 1, .reg_list = { .num_of_regs = ARRAY_SIZE(sensor_1928x1092_1lane_30fps_setting), .regs = sensor_1928x1092_1lane_30fps_setting, }, .link_freq_index = OV02C10_LINK_FREQ_400MHZ_INDEX, }, { .width = 1928, .height = 1092, .hts = 1140, .vts_def = 2328, .vts_min = 2328, .mipi_lanes = 2, .reg_list = { .num_of_regs = ARRAY_SIZE(sensor_1928x1092_2lane_30fps_setting), .regs = sensor_1928x1092_2lane_30fps_setting, }, .link_freq_index = OV02C10_LINK_FREQ_400MHZ_INDEX, }, }; struct ov02c10 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; struct clk *img_clk; struct regulator *avdd; struct gpio_desc *reset; struct gpio_desc *handshake; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) struct vsc_mipi_config conf; struct vsc_camera_status status; struct v4l2_ctrl *privacy_status; #endif /* Current mode */ const struct ov02c10_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* MIPI lanes used */ u8 mipi_lanes; /* Streaming on/off */ bool streaming; /* Module name index */ u8 module_name_index; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) bool use_intel_vsc; #endif }; static inline struct ov02c10 *to_ov02c10(struct v4l2_subdev *subdev) { return container_of(subdev, struct ov02c10, sd); } static int ov02c10_read_reg(struct ov02c10 *ov02c10, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? 
ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov02c10_write_reg(struct ov02c10 *ov02c10, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) return ret < 0 ? ret : -EIO; return 0; } static int ov02c10_write_reg_list(struct ov02c10 *ov02c10, const struct ov02c10_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov02c10_write_reg(ov02c10, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov02c10_test_pattern(struct ov02c10 *ov02c10, u32 pattern) { if (pattern) pattern = (pattern - 1) << OV02C10_TEST_PATTERN_BAR_SHIFT | OV02C10_TEST_PATTERN_ENABLE; return ov02c10_write_reg(ov02c10, OV02C10_REG_TEST_PATTERN, 1, pattern); } static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov02c10 *ov02c10 = container_of(ctrl->handler, struct ov02c10, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov02c10->cur_mode->height + ctrl->val - OV02C10_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov02c10->exposure, ov02c10->exposure->minimum, exposure_max, ov02c10->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov02c10_write_reg(ov02c10, OV02C10_REG_ANALOG_GAIN, 2, ctrl->val << 4); break; case V4L2_CID_DIGITAL_GAIN: ret = ov02c10_write_reg(ov02c10, OV02C10_REG_DIGILAL_GAIN, 3, ctrl->val << 6); break; case V4L2_CID_EXPOSURE: ret = ov02c10_write_reg(ov02c10, OV02C10_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: ret = ov02c10_write_reg(ov02c10, OV02C10_REG_VTS, 2, ov02c10->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov02c10_test_pattern(ov02c10, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov02c10_ctrl_ops = { .s_ctrl = ov02c10_set_ctrl, }; static int ov02c10_init_controls(struct ov02c10 *ov02c10) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct ov02c10_mode *cur_mode; s64 exposure_max, h_blank; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &ov02c10->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &ov02c10->mutex; cur_mode = ov02c10->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); ov02c10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (ov02c10->link_freq) ov02c10->link_freq->flags |= 
V4L2_CTRL_FLAG_READ_ONLY; ov02c10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, OV02C10_SCLK, 1, OV02C10_SCLK); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = OV02C10_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; ov02c10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = cur_mode->hts - cur_mode->width; ov02c10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov02c10->hblank) ov02c10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ov02c10->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(ov02c10->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV02C10_ANAL_GAIN_MIN, OV02C10_ANAL_GAIN_MAX, OV02C10_ANAL_GAIN_STEP, OV02C10_ANAL_GAIN_DEFAULT); v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV02C10_DGTL_GAIN_MIN, OV02C10_DGTL_GAIN_MAX, OV02C10_DGTL_GAIN_STEP, OV02C10_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - OV02C10_EXPOSURE_MAX_MARGIN; ov02c10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_EXPOSURE, OV02C10_EXPOSURE_MIN, exposure_max, OV02C10_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov02c10_test_pattern_menu) - 1, 0, 0, ov02c10_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov02c10->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov02c10_update_pad_format(const struct ov02c10_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } static int ov02c10_start_streaming(struct ov02c10 *ov02c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); const struct ov02c10_reg_list *reg_list; int link_freq_index; int ret = 0; u32 rotate, shift_x, shift_y; link_freq_index = ov02c10->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov02c10_write_reg_list(ov02c10, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &ov02c10->cur_mode->reg_list; ret = ov02c10_write_reg_list(ov02c10, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(ov02c10->sd.ctrl_handler); if (ret) return ret; switch (ov02c10->module_name_index) { case MODULE_CJFME32: case MODULE_2BG203N3: case MODULE_KBFC645: ret = ov02c10_read_reg(ov02c10, OV02C10_ROTATE_CONTROL, 1, &rotate); if (ret) dev_err(&client->dev, "read ROTATE_CONTROL fail: %d", ret); ret = ov02c10_read_reg(ov02c10, OV02C10_ISP_X_WIN_CONTROL, 1, &shift_x); if (ret) dev_err(&client->dev, "read ISP_X_WIN_CONTROL fail: %d", ret); ret = ov02c10_read_reg(ov02c10, OV02C10_ISP_Y_WIN_CONTROL, 1, &shift_y); if (ret) dev_err(&client->dev, "read ISP_Y_WIN_CONTROL fail: %d", ret); rotate ^= OV02C10_CONFIG_ROTATE; shift_x = shift_x - 1; shift_y = shift_y - 1; ret = ov02c10_write_reg(ov02c10, OV02C10_ROTATE_CONTROL, 1, rotate); if (ret) dev_err(&client->dev, "write ROTATE_CONTROL fail: %d", ret); ret = ov02c10_write_reg(ov02c10, OV02C10_ISP_X_WIN_CONTROL, 1, shift_x); if (ret) dev_err(&client->dev, "write ISP_X_WIN_CONTROL fail: %d", ret); ret = 
ov02c10_write_reg(ov02c10, OV02C10_ISP_Y_WIN_CONTROL, 1, shift_y); if (ret) dev_err(&client->dev, "write ISP_Y_WIN_CONTROL fail: %d", ret); break; } ret = ov02c10_write_reg(ov02c10, OV02C10_REG_MODE_SELECT, 1, OV02C10_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void ov02c10_stop_streaming(struct ov02c10 *ov02c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); int ret = 0; ret = ov02c10_write_reg(ov02c10, OV02C10_REG_MODE_SELECT, 1, OV02C10_MODE_STANDBY); if (ret) dev_err(&client->dev, "failed to stop streaming"); } static int ov02c10_set_stream(struct v4l2_subdev *sd, int enable) { struct ov02c10 *ov02c10 = to_ov02c10(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (ov02c10->streaming == enable) return 0; mutex_lock(&ov02c10->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&ov02c10->mutex); return ret; } ret = ov02c10_start_streaming(ov02c10); if (ret) { enable = 0; ov02c10_stop_streaming(ov02c10); pm_runtime_put(&client->dev); } } else { ov02c10_stop_streaming(ov02c10); pm_runtime_put(&client->dev); } ov02c10->streaming = enable; mutex_unlock(&ov02c10->mutex); return ret; } /* This function tries to get power control resources */ static int ov02c10_get_pm_resources(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02c10 *ov02c10 = to_ov02c10(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) acpi_handle handle = ACPI_HANDLE(dev); struct acpi_handle_list deps; acpi_status status; int i = 0; ov02c10->use_intel_vsc = false; if (!acpi_has_method(handle, "_DEP")) return false; status = acpi_evaluate_reference(handle, "_DEP", NULL, &deps); if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return false; } for (i = 0; i < deps.count; i++) { struct acpi_device *dep = NULL; if (deps.handles[i]) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) acpi_bus_get_device(deps.handles[i], &dep); #else dep = acpi_fetch_acpi_dev(deps.handles[i]); #endif if (dep && !acpi_match_device_ids(dep, cvfd_ids)) { ov02c10->use_intel_vsc = true; return 0; } } #endif ov02c10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ov02c10->reset)) return dev_err_probe(dev, PTR_ERR(ov02c10->reset), "failed to get reset gpio\n"); ov02c10->handshake = devm_gpiod_get_optional(dev, "handshake", GPIOD_OUT_LOW); if (IS_ERR(ov02c10->handshake)) return dev_err_probe(dev, PTR_ERR(ov02c10->handshake), "failed to get handshake gpio\n"); ov02c10->img_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(ov02c10->img_clk)) return dev_err_probe(dev, PTR_ERR(ov02c10->img_clk), "failed to get imaging clock\n"); ov02c10->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(ov02c10->avdd)) { ret = PTR_ERR(ov02c10->avdd); ov02c10->avdd = NULL; if (ret != -ENODEV) return dev_err_probe(dev, ret, "failed to get avdd regulator\n"); } return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void ov02c10_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct ov02c10 *ov02c10 = handle; v4l2_ctrl_s_ctrl(ov02c10->privacy_status, !status); } #endif static int ov02c10_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02c10 *ov02c10 = to_ov02c10(sd); int ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if 
(ov02c10->use_intel_vsc) { ret = vsc_release_camera_sensor(&ov02c10->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); return ret; } #endif gpiod_set_value_cansleep(ov02c10->reset, 1); gpiod_set_value_cansleep(ov02c10->handshake, 0); if (ov02c10->avdd) ret = regulator_disable(ov02c10->avdd); clk_disable_unprepare(ov02c10->img_clk); return ret; } static int ov02c10_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02c10 *ov02c10 = to_ov02c10(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (ov02c10->use_intel_vsc) { ov02c10->conf.lane_num = ov02c10->mipi_lanes; /* frequency unit 100k */ ov02c10->conf.freq = OV02C10_LINK_FREQ_400MHZ / 100000; ret = vsc_acquire_camera_sensor(&ov02c10->conf, ov02c10_vsc_privacy_callback, ov02c10, &ov02c10->status); if (ret == -EAGAIN) return -EPROBE_DEFER; if (ret) { dev_err(dev, "Acquire VSC failed"); return ret; } if (ov02c10->privacy_status) __v4l2_ctrl_s_ctrl(ov02c10->privacy_status, !(ov02c10->status.status)); return ret; } #endif ret = clk_prepare_enable(ov02c10->img_clk); if (ret < 0) { dev_err(dev, "failed to enable imaging clock: %d", ret); return ret; } if (ov02c10->avdd) { ret = regulator_enable(ov02c10->avdd); if (ret < 0) { dev_err(dev, "failed to enable avdd: %d", ret); clk_disable_unprepare(ov02c10->img_clk); return ret; } } gpiod_set_value_cansleep(ov02c10->handshake, 1); gpiod_set_value_cansleep(ov02c10->reset, 0); /* Lattice MIPI aggregator with some version FW needs longer delay after handshake triggered. We set 25ms as a safe value and wait for a stable version FW. */ msleep_interruptible(25); return ret; } static int __maybe_unused ov02c10_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov02c10 *ov02c10 = to_ov02c10(sd); mutex_lock(&ov02c10->mutex); if (ov02c10->streaming) ov02c10_stop_streaming(ov02c10); mutex_unlock(&ov02c10->mutex); return 0; } static int __maybe_unused ov02c10_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov02c10 *ov02c10 = to_ov02c10(sd); int ret = 0; mutex_lock(&ov02c10->mutex); if (!ov02c10->streaming) goto exit; ret = ov02c10_start_streaming(ov02c10); if (ret) { ov02c10->streaming = false; ov02c10_stop_streaming(ov02c10); } exit: mutex_unlock(&ov02c10->mutex); return ret; } static int ov02c10_set_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov02c10 *ov02c10 = to_ov02c10(sd); const struct ov02c10_mode *mode; s32 vblank_def, h_blank; if (ov02c10->mipi_lanes == 1) mode = &supported_modes[0]; else if (ov02c10->mipi_lanes == 2) mode = &supported_modes[1]; else { mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); } mutex_lock(&ov02c10->mutex); ov02c10_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov02c10->cur_mode = mode; 
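/*
 * Illustration only -- this comment is not part of the upstream driver.
 * Once the active mode changes, the control limits below are recomputed
 * from the new mode's timings using the pattern shared by these sensor
 * drivers:
 *
 *   VBLANK default = vts_def - height   (so VTS = height + VBLANK)
 *   HBLANK         = hts - width        (read-only, fixed per mode)
 *
 * The exposure ceiling follows from VTS as
 *   exposure_max = VTS - OV02C10_EXPOSURE_MAX_MARGIN
 * and is re-applied in the V4L2_CID_VBLANK branch of ov02c10_set_ctrl()
 * whenever userspace changes VBLANK.
 */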
__v4l2_ctrl_s_ctrl(ov02c10->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov02c10->pixel_rate, OV02C10_SCLK); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov02c10->vblank, mode->vts_min - mode->height, OV02C10_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov02c10->vblank, vblank_def); h_blank = mode->hts - mode->width; __v4l2_ctrl_modify_range(ov02c10->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov02c10->mutex); return 0; } static int ov02c10_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov02c10 *ov02c10 = to_ov02c10(sd); mutex_lock(&ov02c10->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&ov02c10->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov02c10->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov02c10_update_pad_format(ov02c10->cur_mode, &fmt->format); mutex_unlock(&ov02c10->mutex); return 0; } static int ov02c10_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov02c10_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov02c10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov02c10 *ov02c10 = to_ov02c10(sd); mutex_lock(&ov02c10->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ov02c10_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) ov02c10_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->state, 0)); #else ov02c10_update_pad_format(&supported_modes[0], v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov02c10->mutex); return 0; } static const struct v4l2_subdev_video_ops ov02c10_video_ops = { .s_stream = ov02c10_set_stream, }; static const struct v4l2_subdev_pad_ops ov02c10_pad_ops = { .set_fmt = ov02c10_set_format, .get_fmt = ov02c10_get_format, .enum_mbus_code = ov02c10_enum_mbus_code, .enum_frame_size = ov02c10_enum_frame_size, }; static const struct v4l2_subdev_ops ov02c10_subdev_ops = { .video = &ov02c10_video_ops, .pad = &ov02c10_pad_ops, }; static const struct media_entity_operations ov02c10_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov02c10_internal_ops = { .open = ov02c10_open, }; static void ov02c10_read_mipi_lanes(struct ov02c10 *ov02c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); struct 
mipi_camera_link_ssdb ssdb; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_device *adev = ACPI_COMPANION(&client->dev); union acpi_object *obj; acpi_status status; if (!adev) { dev_info(&client->dev, "Not ACPI device\n"); return; } status = acpi_evaluate_object(adev->handle, "SSDB", NULL, &buffer); if (ACPI_FAILURE(status)) { dev_info(&client->dev, "ACPI fail: %d\n", -ENODEV); return; } obj = buffer.pointer; if (!obj) { dev_info(&client->dev, "Couldn't locate ACPI buffer\n"); return; } if (obj->type != ACPI_TYPE_BUFFER) { dev_info(&client->dev, "Not an ACPI buffer\n"); goto out_free_buff; } if (obj->buffer.length > sizeof(ssdb)) { dev_err(&client->dev, "Given buffer is too small\n"); goto out_free_buff; } memcpy(&ssdb, obj->buffer.pointer, obj->buffer.length); ov02c10->mipi_lanes = ssdb.lanes; out_free_buff: kfree(buffer.pointer); } static int ov02c10_identify_module(struct ov02c10 *ov02c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); int ret; u32 val; ret = ov02c10_read_reg(ov02c10, OV02C10_REG_CHIP_ID, 3, &val); if (ret) return ret; if (val != OV02C10_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV02C10_CHIP_ID, val); return -ENXIO; } return 0; } static int ov02c10_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov02c10_remove(struct i2c_client *client) #else static void ov02c10_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov02c10 *ov02c10 = to_ov02c10(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&ov02c10->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int ov02c10_read_module_name(struct ov02c10 *ov02c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd); struct device *dev = &client->dev; union acpi_object *obj; struct acpi_device *adev = ACPI_COMPANION(dev); int i; ov02c10->module_name_index = 0; if (!adev) return 0; obj = acpi_evaluate_dsm_typed(adev->handle, &cio2_sensor_module_guid, 0x00, 0x01, NULL, ACPI_TYPE_STRING); if (!obj) return 0; dev_dbg(dev, "module name: %s", obj->string.pointer); for (i = 1; i < ARRAY_SIZE(ov02c10_module_names); i++) { if (!strcmp(ov02c10_module_names[i], obj->string.pointer)) { ov02c10->module_name_index = i; break; } } ACPI_FREE(obj); return 0; } static int ov02c10_probe(struct i2c_client 
*client) { struct ov02c10 *ov02c10; int ret = 0; /* Check HW config */ ret = ov02c10_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } ov02c10 = devm_kzalloc(&client->dev, sizeof(*ov02c10), GFP_KERNEL); if (!ov02c10) return -ENOMEM; v4l2_i2c_subdev_init(&ov02c10->sd, client, &ov02c10_subdev_ops); ov02c10_get_pm_resources(&client->dev); ret = ov02c10_power_on(&client->dev); if (ret) { dev_err_probe(&client->dev, ret, "failed to power on\n"); return ret; } ret = ov02c10_identify_module(ov02c10); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_ret; } ov02c10_read_module_name(ov02c10); ov02c10_read_mipi_lanes(ov02c10); mutex_init(&ov02c10->mutex); ov02c10->cur_mode = &supported_modes[0]; if (ov02c10->mipi_lanes == 2) ov02c10->cur_mode = &supported_modes[1]; ret = ov02c10_init_controls(ov02c10); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov02c10->sd.internal_ops = &ov02c10_internal_ops; ov02c10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov02c10->sd.entity.ops = &ov02c10_subdev_entity_ops; ov02c10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov02c10->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov02c10->sd.entity, 1, &ov02c10->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ret = v4l2_async_register_subdev_sensor(&ov02c10->sd); if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&ov02c10->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov02c10->sd.ctrl_handler); mutex_destroy(&ov02c10->mutex); probe_error_ret: ov02c10_power_off(&client->dev); return ret; } static const struct dev_pm_ops ov02c10_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov02c10_suspend, ov02c10_resume) SET_RUNTIME_PM_OPS(ov02c10_power_off, ov02c10_power_on, NULL) }; #ifdef CONFIG_ACPI static const struct acpi_device_id ov02c10_acpi_ids[] = { {"OVTI02C1"}, {} }; MODULE_DEVICE_TABLE(acpi, ov02c10_acpi_ids); #endif static struct i2c_driver ov02c10_i2c_driver = { .driver = { .name = "ov02c10", .pm = &ov02c10_pm_ops, .acpi_match_table = ACPI_PTR(ov02c10_acpi_ids), }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov02c10_probe, #else .probe = ov02c10_probe, #endif .remove = ov02c10_remove, }; module_i2c_driver(ov02c10_i2c_driver); MODULE_AUTHOR("Hao Yao "); MODULE_DESCRIPTION("OmniVision OV02C10 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov02e10.c000066400000000000000000000755701471702545500234010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2023 Intel Corporation. 
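/*
 * Illustration only -- the block below is not part of the upstream
 * driver sources.  Both the ov02c10 and ov02e10 register helpers in
 * these files transfer 1..4 byte values big-endian on the wire: reads
 * land the payload right-aligned in a 4-byte buffer so a single
 * get_unaligned_be32() yields the value, and writes shift the value up
 * by 8 * (4 - len) bits so exactly the top 'len' bytes of the 32-bit
 * big-endian image follow the register address.  The standalone,
 * host-side sketch below (never compiled into the driver, hence the
 * "#if 0" guard) reproduces that packing with plain C; the demo_*
 * helper names and the register address used are illustrative only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void demo_put_be32(uint8_t *dst, uint32_t val)
{
        dst[0] = val >> 24;
        dst[1] = val >> 16;
        dst[2] = val >> 8;
        dst[3] = val;
}

static uint32_t demo_get_be32(const uint8_t *src)
{
        return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
               ((uint32_t)src[2] << 8) | src[3];
}

int main(void)
{
        uint8_t wbuf[5];                /* 1 address byte + up to 4 data bytes */
        uint8_t rbuf[4] = { 0 };
        unsigned int len = 2;           /* a 16-bit register value */
        uint32_t val = 0x08c4;

        /* Write path: value left-aligned into a be32, top 'len' bytes sent. */
        wbuf[0] = 0x35;                 /* register address, illustrative */
        demo_put_be32(wbuf + 1, val << 8 * (4 - len));
        printf("bytes on the wire: %02x %02x %02x\n",
               wbuf[0], wbuf[1], wbuf[2]);              /* 35 08 c4 */

        /* Read path: 'len' bytes land right-aligned, then read as be32. */
        memcpy(&rbuf[4 - len], &wbuf[1], len);
        printf("value read back: 0x%x\n", demo_get_be32(rbuf)); /* 0x8c4 */
        return 0;
}
#endif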
#include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) #include static const struct acpi_device_id cvfd_ids[] = { { "INTC1059", 0 }, { "INTC1095", 0 }, { "INTC100A", 0 }, { "INTC10CF", 0 }, {} }; #endif #define OV02E10_LINK_FREQ_360MHZ 360000000ULL #define OV02E10_SCLK 36000000LL #define OV02E10_MCLK 19200000 #define OV02E10_DATA_LANES 2 #define OV02E10_RGB_DEPTH 10 #define OV02E10_REG_DELAY 0xff #define OV02E10_REG_PAGE_FLAG 0xfd #define OV02E10_PAGE_0 0x0 #define OV02E10_PAGE_1 0x1 #define OV02E10_PAGE_2 0x2 #define OV02E10_PAGE_3 0x3 #define OV02E10_PAGE_5 0x4 #define OV02E10_PAGE_7 0x5 #define OV02E10_PAGE_8 0x6 #define OV02E10_PAGE_9 0xF #define OV02E10_PAGE_D 0x8 #define OV02E10_PAGE_E 0x9 #define OV02E10_PAGE_F 0xA #define OV02E10_REG_CHIP_ID 0x00 #define OV02E10_CHIP_ID 0x45025610 /* vertical-timings from sensor */ #define OV02E10_REG_VTS 0x35 #define OV02E10_VTS_DEF 2244 #define OV02E10_VTS_MIN 2244 #define OV02E10_VTS_MAX 0x7fff /* horizontal-timings from sensor */ #define OV02E10_REG_HTS 0x37 /* Exposure controls from sensor */ #define OV02E10_REG_EXPOSURE 0x03 #define OV02E10_EXPOSURE_MIN 1 #define OV02E10_EXPOSURE_MAX_MARGIN 2 #define OV02E10_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV02E10_REG_ANALOG_GAIN 0x24 #define OV02E10_ANAL_GAIN_MIN 0x10 #define OV02E10_ANAL_GAIN_MAX 0xf8 #define OV02E10_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define OV02E10_REG_DIGITAL_GAIN 0x21 #define OV02E10_DGTL_GAIN_MIN 256 #define OV02E10_DGTL_GAIN_MAX 1020 #define OV02E10_DGTL_GAIN_STEP 1 #define OV02E10_DGTL_GAIN_DEFAULT 256 /* Register update control */ #define OV02E10_REG_COMMAND_UPDATE 0xE7 #define OV02E10_COMMAND_UPDATE 0x00 #define OV02E10_COMMAND_HOLD 0x01 /* Test Pattern Control */ #define OV02E10_REG_TEST_PATTERN 0x12 #define OV02E10_TEST_PATTERN_ENABLE BIT(0) #define OV02E10_TEST_PATTERN_BAR_SHIFT 1 enum { OV02E10_LINK_FREQ_360MHZ_INDEX, }; struct ov02e10_reg { u16 address; u8 val; }; struct ov02e10_reg_list { u32 num_of_regs; const struct ov02e10_reg *regs; }; struct ov02e10_link_freq_config { const struct ov02e10_reg_list reg_list; }; struct ov02e10_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct ov02e10_reg_list reg_list; }; static const struct ov02e10_reg ov02e10_standby[] = { { OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_0 }, { 0xa0, 0x00 }, { OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_1 }, { 0x01, 0x02 } }; static const struct ov02e10_reg_list ov02e10_standby_list = { .num_of_regs = ARRAY_SIZE(ov02e10_standby), .regs = ov02e10_standby }; static const struct ov02e10_reg ov02e10_streaming[] = { { OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_0 }, { 0xa0, 0x01 }, { OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_1 }, { 0x01, 0x02 } }; static const struct ov02e10_reg_list ov02e10_streaming_list = { .num_of_regs = ARRAY_SIZE(ov02e10_streaming), .regs = ov02e10_streaming }; static const struct ov02e10_reg mode_1928x1088_30fps_2lane[] = { { 0xfd, 0x00 }, { 0x20, 0x00 }, { 0x20, 0x0b }, { 0x21, 0x02 }, { 0x10, 0x23 }, { 0xc5, 0x04 }, { 0x21, 0x00 }, { 0x14, 0x96 }, { 0x17, 0x01 
}, { 0xfd, 0x01 }, { 0x03, 0x00 }, { 0x04, 0x04 }, { 0x05, 0x04 }, { 0x06, 0x62 }, { 0x07, 0x01 }, { 0x22, 0x80 }, { 0x24, 0xff }, { 0x40, 0xc6 }, { 0x41, 0x18 }, { 0x45, 0x3f }, { 0x48, 0x0c }, { 0x4c, 0x08 }, { 0x51, 0x12 }, { 0x52, 0x10 }, { 0x57, 0x98 }, { 0x59, 0x06 }, { 0x5a, 0x04 }, { 0x5c, 0x38 }, { 0x5e, 0x10 }, { 0x67, 0x11 }, { 0x7b, 0x04 }, { 0x81, 0x12 }, { 0x90, 0x51 }, { 0x91, 0x09 }, { 0x92, 0x21 }, { 0x93, 0x28 }, { 0x95, 0x54 }, { 0x9d, 0x20 }, { 0x9e, 0x04 }, { 0xb1, 0x9a }, { 0xb2, 0x86 }, { 0xb6, 0x3f }, { 0xb9, 0x30 }, { 0xc1, 0x01 }, { 0xc5, 0xa0 }, { 0xc6, 0x73 }, { 0xc7, 0x04 }, { 0xc8, 0x25 }, { 0xc9, 0x05 }, { 0xca, 0x28 }, { 0xcb, 0x00 }, { 0xcf, 0x16 }, { 0xd2, 0xd0 }, { 0xd7, 0x3f }, { 0xd8, 0x40 }, { 0xd9, 0x40 }, { 0xda, 0x44 }, { 0xdb, 0x3d }, { 0xdc, 0x3d }, { 0xdd, 0x3d }, { 0xde, 0x3d }, { 0xdf, 0xf0 }, { 0xea, 0x0f }, { 0xeb, 0x04 }, { 0xec, 0x29 }, { 0xee, 0x47 }, { 0xfd, 0x01 }, { 0x31, 0x01 }, { 0x27, 0x00 }, { 0x2f, 0x41 }, { 0xfd, 0x02 }, { 0xa1, 0x01 }, { 0xfd, 0x02 }, { 0x9a, 0x03 }, { 0xfd, 0x03 }, { 0x9d, 0x0f }, { 0xfd, 0x07 }, { 0x42, 0x00 }, { 0x43, 0xad }, { 0x44, 0x00 }, { 0x45, 0xa8 }, { 0x46, 0x00 }, { 0x47, 0xa8 }, { 0x48, 0x00 }, { 0x49, 0xad }, { 0xfd, 0x00 }, { 0xc4, 0x01 }, { 0xfd, 0x01 }, { 0x33, 0x03 }, { 0xfd, 0x00 }, { 0x20, 0x1f }, }; static const char *const ov02e10_test_pattern_menu[] = { "Disabled", "Color Bar", }; static const s64 link_freq_menu_items[] = { OV02E10_LINK_FREQ_360MHZ, }; static const struct ov02e10_mode supported_modes[] = { { .width = 1928, .height = 1088, .hts = 534, .vts_def = 2244, .vts_min = 2244, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1088_30fps_2lane), .regs = mode_1928x1088_30fps_2lane, }, .link_freq_index = OV02E10_LINK_FREQ_360MHZ_INDEX, }, }; struct ov02e10 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; struct clk *img_clk; struct regulator *avdd; struct gpio_desc *reset; struct gpio_desc *handshake; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) struct vsc_mipi_config conf; struct vsc_camera_status status; struct v4l2_ctrl *privacy_status; #endif /* Current mode */ const struct ov02e10_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) bool use_intel_vsc; #endif }; static inline struct ov02e10 *to_ov02e10(struct v4l2_subdev *subdev) { return container_of(subdev, struct ov02e10, sd); } static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV02E10_DATA_LANES; do_div(pixel_rate, OV02E10_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, OV02E10_SCLK); return ppl; } static int ov02e10_read_reg(struct ov02e10 *ov02e10, u8 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); struct i2c_msg msgs[2]; u8 data_buf[4] = { 0 }; int ret; if (len > sizeof(data_buf)) return -EINVAL; msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 1; msgs[0].buf = ® msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != 
ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov02e10_write_reg(struct ov02e10 *ov02e10, u8 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); u8 buf[5]; int ret; if (len > 4) return -EINVAL; if (reg == OV02E10_REG_DELAY) { msleep(val); return 0; } dev_dbg(&client->dev, "%s, reg %x len %x, val %x\n", __func__, reg, len, val); buf[0] = reg; put_unaligned_be32(val << 8 * (4 - len), buf + 1); ret = i2c_master_send(client, buf, len + 1); if (ret != len + 1) { dev_err(&client->dev, "failed to write reg %d val %d", reg, val); return ret < 0 ? ret : -EIO; } return 0; } static int ov02e10_write_reg_list(struct ov02e10 *ov02e10, const struct ov02e10_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); unsigned int i; int ret; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov02e10_write_reg(ov02e10, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov02e10_test_pattern(struct ov02e10 *ov02e10, u32 pattern) { if (pattern) pattern = pattern << OV02E10_TEST_PATTERN_BAR_SHIFT | OV02E10_TEST_PATTERN_ENABLE; return ov02e10_write_reg(ov02e10, (u8) OV02E10_REG_TEST_PATTERN, 1, pattern); } static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov02e10 *ov02e10 = container_of(ctrl->handler, struct ov02e10, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); s64 exposure_max; int ret; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov02e10->cur_mode->height + ctrl->val - OV02E10_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov02e10->exposure, ov02e10->exposure->minimum, exposure_max, ov02e10->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = ov02e10_write_reg(ov02e10, OV02E10_REG_COMMAND_UPDATE, 1, OV02E10_COMMAND_HOLD); switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: dev_dbg(&client->dev, "set analog gain\n"); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_PAGE_FLAG, 1, OV02E10_PAGE_1); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_ANALOG_GAIN, 1, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: dev_dbg(&client->dev, "set digital gain\n"); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_PAGE_FLAG, 1, OV02E10_PAGE_1); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_DIGITAL_GAIN, 2, ctrl->val); break; case V4L2_CID_EXPOSURE: dev_dbg(&client->dev, "set exposure\n"); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_PAGE_FLAG, 1, OV02E10_PAGE_1); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_EXPOSURE, 2, ctrl->val); break; case V4L2_CID_VBLANK: dev_dbg(&client->dev, "set vblank\n"); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_PAGE_FLAG, 1, OV02E10_PAGE_1); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_VTS, 2, ov02e10->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: dev_dbg(&client->dev, "set test pattern\n"); ret = ov02e10_write_reg(ov02e10, OV02E10_REG_PAGE_FLAG, 1, OV02E10_PAGE_1); ret = ov02e10_test_pattern(ov02e10, ctrl->val); break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) case V4L2_CID_PRIVACY: dev_dbg(&client->dev, "set privacy to %d", ctrl->val); break; #endif default: ret = -EINVAL; break; } dev_dbg(&client->dev, 
"will update cmd\n"); ret |= ov02e10_write_reg(ov02e10, OV02E10_REG_COMMAND_UPDATE, 1, OV02E10_COMMAND_UPDATE); pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov02e10_ctrl_ops = { .s_ctrl = ov02e10_set_ctrl, }; static int ov02e10_init_controls(struct ov02e10 *ov02e10) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct ov02e10_mode *cur_mode; s64 exposure_max, h_blank, pixel_rate; u32 vblank_min, vblank_max, vblank_default; int size; int ret; ctrl_hdlr = &ov02e10->ctrl_handler; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9); #else ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); #endif if (ret) return ret; ctrl_hdlr->lock = &ov02e10->mutex; cur_mode = ov02e10->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); ov02e10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (ov02e10->link_freq) ov02e10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate = to_pixel_rate(OV02E10_LINK_FREQ_360MHZ_INDEX); ov02e10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, pixel_rate, 1, pixel_rate); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = OV02E10_VTS_MAX - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; ov02e10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index); h_blank -= cur_mode->width; ov02e10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov02e10->hblank) ov02e10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) ov02e10->privacy_status = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_PRIVACY, 0, 1, 1, !(ov02e10->status.status)); #endif v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV02E10_ANAL_GAIN_MIN, OV02E10_ANAL_GAIN_MAX, OV02E10_ANAL_GAIN_STEP, OV02E10_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV02E10_DGTL_GAIN_MIN, OV02E10_DGTL_GAIN_MAX, OV02E10_DGTL_GAIN_STEP, OV02E10_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - OV02E10_EXPOSURE_MAX_MARGIN; ov02e10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_EXPOSURE, OV02E10_EXPOSURE_MIN, exposure_max, OV02E10_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov02e10_test_pattern_menu) - 1, 0, 0, ov02e10_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov02e10->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov02e10_update_pad_format(const struct ov02e10_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) static void ov02e10_vsc_privacy_callback(void *handle, enum vsc_privacy_status status) { struct ov02e10 *ov02e10 = handle; v4l2_ctrl_s_ctrl(ov02e10->privacy_status, !status); } #endif static int ov02e10_start_streaming(struct ov02e10 *ov02e10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); const struct ov02e10_reg_list *reg_list; int ret; dev_dbg(&client->dev, "start to set sensor settings\n"); 
reg_list = &ov02e10->cur_mode->reg_list; ret = ov02e10_write_reg_list(ov02e10, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } dev_dbg(&client->dev, "start to set ctrl_handler\n"); ret = __v4l2_ctrl_handler_setup(ov02e10->sd.ctrl_handler); if (ret) { dev_err(&client->dev, "setup V4L2 ctrl handler fail\n"); return ret; } dev_dbg(&client->dev, "start to streaming\n"); ret = ov02e10_write_reg_list(ov02e10, &ov02e10_streaming_list); if (ret) { dev_err(&client->dev, "failed to streaming mode"); return ret; } return 0; } static void ov02e10_stop_streaming(struct ov02e10 *ov02e10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); int ret; ret = ov02e10_write_reg_list(ov02e10, &ov02e10_standby_list); if (ret) dev_err(&client->dev, "failed to stop streaming: %d", ret); } static int ov02e10_set_stream(struct v4l2_subdev *sd, int enable) { struct ov02e10 *ov02e10 = to_ov02e10(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (ov02e10->streaming == enable) return 0; mutex_lock(&ov02e10->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&ov02e10->mutex); return ret; } ret = ov02e10_start_streaming(ov02e10); if (ret) { dev_dbg(&client->dev, "start streaming failed\n"); enable = 0; ov02e10_stop_streaming(ov02e10); pm_runtime_put(&client->dev); } } else { ov02e10_stop_streaming(ov02e10); pm_runtime_put(&client->dev); } ov02e10->streaming = enable; mutex_unlock(&ov02e10->mutex); return ret; } /* This function tries to get power control resources */ static int ov02e10_get_pm_resources(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02e10 *ov02e10 = to_ov02e10(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) acpi_handle handle = ACPI_HANDLE(dev); struct acpi_handle_list dep_devices; acpi_status status; int i = 0; ov02e10->use_intel_vsc = false; if (!acpi_has_method(handle, "_DEP")) return false; status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); return false; } for (i = 0; i < dep_devices.count; i++) { struct acpi_device *dep_device = NULL; if (dep_devices.handles[i]) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) acpi_bus_get_device(dep_devices.handles[i], &dep_device); #else dep_device = acpi_fetch_acpi_dev(dep_devices.handles[i]); #endif if (dep_device && acpi_match_device_ids(dep_device, cvfd_ids) == 0) { ov02e10->use_intel_vsc = true; return 0; } } #endif ov02e10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ov02e10->reset)) return dev_err_probe(dev, PTR_ERR(ov02e10->reset), "failed to get reset gpio\n"); ov02e10->handshake = devm_gpiod_get_optional(dev, "handshake", GPIOD_OUT_LOW); if (IS_ERR(ov02e10->handshake)) return dev_err_probe(dev, PTR_ERR(ov02e10->handshake), "failed to get handshake gpio\n"); ov02e10->img_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(ov02e10->img_clk)) return dev_err_probe(dev, PTR_ERR(ov02e10->img_clk), "failed to get imaging clock\n"); ov02e10->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(ov02e10->avdd)) { ret = PTR_ERR(ov02e10->avdd); ov02e10->avdd = NULL; if (ret != -ENODEV) return dev_err_probe(dev, ret, "failed to get avdd regulator\n"); } return 0; } static int ov02e10_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02e10 *ov02e10 = to_ov02e10(sd); int 
ret = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (ov02e10->use_intel_vsc) { ret = vsc_release_camera_sensor(&ov02e10->status); if (ret && ret != -EAGAIN) dev_err(dev, "Release VSC failed"); return ret; } #endif gpiod_set_value_cansleep(ov02e10->reset, 1); gpiod_set_value_cansleep(ov02e10->handshake, 0); if (ov02e10->avdd) ret = regulator_disable(ov02e10->avdd); clk_disable_unprepare(ov02e10->img_clk); return ret; } static int ov02e10_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02e10 *ov02e10 = to_ov02e10(sd); int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) && \ IS_ENABLED(CONFIG_INTEL_VSC) if (ov02e10->use_intel_vsc) { ov02e10->conf.lane_num = OV02E10_DATA_LANES; /* frequency unit 100k */ ov02e10->conf.freq = OV02E10_LINK_FREQ_360MHZ / 100000; ret = vsc_acquire_camera_sensor(&ov02e10->conf, ov02e10_vsc_privacy_callback, ov02e10, &ov02e10->status); if (ret == -EAGAIN) return -EPROBE_DEFER; if (ret) { dev_err(dev, "Acquire VSC failed"); return ret; } if (ov02e10->privacy_status) __v4l2_ctrl_s_ctrl(ov02e10->privacy_status, !(ov02e10->status.status)); return ret; } #endif ret = clk_prepare_enable(ov02e10->img_clk); if (ret < 0) { dev_err(dev, "failed to enable imaging clock: %d", ret); return ret; } if (ov02e10->avdd) { ret = regulator_enable(ov02e10->avdd); if (ret < 0) { dev_err(dev, "failed to enable avdd: %d", ret); clk_disable_unprepare(ov02e10->img_clk); return ret; } } gpiod_set_value_cansleep(ov02e10->handshake, 1); gpiod_set_value_cansleep(ov02e10->reset, 0); /* Lattice MIPI aggregator with some version FW needs longer delay after handshake triggered. We set 25ms as a safe value and wait for a stable version FW. */ msleep_interruptible(25); return ret; } static int __maybe_unused ov02e10_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02e10 *ov02e10 = to_ov02e10(sd); mutex_lock(&ov02e10->mutex); if (ov02e10->streaming) ov02e10_stop_streaming(ov02e10); mutex_unlock(&ov02e10->mutex); return 0; } static int __maybe_unused ov02e10_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov02e10 *ov02e10 = to_ov02e10(sd); int ret = 0; mutex_lock(&ov02e10->mutex); if (!ov02e10->streaming) goto exit; ret = ov02e10_start_streaming(ov02e10); if (ret) { ov02e10->streaming = false; ov02e10_stop_streaming(ov02e10); } exit: mutex_unlock(&ov02e10->mutex); return ret; } static int ov02e10_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct ov02e10 *ov02e10 = to_ov02e10(sd); const struct ov02e10_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&ov02e10->mutex); ov02e10_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov02e10->cur_mode = mode; __v4l2_ctrl_s_ctrl(ov02e10->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov02e10->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov02e10->vblank, mode->vts_min - mode->height, OV02E10_VTS_MAX - mode->height, 1, vblank_def); 
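/*
 * Illustration only -- the block below is not part of the upstream
 * driver.  The horizontal blanking set just after this point is derived
 * from the link-frequency based pixel rate rather than stored per mode.
 * With the constants defined at the top of this file the arithmetic is:
 *
 *   pixel_rate = link_freq * 2 * lanes / bits_per_pixel
 *              = 360 MHz * 2 * 2 / 10           = 144 Mpixel/s
 *   ppl        = hts * pixel_rate / OV02E10_SCLK
 *              = 534 * 144000000 / 36000000     = 2136 pixels per line
 *   hblank     = ppl - width = 2136 - 1928      = 208
 *
 * The host-side sketch below (kept out of the build by "#if 0") simply
 * re-checks that arithmetic with 64-bit C; it is not driver code.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t link_freq = 360000000ULL;   /* OV02E10_LINK_FREQ_360MHZ */
        const uint64_t lanes = 2, depth = 10;      /* DATA_LANES, RGB_DEPTH */
        const uint64_t sclk = 36000000ULL;         /* OV02E10_SCLK */
        const uint64_t hts = 534, width = 1928;    /* 1928x1088 mode */
        const uint64_t pixel_rate = link_freq * 2 * lanes / depth;
        const uint64_t ppl = hts * pixel_rate / sclk;

        printf("pixel_rate=%llu ppl=%llu hblank=%llu\n",
               (unsigned long long)pixel_rate, (unsigned long long)ppl,
               (unsigned long long)(ppl - width)); /* 144000000 2136 208 */
        return 0;
}
#endif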
__v4l2_ctrl_s_ctrl(ov02e10->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(ov02e10->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov02e10->mutex); return 0; } static int ov02e10_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct ov02e10 *ov02e10 = to_ov02e10(sd); mutex_lock(&ov02e10->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov02e10->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov02e10_update_pad_format(ov02e10->cur_mode, &fmt->format); mutex_unlock(&ov02e10->mutex); return 0; } static int ov02e10_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov02e10_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov02e10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov02e10 *ov02e10 = to_ov02e10(sd); mutex_lock(&ov02e10->mutex); ov02e10_update_pad_format(&supported_modes[0], #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) v4l2_subdev_get_try_format(sd, fh->state, 0)); #else v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov02e10->mutex); return 0; } static const struct v4l2_subdev_video_ops ov02e10_video_ops = { .s_stream = ov02e10_set_stream, }; static const struct v4l2_subdev_pad_ops ov02e10_pad_ops = { .set_fmt = ov02e10_set_format, .get_fmt = ov02e10_get_format, .enum_mbus_code = ov02e10_enum_mbus_code, .enum_frame_size = ov02e10_enum_frame_size, }; static const struct v4l2_subdev_ops ov02e10_subdev_ops = { .video = &ov02e10_video_ops, .pad = &ov02e10_pad_ops, }; static const struct media_entity_operations ov02e10_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov02e10_internal_ops = { .open = ov02e10_open, }; static int ov02e10_identify_module(struct ov02e10 *ov02e10) { struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd); int ret; u32 val; ret = ov02e10_write_reg(ov02e10, OV02E10_REG_PAGE_FLAG, 1, OV02E10_PAGE_0); if (ret) return ret; ret = ov02e10_read_reg(ov02e10, OV02E10_REG_CHIP_ID, 4, &val); if (ret) return ret; if (val != OV02E10_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV02E10_CHIP_ID, val); return -ENXIO; } return 0; } static int ov02e10_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return 
ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV02E10_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov02e10_remove(struct i2c_client *client) #else static void ov02e10_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov02e10 *ov02e10 = to_ov02e10(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&ov02e10->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int ov02e10_probe(struct i2c_client *client) { struct ov02e10 *ov02e; int ret; /* Check HW config */ ret = ov02e10_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } ov02e = devm_kzalloc(&client->dev, sizeof(*ov02e), GFP_KERNEL); if (!ov02e) return -ENOMEM; /* Initialize subdev */ v4l2_i2c_subdev_init(&ov02e->sd, client, &ov02e10_subdev_ops); ov02e10_get_pm_resources(&client->dev); ret = ov02e10_power_on(&client->dev); if (ret) { dev_err_probe(&client->dev, ret, "failed to power on\n"); goto error_power_off; } /* Check module identity */ ret = ov02e10_identify_module(ov02e); if (ret) { dev_err(&client->dev, "failed to find sensor: %d\n", ret); goto error_power_off; } /* Set default mode to max resolution */ ov02e->cur_mode = &supported_modes[0]; dev_dbg(&client->dev, "will Init controls\n"); ret = ov02e10_init_controls(ov02e); if (ret) return ret; /* Initialize subdev */ ov02e->sd.internal_ops = &ov02e10_internal_ops; ov02e->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov02e->sd.entity.ops = &ov02e10_subdev_entity_ops; ov02e->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; /* Initialize source pad */ ov02e->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov02e->sd.entity, 1, &ov02e->pad); if (ret) { dev_err(&client->dev, "%s failed:%d\n", __func__, ret); goto error_handler_free; } ret = v4l2_async_register_subdev_sensor(&ov02e->sd); if (ret < 0) { dev_err(&client->dev, "async reg subdev error\n"); goto error_media_entity; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. 
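 *
 * (Added note, not in the original comment: pm_runtime_set_active()
 * records that the device is currently powered, pm_runtime_enable()
 * arms runtime PM for it, and pm_runtime_idle() then allows the PM core
 * to suspend it through ov02e10_power_off() as soon as it is unused,
 * per the SET_RUNTIME_PM_OPS() callbacks declared below.  The ov02c10
 * probe above follows the same pattern.)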
*/ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; error_media_entity: media_entity_cleanup(&ov02e->sd.entity); error_handler_free: v4l2_ctrl_handler_free(ov02e->sd.ctrl_handler); mutex_destroy(&ov02e->mutex); dev_err(&client->dev, "%s failed:%d\n", __func__, ret); error_power_off: ov02e10_power_off(&client->dev); dev_dbg(&client->dev, "probe done\n"); return ret; } static const struct dev_pm_ops ov02e10_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov02e10_suspend, ov02e10_resume) SET_RUNTIME_PM_OPS(ov02e10_power_off, ov02e10_power_on, NULL) }; static const struct acpi_device_id ov02e10_acpi_ids[] = { { "OVTI02E1" }, { } }; MODULE_DEVICE_TABLE(acpi, ov02e10_acpi_ids); static struct i2c_driver ov02e10_i2c_driver = { .driver = { .name = "ov02e10", .pm = &ov02e10_pm_ops, .acpi_match_table = ov02e10_acpi_ids, }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov02e10_probe, #else .probe = ov02e10_probe, #endif .remove = ov02e10_remove, }; module_i2c_driver(ov02e10_i2c_driver); MODULE_AUTHOR("Jingjing Xiong "); MODULE_DESCRIPTION("OmniVision OV02E10 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov05c10.c000066400000000000000000000645611471702545500234000ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2024 Intel Corporation. #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #define OV05C10_REG_CHIP_ID_H CCI_REG16(0x00) //P0:0x00[7:0],0x01[7:0] #define OV05C10_REG_CHIP_ID_L CCI_REG16(0x02) //P0:0x02[7:0],0x03[7:0] #define OV05C10_CHIP_ID 0x43055610 #define REG_TIMING_HTS CCI_REG16(0x37) //P1:0x37[4:0],0x38[7:0] #define REG_TIMING_VTS CCI_REG16(0x35) //P1:0x35[7:0],0x36[7:0] #define REG_DUMMY_LINE CCI_REG16(0x05) //P1:0x05[7:0],0x06[7:0] #define REG_EXPOSURE CCI_REG24(0x02) //P1:0x02[7:0]--0x04[7:0] #define REG_ANALOG_GAIN CCI_REG8(0x24) #define REG_DIGITAL_GAIN CCI_REG16(0x21) //P1:0x21[2:0],0x22[7:0] #define REG_PAGE_FLAG CCI_REG8(0xfd) #define PAGE_0 0x0 #define PAGE_1 0x1 #define OV05C10_EXPOSURE_MARGIN 33 #define OV05C10_EXPOSURE_MIN 0x6 #define OV05C10_EXPOSURE_STEP 0x1 #define MAX_ANA_GAIN 0xf8 // 15.5x #define MIN_ANA_GAIN 0x10 // 1x #define OV05C10_ANAL_GAIN_STEP 0x01 #define OV05C10_ANAL_GAIN_DEFAULT 0x10 #define MAX_DIG_GAIN 0x100 // 4x #define MIN_DIG_GAIN 0x40 // 1x #define OV05C10_DGTL_GAIN_STEP 0x01 #define OV05C10_DGTL_GAIN_DEFAULT 0x40 #define OV05C10_VTS_MAX 0xffff #define OV05C10_PPL 3236 #define OV05C10_DEFAULT_VTS 1860 #define PIXEL_RATE 192000000ULL #define OV05C10_NATIVE_WIDTH 2888 #define OV05C10_NATIVE_HEIGHT 1808 #define to_ov05c10(_sd) container_of(_sd, struct ov05c10, sd) static const char *const ov05c10_test_pattern_menu[] = { "No Pattern", "Color Bar", }; static const s64 ov05c10_link_freq_menu_items[] = { 480000000ULL, }; struct ov05c10_reg_list { u32 num_of_regs; const struct cci_reg_sequence *regs; }; struct ov05c10_mode { u32 width; u32 height; u32 hts; u32 vts_def; u32 vts_min; u32 link_freq_index; u32 code; u32 fps; s32 lanes; /* Sensor register settings for this mode */ const struct ov05c10_reg_list reg_list; }; static const struct cci_reg_sequence ov05c10_soft_standby[] = { { REG_PAGE_FLAG, PAGE_0 }, { CCI_REG8(0x20), 0x5b }, { REG_PAGE_FLAG, PAGE_1 }, { CCI_REG8(0x33), 0x02 }, { CCI_REG8(0x01), 0x02 }, { CCI_REG8(0x01), 0x02 }, }; static const struct 
cci_reg_sequence ov05c10_streaming[] = { { REG_PAGE_FLAG, PAGE_1 }, { CCI_REG8(0x33), 0x03 }, { CCI_REG8(0x01), 0x02 }, { CCI_REG8(0x01), 0x02 }, { CCI_REG8(0x01), 0x02 }, { REG_PAGE_FLAG, PAGE_0 }, { CCI_REG8(0x20), 0x1f }, }; static const struct cci_reg_sequence ov05c10_test_enable[] = { { CCI_REG8(0xf3), 0x02 }, { CCI_REG8(0x12), 0x01 }, }; static const struct cci_reg_sequence ov05c10_test_disable[] = { { CCI_REG8(0xf3), 0x00 }, { CCI_REG8(0x12), 0x00 }, }; static const struct cci_reg_sequence ov05c10_trigger[] = { { REG_PAGE_FLAG, PAGE_1 }, { CCI_REG8(0x01), 0x01 }, }; //2800X1576_2lane_raw10_Mclk19.2M_pclk96M_30fps static const struct cci_reg_sequence mode_2800_1576_30fps[] = { { CCI_REG8(0xfd), 0x00 }, { CCI_REG8(0x20), 0x00 }, { CCI_REG8(0xfd), 0x00 }, { CCI_REG8(0x20), 0x0b }, { CCI_REG8(0xc1), 0x09 }, { CCI_REG8(0x21), 0x06 }, { CCI_REG8(0x11), 0x4e }, { CCI_REG8(0x12), 0x13 }, { CCI_REG8(0x14), 0x96 }, { CCI_REG8(0x1b), 0x64 }, { CCI_REG8(0x1d), 0x02 }, { CCI_REG8(0x1e), 0x40 }, { CCI_REG8(0xe7), 0x03 }, { CCI_REG8(0xe7), 0x00 }, { CCI_REG8(0x21), 0x00 }, { CCI_REG8(0xfd), 0x01 }, { CCI_REG8(0x03), 0x00 }, { CCI_REG8(0x04), 0x06 }, { CCI_REG8(0x06), 0x76 }, { CCI_REG8(0x07), 0x08 }, { CCI_REG8(0x1b), 0x01 }, { CCI_REG8(0x24), 0xff }, { CCI_REG8(0x42), 0x5d }, { CCI_REG8(0x43), 0x08 }, { CCI_REG8(0x44), 0x81 }, { CCI_REG8(0x46), 0x5f }, { CCI_REG8(0x48), 0x18 }, { CCI_REG8(0x49), 0x04 }, { CCI_REG8(0x5c), 0x18 }, { CCI_REG8(0x5e), 0x13 }, { CCI_REG8(0x70), 0x15 }, { CCI_REG8(0x77), 0x35 }, { CCI_REG8(0x79), 0xb2 }, { CCI_REG8(0x7b), 0x08 }, { CCI_REG8(0x7d), 0x08 }, { CCI_REG8(0x7e), 0x08 }, { CCI_REG8(0x7f), 0x08 }, { CCI_REG8(0x90), 0x37 }, { CCI_REG8(0x91), 0x05 }, { CCI_REG8(0x92), 0x18 }, { CCI_REG8(0x93), 0x27 }, { CCI_REG8(0x94), 0x05 }, { CCI_REG8(0x95), 0x38 }, { CCI_REG8(0x9b), 0x00 }, { CCI_REG8(0x9c), 0x06 }, { CCI_REG8(0x9d), 0x28 }, { CCI_REG8(0x9e), 0x06 }, { CCI_REG8(0xb2), 0x0f }, { CCI_REG8(0xb3), 0x29 }, { CCI_REG8(0xbf), 0x3c }, { CCI_REG8(0xc2), 0x04 }, { CCI_REG8(0xc4), 0x00 }, { CCI_REG8(0xca), 0x20 }, { CCI_REG8(0xcb), 0x20 }, { CCI_REG8(0xcc), 0x28 }, { CCI_REG8(0xcd), 0x28 }, { CCI_REG8(0xce), 0x20 }, { CCI_REG8(0xcf), 0x20 }, { CCI_REG8(0xd0), 0x2a }, { CCI_REG8(0xd1), 0x2a }, { CCI_REG8(0xfd), 0x0f }, { CCI_REG8(0x00), 0x00 }, { CCI_REG8(0x01), 0xa0 }, { CCI_REG8(0x02), 0x48 }, { CCI_REG8(0x07), 0x8e }, { CCI_REG8(0x08), 0x70 }, { CCI_REG8(0x09), 0x01 }, { CCI_REG8(0x0b), 0x40 }, { CCI_REG8(0x0d), 0x07 }, { CCI_REG8(0x11), 0x33 }, { CCI_REG8(0x12), 0x77 }, { CCI_REG8(0x13), 0x66 }, { CCI_REG8(0x14), 0x65 }, { CCI_REG8(0x15), 0x37 }, { CCI_REG8(0x16), 0xbf }, { CCI_REG8(0x17), 0xff }, { CCI_REG8(0x18), 0xff }, { CCI_REG8(0x19), 0x12 }, { CCI_REG8(0x1a), 0x10 }, { CCI_REG8(0x1c), 0x77 }, { CCI_REG8(0x1d), 0x77 }, { CCI_REG8(0x20), 0x0f }, { CCI_REG8(0x21), 0x0f }, { CCI_REG8(0x22), 0x0f }, { CCI_REG8(0x23), 0x0f }, { CCI_REG8(0x2b), 0x20 }, { CCI_REG8(0x2c), 0x20 }, { CCI_REG8(0x2d), 0x04 }, { CCI_REG8(0xfd), 0x03 }, { CCI_REG8(0x9d), 0x0f }, { CCI_REG8(0x9f), 0x40 }, { CCI_REG8(0xfd), 0x00 }, { CCI_REG8(0x20), 0x1b }, { CCI_REG8(0xfd), 0x04 }, { CCI_REG8(0x19), 0x60 }, { CCI_REG8(0xfd), 0x02 }, { CCI_REG8(0x75), 0x04 }, { CCI_REG8(0x7f), 0x06 }, { CCI_REG8(0x9a), 0x03 }, { CCI_REG8(0xa0), 0x00 }, { CCI_REG8(0xa1), 0x75 }, //;GRBG { CCI_REG8(0xa2), 0x06 }, //;vsize[11:8] { CCI_REG8(0xa3), 0x28 }, //;vsize[7:0] { CCI_REG8(0xa4), 0x00 }, { CCI_REG8(0xa5), 0x2e }, { CCI_REG8(0xa6), 0x0a }, //;hsize[11:8] { CCI_REG8(0xa7), 0xf0 }, //;hsize[7:0] { 
CCI_REG8(0xfd), 0x00 }, //;mipi size { CCI_REG8(0x8e), 0x0a }, //;hsize[11:8] { CCI_REG8(0x8f), 0xf0 }, //;hsize[7:0] { CCI_REG8(0x90), 0x06 }, //;vsize[11:8] { CCI_REG8(0x91), 0x28 }, //;vsize[7:0] { CCI_REG8(0xfd), 0x07 }, { CCI_REG8(0x42), 0x00 }, { CCI_REG8(0x43), 0x80 }, { CCI_REG8(0x44), 0x00 }, { CCI_REG8(0x45), 0x80 }, { CCI_REG8(0x46), 0x00 }, { CCI_REG8(0x47), 0x80 }, { CCI_REG8(0x48), 0x00 }, { CCI_REG8(0x49), 0x80 }, { CCI_REG8(0x00), 0xf7 }, { CCI_REG8(0xfd), 0x00 }, { CCI_REG8(0xe7), 0x03 }, { CCI_REG8(0xe7), 0x00 }, { CCI_REG8(0xfd), 0x00 }, { CCI_REG8(0x93), 0x18 }, { CCI_REG8(0x94), 0xff }, { CCI_REG8(0x95), 0xbd }, { CCI_REG8(0x96), 0x1a }, { CCI_REG8(0x98), 0x04 }, { CCI_REG8(0x99), 0x08 }, { CCI_REG8(0x9b), 0x10 }, { CCI_REG8(0x9c), 0x3f }, { CCI_REG8(0xa1), 0x05 }, { CCI_REG8(0xa4), 0x2f }, { CCI_REG8(0xc0), 0x0c }, { CCI_REG8(0xc1), 0x08 }, { CCI_REG8(0xc2), 0x00 }, { CCI_REG8(0xb6), 0x20 }, { CCI_REG8(0xbb), 0x80 }, { CCI_REG8(0xfd), 0x00 }, { CCI_REG8(0xa0), 0x01 }, //enable MIPI }; static const struct ov05c10_mode supported_modes[] = { { .width = 2800, .height = 1576, .hts = 758, .vts_def = 1978, .vts_min = 1978, .code = MEDIA_BUS_FMT_SGRBG10_1X10, .fps = 30, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2800_1576_30fps), .regs = mode_2800_1576_30fps, }, .link_freq_index = 0, }, }; struct ov05c10 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *link_freq; struct v4l2_ctrl *exposure; struct v4l2_ctrl *analogue_gain; struct v4l2_ctrl *digital_gain; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct regmap *regmap; unsigned long link_freq_bitmap; const struct ov05c10_mode *cur_mode; struct clk *img_clk; struct regulator *avdd; struct gpio_desc *reset; }; static int ov05c10_test_pattern(struct ov05c10 *ov05c10, u32 pattern) { int ret; ret = cci_write(ov05c10->regmap, REG_PAGE_FLAG, 0x04, NULL); if (ret) return ret; if (pattern) return cci_multi_reg_write(ov05c10->regmap, ov05c10_test_enable, ARRAY_SIZE(ov05c10_test_enable), NULL); else return cci_multi_reg_write(ov05c10->regmap, ov05c10_test_disable, ARRAY_SIZE(ov05c10_test_disable), NULL); } static int ov05c10_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov05c10 *ov05c10 = container_of(ctrl->handler, struct ov05c10, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov05c10->sd); struct v4l2_subdev_state *state; const struct v4l2_mbus_framefmt *format; s64 exposure_max; u64 vts; int ret; state = v4l2_subdev_get_locked_active_state(&ov05c10->sd); format = v4l2_subdev_state_get_format(state, 0); /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = format->height + ctrl->val - OV05C10_EXPOSURE_MARGIN; __v4l2_ctrl_modify_range(ov05c10->exposure, ov05c10->exposure->minimum, exposure_max, ov05c10->exposure->step, ov05c10->cur_mode->height - OV05C10_EXPOSURE_MARGIN); /* * REG_TIMING_VTS is read-only and increased by writing to * REG_DUMMY_LINE in ov05c10. The calculation formula is * required VTS = dummyline + current VTS. * Here get the current VTS and calculate the required dummyline. */ cci_read(ov05c10->regmap, REG_TIMING_VTS, &vts, NULL); ctrl->val += format->height; ctrl->val = (ctrl->val > vts) ? 
ctrl->val - vts : 0; } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = cci_write(ov05c10->regmap, REG_PAGE_FLAG, PAGE_1, NULL); if (ret) { dev_err(&client->dev, "failed to set ctrl"); goto err; } switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = cci_write(ov05c10->regmap, REG_ANALOG_GAIN, ctrl->val, NULL); break; case V4L2_CID_DIGITAL_GAIN: ret = cci_write(ov05c10->regmap, REG_DIGITAL_GAIN, ctrl->val, NULL); break; case V4L2_CID_EXPOSURE: ret = cci_write(ov05c10->regmap, REG_EXPOSURE, ctrl->val, NULL); break; case V4L2_CID_VBLANK: ret = cci_write(ov05c10->regmap, REG_DUMMY_LINE, ctrl->val, NULL); break; case V4L2_CID_TEST_PATTERN: ret = ov05c10_test_pattern(ov05c10, ctrl->val); break; default: ret = -EINVAL; break; } ret = cci_multi_reg_write(ov05c10->regmap, ov05c10_trigger, ARRAY_SIZE(ov05c10_trigger), NULL); if (ret) { dev_err(&client->dev, "failed to trigger write"); goto err; } err: pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov05c10_ctrl_ops = { .s_ctrl = ov05c10_set_ctrl, }; static int ov05c10_init_controls(struct ov05c10 *ov05c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov05c10->sd); struct v4l2_fwnode_device_properties props; struct v4l2_ctrl_handler *ctrl_hdlr; s64 exposure_max, vblank_max, vblank_min, vblank_def, hblank; int ret; ctrl_hdlr = &ov05c10->ctrl_handler; ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); if (ret) return ret; ov05c10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_LINK_FREQ, ARRAY_SIZE(ov05c10_link_freq_menu_items) - 1, 0, ov05c10_link_freq_menu_items); if (ov05c10->link_freq) ov05c10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; vblank_min = ov05c10->cur_mode->vts_min - ov05c10->cur_mode->height; vblank_max = OV05C10_VTS_MAX - ov05c10->cur_mode->height; vblank_def = ov05c10->cur_mode->vts_def - ov05c10->cur_mode->height; ov05c10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_def); hblank = OV05C10_PPL - ov05c10->cur_mode->width; ov05c10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_HBLANK, hblank, hblank, 1, hblank); if (ov05c10->hblank) ov05c10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; v4l2_ctrl_new_std(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, MIN_ANA_GAIN, MAX_ANA_GAIN, OV05C10_ANAL_GAIN_STEP, OV05C10_ANAL_GAIN_DEFAULT); v4l2_ctrl_new_std(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_DIGITAL_GAIN, MIN_DIG_GAIN, MAX_DIG_GAIN, OV05C10_DGTL_GAIN_STEP, OV05C10_DGTL_GAIN_DEFAULT); exposure_max = ov05c10->cur_mode->vts_def - OV05C10_EXPOSURE_MARGIN; ov05c10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_EXPOSURE, OV05C10_EXPOSURE_MIN, exposure_max, OV05C10_EXPOSURE_STEP, exposure_max); ov05c10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_PIXEL_RATE, PIXEL_RATE, PIXEL_RATE, 1, PIXEL_RATE); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov05c10_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov05c10_test_pattern_menu) - 1, 0, 0, ov05c10_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ret = v4l2_fwnode_device_parse(&client->dev, &props); if (ret) return ret; ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov05c10_ctrl_ops, &props); if (ret) return ret; ov05c10->sd.ctrl_handler = ctrl_hdlr; return 0; } static int ov05c10_start_streaming(struct ov05c10 *ov05c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov05c10->sd); const struct ov05c10_reg_list 
*reg_list; int ret; ret = pm_runtime_resume_and_get(&client->dev); if (ret < 0) return ret; reg_list = &ov05c10->cur_mode->reg_list; ret = cci_multi_reg_write(ov05c10->regmap, reg_list->regs, reg_list->num_of_regs, NULL); if (ret) { dev_err(&client->dev, "failed to set mode"); goto err_rpm_put; } ret = __v4l2_ctrl_handler_setup(ov05c10->sd.ctrl_handler); if (ret) goto err_rpm_put; ret = cci_multi_reg_write(ov05c10->regmap, ov05c10_streaming, ARRAY_SIZE(ov05c10_streaming), NULL); if (ret) { dev_err(&client->dev, "failed to start stream"); goto err_rpm_put; } return 0; err_rpm_put: pm_runtime_put(&client->dev); return ret; } static int ov05c10_stop_streaming(struct ov05c10 *ov05c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov05c10->sd); int ret; ret = cci_multi_reg_write(ov05c10->regmap, ov05c10_soft_standby, ARRAY_SIZE(ov05c10_soft_standby), NULL); if (ret < 0) dev_err(&client->dev, "failed to stop stream"); pm_runtime_put(&client->dev); return ret; } static int ov05c10_s_stream(struct v4l2_subdev *sd, int enable) { struct ov05c10 *ov05c10 = to_ov05c10(sd); struct v4l2_subdev_state *state; int ret; state = v4l2_subdev_lock_and_get_active_state(sd); if (enable) ret = ov05c10_start_streaming(ov05c10); else ret = ov05c10_stop_streaming(ov05c10); v4l2_subdev_unlock_state(state); return ret; } static void ov05c10_update_pad_format(const struct ov05c10_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = mode->code; fmt->field = V4L2_FIELD_NONE; } static int ov05c10_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct ov05c10 *ov05c10 = to_ov05c10(sd); struct i2c_client *client = v4l2_get_subdevdata(&ov05c10->sd); const struct ov05c10_mode *mode; s64 hblank, exposure_max; int ret; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); ov05c10_update_pad_format(mode, &fmt->format); *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) return 0; ov05c10->cur_mode = mode; ret = __v4l2_ctrl_s_ctrl(ov05c10->link_freq, mode->link_freq_index); if (ret) { dev_err(&client->dev, "Link Freq ctrl set failed\n"); return ret; } hblank = OV05C10_PPL - ov05c10->cur_mode->width; ret = __v4l2_ctrl_modify_range(ov05c10->hblank, hblank, hblank, 1, hblank); if (ret) { dev_err(&client->dev, "HB ctrl range update failed\n"); return ret; } /* Update limits and set FPS to default */ ret = __v4l2_ctrl_modify_range(ov05c10->vblank, mode->vts_min - mode->height, OV05C10_VTS_MAX - mode->height, 1, mode->vts_def - mode->height); if (ret) { dev_err(&client->dev, "VB ctrl range update failed\n"); return ret; } ret = __v4l2_ctrl_s_ctrl(ov05c10->vblank, mode->vts_def - mode->height); if (ret) { dev_err(&client->dev, "VB ctrl set failed\n"); return ret; } exposure_max = mode->vts_def - OV05C10_EXPOSURE_MARGIN; ret = __v4l2_ctrl_modify_range(ov05c10->exposure, OV05C10_EXPOSURE_MIN, exposure_max, OV05C10_EXPOSURE_STEP, exposure_max); if (ret) { dev_err(&client->dev, "exposure ctrl range update failed\n"); return ret; } return 0; } static int ov05c10_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov05c10_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum 
*fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov05c10_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel) { switch (sel->target) { case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_CROP: sel->r = *v4l2_subdev_state_get_crop(state, 0); break; case V4L2_SEL_TGT_CROP_BOUNDS: sel->r.top = 0; sel->r.left = 0; sel->r.width = OV05C10_NATIVE_WIDTH; sel->r.height = OV05C10_NATIVE_HEIGHT; break; default: return -EINVAL; } return 0; } static int ov05c10_init_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_TRY, .pad = 0, .format = { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .width = 2800, .height = 1576, }, }; ov05c10_set_format(sd, sd_state, &fmt); return 0; } static const struct v4l2_subdev_video_ops ov05c10_video_ops = { .s_stream = ov05c10_s_stream, }; static const struct v4l2_subdev_pad_ops ov05c10_pad_ops = { .set_fmt = ov05c10_set_format, .get_fmt = v4l2_subdev_get_fmt, .enum_mbus_code = ov05c10_enum_mbus_code, .enum_frame_size = ov05c10_enum_frame_size, .get_selection = ov05c10_get_selection, }; static const struct v4l2_subdev_core_ops ov05c10_core_ops = { .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const struct v4l2_subdev_ops ov05c10_subdev_ops = { .core = &ov05c10_core_ops, .video = &ov05c10_video_ops, .pad = &ov05c10_pad_ops, }; static const struct media_entity_operations ov05c10_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov05c10_internal_ops = { .init_state = ov05c10_init_state, }; static int ov05c10_parse_fwnode(struct ov05c10 *ov05c10, struct device *dev) { struct fwnode_handle *endpoint; struct fwnode_handle *fwnode = dev_fwnode(dev); struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY, }; int ret; unsigned int i, j; if (!fwnode) return -ENXIO; endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!endpoint) { dev_err(dev, "endpoint node not found\n"); return -EPROBE_DEFER; } ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg); fwnode_handle_put(endpoint); if (ret) { dev_err(dev, "parsing endpoint node failed\n"); goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(ov05c10_link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (ov05c10_link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", ov05c10_link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } static int ov05c10_identify_module(struct ov05c10 *ov05c10) { struct i2c_client *client = v4l2_get_subdevdata(&ov05c10->sd); int ret; u64 val_h, val_l; u32 val; ret = cci_write(ov05c10->regmap, REG_PAGE_FLAG, PAGE_0, NULL); if (ret) { dev_err(&client->dev, "chip page write err"); return ret; } ret = cci_read(ov05c10->regmap, OV05C10_REG_CHIP_ID_H, &val_h, NULL); if (ret) return ret; ret = cci_read(ov05c10->regmap, OV05C10_REG_CHIP_ID_L, &val_l, NULL); if (ret) 
return ret; val = ((val_h << 16) | val_l); if (val != OV05C10_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%u", OV05C10_CHIP_ID, val); return -ENXIO; } return 0; } /* This function tries to get power control resources */ static int ov05c10_get_pm_resources(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov05c10 *ov05c10 = to_ov05c10(sd); int ret; ov05c10->img_clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(ov05c10->img_clk)) return dev_err_probe(dev, PTR_ERR(ov05c10->img_clk), "failed to get imaging clock\n"); ov05c10->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(ov05c10->avdd)) { ret = PTR_ERR(ov05c10->avdd); ov05c10->avdd = NULL; if (ret != -ENODEV) return dev_err_probe(dev, ret, "failed to get avdd regulator\n"); } ov05c10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ov05c10->reset)) return dev_err_probe(dev, PTR_ERR(ov05c10->reset), "failed to get reset gpio\n"); return 0; } static int ov05c10_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov05c10 *ov05c10 = to_ov05c10(sd); if (ov05c10->reset) gpiod_set_value_cansleep(ov05c10->reset, 1); if (ov05c10->avdd) regulator_disable(ov05c10->avdd); clk_disable_unprepare(ov05c10->img_clk); return 0; } static int ov05c10_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov05c10 *ov05c10 = to_ov05c10(sd); int ret; ret = clk_prepare_enable(ov05c10->img_clk); if (ret < 0) { dev_err(dev, "failed to enable imaging clock: %d", ret); return ret; } if (ov05c10->avdd) { ret = regulator_enable(ov05c10->avdd); if (ret < 0) { dev_err(dev, "failed to enable avdd: %d", ret); clk_disable_unprepare(ov05c10->img_clk); return ret; } } if (ov05c10->reset) { gpiod_set_value_cansleep(ov05c10->reset, 0); /* 5ms to wait ready after XSHUTDN assert */ usleep_range(5000, 5500); } return 0; } static const struct dev_pm_ops ov05c10_pm_ops = { SET_RUNTIME_PM_OPS(ov05c10_power_off, ov05c10_power_on, NULL) }; static int ov05c10_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ov05c10 *ov05c10; bool full_power; int ret; ov05c10 = devm_kzalloc(&client->dev, sizeof(*ov05c10), GFP_KERNEL); if (!ov05c10) return -ENOMEM; ret = ov05c10_parse_fwnode(ov05c10, dev); if (ret) return ret; ov05c10->regmap = devm_cci_regmap_init_i2c(client, 8); if (IS_ERR(ov05c10->regmap)) return dev_err_probe(dev, PTR_ERR(ov05c10->regmap), "failed to init CCI\n"); v4l2_i2c_subdev_init(&ov05c10->sd, client, &ov05c10_subdev_ops); ret = ov05c10_get_pm_resources(dev); if (ret) return ret; full_power = acpi_dev_state_d0(dev); if (full_power) { ret = ov05c10_power_on(dev); if (ret) { dev_err(&client->dev, "failed to power on\n"); return ret; } /* Check module identity */ ret = ov05c10_identify_module(ov05c10); if (ret) { dev_err(dev, "failed to find sensor: %d\n", ret); goto error_power_off; } } ov05c10->cur_mode = &supported_modes[0]; ret = ov05c10_init_controls(ov05c10); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov05c10->sd.internal_ops = &ov05c10_internal_ops; ov05c10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; ov05c10->sd.entity.ops = &ov05c10_subdev_entity_ops; ov05c10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov05c10->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov05c10->sd.entity, 1, &ov05c10->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto 
probe_error_v4l2_ctrl_handler_free; } ov05c10->sd.state_lock = ov05c10->ctrl_handler.lock; ret = v4l2_subdev_init_finalize(&ov05c10->sd); if (ret < 0) { dev_err(dev, "v4l2 subdev init error: %d\n", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ if (full_power) pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); ret = v4l2_async_register_subdev_sensor(&ov05c10->sd); if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_rpm; } return 0; probe_error_rpm: pm_runtime_disable(&client->dev); v4l2_subdev_cleanup(&ov05c10->sd); probe_error_media_entity_cleanup: media_entity_cleanup(&ov05c10->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov05c10->sd.ctrl_handler); error_power_off: ov05c10_power_off(&client->dev); return ret; } static void ov05c10_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov05c10 *ov05c10 = to_ov05c10(sd); v4l2_async_unregister_subdev(&ov05c10->sd); v4l2_subdev_cleanup(sd); media_entity_cleanup(&ov05c10->sd.entity); v4l2_ctrl_handler_free(&ov05c10->ctrl_handler); pm_runtime_disable(&client->dev); if (!pm_runtime_status_suspended(&client->dev)) ov05c10_power_off(&client->dev); pm_runtime_set_suspended(&client->dev); } static const struct acpi_device_id ov05c10_acpi_ids[] = { { "OVTI05C1" }, {} }; MODULE_DEVICE_TABLE(acpi, ov05c10_acpi_ids); static struct i2c_driver ov05c10_i2c_driver = { .driver = { .name = "ov05c10", .pm = pm_ptr(&ov05c10_pm_ops), .acpi_match_table = ACPI_PTR(ov05c10_acpi_ids), }, .probe = ov05c10_probe, .remove = ov05c10_remove, }; module_i2c_driver(ov05c10_i2c_driver); MODULE_DESCRIPTION("OmniVision ov05c10 camera driver"); MODULE_AUTHOR("Dongcheng Yan "); MODULE_LICENSE("GPL"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov08a10.c000066400000000000000000000703111471702545500233670ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022-2023 Intel Corporation. 
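/*
 * Summary of the control arithmetic used by this driver (see
 * to_pixel_rate(), to_pixels_per_line() and ov08a10_set_ctrl() below):
 *
 *   pixel_rate      = link_freq * 2 * OV08A10_DATA_LANES / OV08A10_RGB_DEPTH
 *   pixels_per_line = hts * pixel_rate / OV08A10_SCLK
 *                     (read-only HBLANK = pixels_per_line - width)
 *   exposure_max    = height + vblank - OV08A10_EXPOSURE_MAX_MARGIN
 */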
#include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #include #include #define OV08A10_REG_VALUE_08BIT 1 #define OV08A10_REG_VALUE_16BIT 2 #define OV08A10_REG_VALUE_24BIT 3 #define OV08A10_LINK_FREQ_500MHZ 500000000ULL //1008Mbps #define OV08A10_SCLK 120000000LL #define OV08A10_MCLK 19200000 #define OV08A10_DATA_LANES 4 #define OV08A10_RGB_DEPTH 10 #define OV08A10_REG_CHIP_ID 0x300a #define OV08A10_CHIP_ID 0x560841 //ToDo #define OV08A10_REG_MODE_SELECT 0x0100 #define OV08A10_MODE_STANDBY 0x00 #define OV08A10_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define OV08A10_REG_VTS 0x380e #define OV08A10_VTS_30FPS 0x171a #define OV08A10_VTS_30FPS_MIN 0x171a #define OV08A10_VTS_MAX 0x7fff /* horizontal-timings from sensor */ #define OV08A10_REG_HTS 0x380c /* Exposure controls from sensor */ #define OV08A10_REG_EXPOSURE 0x3501 #define OV08A10_EXPOSURE_MIN 8 #define OV08A10_EXPOSURE_MAX_MARGIN 28 #define OV08A10_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV08A10_REG_ANALOG_GAIN 0x3508 #define OV08A10_ANAL_GAIN_MIN 128 #define OV08A10_ANAL_GAIN_MAX 2047 #define OV08A10_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define OV08A10_REG_DIG_GAIN 0x350a #define OV08A10_DGTL_GAIN_MIN 1024 #define OV08A10_DGTL_GAIN_MAX 16383 #define OV08A10_DGTL_GAIN_STEP 1 #define OV08A10_DGTL_GAIN_DEFAULT 1024 /* Test Pattern Control */ #define OV08A10_REG_TEST_PATTERN 0x5081 #define OV08A10_TEST_PATTERN_ENABLE BIT(0) #define OV08A10_TEST_PATTERN_BAR_SHIFT 4 #define to_ov08a10(_sd) container_of(_sd, struct ov08a10, sd) enum { OV08A10_LINK_FREQ_500MHZ_INDEX, }; struct ov08a10_reg { u16 address; u8 val; }; struct ov08a10_reg_list { u32 num_of_regs; const struct ov08a10_reg *regs; }; struct ov08a10_link_freq_config { const struct ov08a10_reg_list reg_list; }; struct ov08a10_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct ov08a10_reg_list reg_list; }; static const struct ov08a10_reg ov08a10_global_setting[] = { {0x0103, 0x01}, //sl 1 1 ; delay 1ms {0x0100, 0x00}, {0x0102, 0x01}, {0x0304, 0x01},//01;02;01 ;MCLK=19.2Mhz , 800 Mbps {0x0305, 0xe0},//f4;76;f8 {0x0306, 0x01}, {0x0307, 0x00}, {0x0323, 0x04}, {0x0324, 0x01}, {0x0325, 0x90},//;40 ;MCLK=19.2Mhz {0x4837, 0x15},//14;10;14 ;MCLK=19.2Mhz , 800 Mbps {0x3009, 0x06}, {0x3012, 0x41}, {0x301e, 0x98}, {0x3026, 0x10}, {0x3027, 0x08}, {0x3106, 0x00}, {0x3216, 0x01}, {0x3217, 0x00}, {0x3218, 0x00}, {0x3219, 0x55}, {0x3400, 0x00}, {0x3408, 0x02}, {0x340c, 0x20}, {0x340d, 0x00}, {0x3410, 0x00}, {0x3412, 0x00}, {0x3413, 0x00}, {0x3414, 0x00}, {0x3415, 0x00}, {0x3501, 0x16}, {0x3502, 0xfa}, {0x3504, 0x08}, {0x3508, 0x04}, {0x3509, 0x00}, {0x353c, 0x04}, {0x353d, 0x00}, {0x3600, 0x20}, {0x3608, 0x87}, {0x3609, 0xe0}, {0x360a, 0x66}, {0x360c, 0x20}, {0x361a, 0x80}, {0x361b, 0xd0}, {0x361c, 0x11}, {0x361d, 0x63}, {0x361e, 0x76}, {0x3620, 0x50}, {0x3621, 0x0a}, {0x3622, 0x8a}, {0x3625, 0x88}, {0x3626, 0x49}, {0x362a, 0x80}, {0x3632, 0x00}, {0x3633, 0x10}, {0x3634, 0x10}, {0x3635, 0x10}, {0x3636, 0x0e}, {0x3659, 0x11}, {0x365a, 0x23}, {0x365b, 0x38}, {0x365c, 0x80}, {0x3661, 0x0c}, {0x3663, 0x40}, {0x3665, 0x12}, {0x3668, 0xf0}, {0x3669, 
0x0a}, {0x366a, 0x10}, {0x366b, 0x43}, {0x366c, 0x02}, {0x3674, 0x00}, {0x3706, 0x1b}, {0x3709, 0x25}, {0x370b, 0x3f}, {0x370c, 0x03}, {0x3713, 0x02}, {0x3714, 0x63}, {0x3726, 0x20}, {0x373b, 0x06}, {0x373d, 0x0a}, {0x3752, 0x00}, {0x3753, 0x00}, {0x3754, 0xee}, {0x3767, 0x08}, {0x3768, 0x0e}, {0x3769, 0x02}, {0x376a, 0x00}, {0x376b, 0x00}, {0x37d9, 0x08}, {0x37dc, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x08}, {0x3804, 0x0c}, {0x3805, 0xdf}, {0x3806, 0x09}, {0x3807, 0xa7}, {0x3808, 0x0c}, {0x3809, 0xc0}, {0x380a, 0x09}, {0x380b, 0x90}, {0x380c, 0x03}, {0x380d, 0x86}, {0x380e, 0x17}, {0x380f, 0x1a}, //@@ GRBG {0x3810, 0x00}, {0x3811, 0x11}, {0x3812, 0x00}, {0x3813, 0x09}, {0x3814, 0x11}, {0x3815, 0x11}, {0x3820, 0x80}, {0x3821, 0x04}, {0x3823, 0x00}, {0x3824, 0x00}, {0x3825, 0x00}, {0x3826, 0x00}, {0x3827, 0x00}, {0x382b, 0x08}, {0x3834, 0xf4}, {0x3836, 0x14}, {0x3837, 0x04}, {0x3898, 0x00}, {0x38a0, 0x02}, {0x38a1, 0x02}, {0x38a2, 0x02}, {0x38a3, 0x04}, {0x38c3, 0x00}, {0x38c4, 0x00}, {0x38c5, 0x00}, {0x38c6, 0x00}, {0x38c7, 0x00}, {0x38c8, 0x00}, {0x3d8c, 0x60}, {0x3d8d, 0x30}, {0x3f00, 0x8b}, {0x4000, 0xf7}, {0x4001, 0x60}, {0x4002, 0x00}, {0x4003, 0x40}, {0x4008, 0x02}, {0x4009, 0x11}, {0x400a, 0x01}, {0x400b, 0x00}, {0x4020, 0x00}, {0x4021, 0x00}, {0x4022, 0x00}, {0x4023, 0x00}, {0x4024, 0x00}, {0x4025, 0x00}, {0x4026, 0x00}, {0x4027, 0x00}, {0x4030, 0x00}, {0x4031, 0x00}, {0x4032, 0x00}, {0x4033, 0x00}, {0x4034, 0x00}, {0x4035, 0x00}, {0x4036, 0x00}, {0x4037, 0x00}, {0x4040, 0x00}, {0x4041, 0x07}, {0x4201, 0x00}, {0x4202, 0x00}, {0x4204, 0x09}, {0x4205, 0x00}, {0x4300, 0xff}, {0x4301, 0x00}, {0x4302, 0x0f}, {0x4500, 0x08}, {0x4501, 0x00}, {0x450b, 0x00}, {0x4640, 0x01}, {0x4641, 0x04}, {0x4645, 0x03}, {0x4800, 0x00}, {0x4803, 0x18}, {0x4809, 0x2b}, {0x480e, 0x02}, {0x4813, 0x90}, {0x481b, 0x3c}, {0x4847, 0x01}, {0x4856, 0x58}, {0x4888, 0x90}, {0x4901, 0x00}, {0x4902, 0x00}, {0x4904, 0x09}, {0x4905, 0x00}, {0x5000, 0x89}, {0x5001, 0x5a}, {0x5002, 0x51}, {0x5005, 0xd0}, {0x5007, 0xa0}, {0x500a, 0x02}, {0x500b, 0x02}, {0x500c, 0x0a}, {0x500d, 0x0a}, {0x500e, 0x02}, {0x500f, 0x06}, {0x5010, 0x0a}, {0x5011, 0x0e}, {0x5013, 0x00}, {0x5015, 0x00}, {0x5017, 0x10}, {0x5019, 0x00}, {0x501b, 0xc0}, {0x501d, 0xa0}, {0x501e, 0x00}, {0x501f, 0x40}, {0x5058, 0x00}, {0x5081, 0x00}, {0x5180, 0x00}, {0x5181, 0x3c}, {0x5182, 0x01}, {0x5183, 0xfc}, {0x5200, 0x4f}, {0x5203, 0x07}, {0x5208, 0xff}, {0x520a, 0x3f}, {0x520b, 0xc0}, {0x520c, 0x05}, {0x520d, 0xc8}, {0x520e, 0x3f}, {0x520f, 0x0f}, {0x5210, 0x0a}, {0x5218, 0x02}, {0x5219, 0x01}, {0x521b, 0x02}, {0x521c, 0x01}, {0x58cb, 0x03}, }; static const struct ov08a10_reg mode_3264x2448_regs[] = { {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x08}, {0x3804, 0x0c}, {0x3805, 0xdf}, {0x3806, 0x09}, {0x3807, 0xa7}, {0x3808, 0x0c}, {0x3809, 0xc0}, {0x380a, 0x09}, {0x380b, 0x90}, {0x380c, 0x03}, {0x380d, 0x86}, {0x380e, 0x17}, {0x380f, 0x1a}, //@@ GRBG {0x3810, 0x00}, {0x3811, 0x11}, {0x3812, 0x00}, {0x3813, 0x09}, {0x3814, 0x11}, {0x3815, 0x11}, }; static const char * const ov08a10_test_pattern_menu[] = { "Disabled", "Standard Color Bar", "Top-Bottom Darker Color Bar", "Right-Left Darker Color Bar", "Bottom-Top Darker Color Bar" }; static const s64 link_freq_menu_items[] = { OV08A10_LINK_FREQ_500MHZ, }; static const struct ov08a10_link_freq_config link_freq_configs[] = { [OV08A10_LINK_FREQ_500MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(ov08a10_global_setting), .regs = ov08a10_global_setting, } } }; static const 
struct ov08a10_mode supported_modes[] = { { .width = 3264, .height = 2448, .hts = 902, //0x0386 .vts_def = OV08A10_VTS_30FPS, .vts_min = OV08A10_VTS_30FPS_MIN, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs), .regs = mode_3264x2448_regs, }, .link_freq_index = OV08A10_LINK_FREQ_500MHZ_INDEX, }, }; struct ov08a10 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; /* Current mode */ const struct ov08a10_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; /* i2c client */ struct i2c_client *client; /* Power management */ struct clk *imgclk; struct gpio_desc *reset_gpio; struct regulator *avdd; bool identified; }; static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV08A10_DATA_LANES; do_div(pixel_rate, OV08A10_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, OV08A10_SCLK); return ppl; } static int ov08a10_read_reg(struct ov08a10 *ov08a10, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret; if (len > 4) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[4 - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov08a10_write_reg(struct ov08a10 *ov08a10, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); u8 buf[6]; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); if (i2c_master_send(client, buf, len + 2) != len + 2) return -EIO; return 0; } static int ov08a10_write_reg_list(struct ov08a10 *ov08a10, const struct ov08a10_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); unsigned int i; int ret; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov08a10_write_reg(ov08a10, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "failed to write reg 0x%4.4x. 
error = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov08a10_test_pattern(struct ov08a10 *ov08a10, u32 pattern) { if (pattern) pattern = (pattern - 1) << OV08A10_TEST_PATTERN_BAR_SHIFT | OV08A10_TEST_PATTERN_ENABLE; return ov08a10_write_reg(ov08a10, OV08A10_REG_TEST_PATTERN, OV08A10_REG_VALUE_08BIT, pattern); } static int ov08a10_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov08a10 *ov08a10 = container_of(ctrl->handler, struct ov08a10, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov08a10->cur_mode->height + ctrl->val - OV08A10_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov08a10->exposure, ov08a10->exposure->minimum, exposure_max, ov08a10->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov08a10_write_reg(ov08a10, OV08A10_REG_ANALOG_GAIN, OV08A10_REG_VALUE_16BIT, ctrl->val << 1); break; case V4L2_CID_DIGITAL_GAIN: ret = ov08a10_write_reg(ov08a10, OV08A10_REG_DIG_GAIN, OV08A10_REG_VALUE_24BIT, ctrl->val << 6); break; case V4L2_CID_EXPOSURE: ret = ov08a10_write_reg(ov08a10, OV08A10_REG_EXPOSURE, OV08A10_REG_VALUE_16BIT, ctrl->val); break; case V4L2_CID_VBLANK: ret = ov08a10_write_reg(ov08a10, OV08A10_REG_VTS, OV08A10_REG_VALUE_16BIT, ov08a10->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov08a10_test_pattern(ov08a10, ctrl->val); break; default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov08a10_ctrl_ops = { .s_ctrl = ov08a10_set_ctrl, }; static int ov08a10_init_controls(struct ov08a10 *ov08a10) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct ov08a10_mode *cur_mode; s64 exposure_max, h_blank; int ret = 0; int size; ctrl_hdlr = &ov08a10->ctrl_handler; ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); if (ret) return ret; ctrl_hdlr->lock = &ov08a10->mutex; cur_mode = ov08a10->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); ov08a10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_LINK_FREQ, ARRAY_SIZE (link_freq_menu_items) - 1, 0, link_freq_menu_items); if (ov08a10->link_freq) ov08a10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; ov08a10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, to_pixel_rate (OV08A10_LINK_FREQ_500MHZ_INDEX), 1, to_pixel_rate (OV08A10_LINK_FREQ_500MHZ_INDEX)); ov08a10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_VBLANK, ov08a10->cur_mode->vts_min, OV08A10_VTS_MAX, 1, ov08a10->cur_mode->vts_def); h_blank = to_pixels_per_line(ov08a10->cur_mode->hts, ov08a10->cur_mode->link_freq_index) - ov08a10->cur_mode->width; ov08a10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov08a10->hblank) ov08a10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; v4l2_ctrl_new_std(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV08A10_ANAL_GAIN_MIN, OV08A10_ANAL_GAIN_MAX, OV08A10_ANAL_GAIN_STEP, OV08A10_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV08A10_DGTL_GAIN_MIN, OV08A10_DGTL_GAIN_MAX, OV08A10_DGTL_GAIN_STEP, OV08A10_DGTL_GAIN_DEFAULT); exposure_max = (ov08a10->cur_mode->vts_def - 
OV08A10_EXPOSURE_MAX_MARGIN); ov08a10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_EXPOSURE, OV08A10_EXPOSURE_MIN, exposure_max, OV08A10_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov08a10_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov08a10_test_pattern_menu) - 1, 0, 0, ov08a10_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov08a10->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov08a10_update_pad_format(const struct ov08a10_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } static int ov08a10_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov08a10 *ov08a10 = to_ov08a10(sd); gpiod_set_value_cansleep(ov08a10->reset_gpio, 1); if (ov08a10->avdd) regulator_disable(ov08a10->avdd); clk_disable_unprepare(ov08a10->imgclk); msleep(50); return 0; } static int ov08a10_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov08a10 *ov08a10 = to_ov08a10(sd); int ret; ret = clk_prepare_enable(ov08a10->imgclk); if (ret < 0) { dev_err(dev, "failed to enable imgclk: %d", ret); return ret; } if (ov08a10->avdd) ret = regulator_enable(ov08a10->avdd); if (ret < 0) { dev_err(dev, "failed to enable avdd: %d", ret); return ret; } gpiod_set_value_cansleep(ov08a10->reset_gpio, 0); msleep(50); return 0; } static int ov08a10_start_streaming(struct ov08a10 *ov08a10) { struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); const struct ov08a10_reg_list *reg_list; int link_freq_index, ret; link_freq_index = ov08a10->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov08a10_write_reg_list(ov08a10, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &ov08a10->cur_mode->reg_list; ret = ov08a10_write_reg_list(ov08a10, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(ov08a10->sd.ctrl_handler); if (ret) return ret; ret = ov08a10_write_reg(ov08a10, OV08A10_REG_MODE_SELECT, OV08A10_REG_VALUE_08BIT, OV08A10_MODE_STREAMING); if (ret) { dev_err(&client->dev, "failed to set stream"); return ret; } return 0; } static void ov08a10_stop_streaming(struct ov08a10 *ov08a10) { struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); if (ov08a10_write_reg(ov08a10, OV08A10_REG_MODE_SELECT, OV08A10_REG_VALUE_08BIT, OV08A10_MODE_STANDBY)) dev_err(&client->dev, "failed to set stream"); } static int ov08a10_set_stream(struct v4l2_subdev *sd, int enable) { struct ov08a10 *ov08a10 = to_ov08a10(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (ov08a10->streaming == enable) return 0; mutex_lock(&ov08a10->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&ov08a10->mutex); return ret; } ret = ov08a10_start_streaming(ov08a10); if (ret) { dev_err(&client->dev, "start streaming failed\n"); enable = 0; ov08a10_stop_streaming(ov08a10); pm_runtime_put(&client->dev); } } else { ov08a10_stop_streaming(ov08a10); pm_runtime_put(&client->dev); } ov08a10->streaming = enable; mutex_unlock(&ov08a10->mutex); return ret; } static int __maybe_unused ov08a10_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov08a10 *ov08a10 = to_ov08a10(sd); 
mutex_lock(&ov08a10->mutex); if (ov08a10->streaming) ov08a10_stop_streaming(ov08a10); mutex_unlock(&ov08a10->mutex); return 0; } static int __maybe_unused ov08a10_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov08a10 *ov08a10 = to_ov08a10(sd); int ret; mutex_lock(&ov08a10->mutex); if (ov08a10->streaming) { ret = ov08a10_start_streaming(ov08a10); if (ret) { ov08a10->streaming = false; ov08a10_stop_streaming(ov08a10); mutex_unlock(&ov08a10->mutex); return ret; } } mutex_unlock(&ov08a10->mutex); return 0; } static int ov08a10_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct ov08a10 *ov08a10 = to_ov08a10(sd); const struct ov08a10_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&ov08a10->mutex); ov08a10_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov08a10->cur_mode = mode; __v4l2_ctrl_s_ctrl(ov08a10->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov08a10->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov08a10->vblank, mode->vts_min - mode->height, OV08A10_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov08a10->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(ov08a10->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov08a10->mutex); return 0; } static int ov08a10_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct ov08a10 *ov08a10 = to_ov08a10(sd); mutex_lock(&ov08a10->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov08a10->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov08a10_update_pad_format(ov08a10->cur_mode, &fmt->format); mutex_unlock(&ov08a10->mutex); return 0; } static int ov08a10_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov08a10_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov08a10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov08a10 *ov08a10 = to_ov08a10(sd); mutex_lock(&ov08a10->mutex); ov08a10_update_pad_format(&supported_modes[0], #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) v4l2_subdev_get_try_format(sd, fh->state, 0)); #else v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov08a10->mutex); return 0; } static const struct 
v4l2_subdev_video_ops ov08a10_video_ops = { .s_stream = ov08a10_set_stream, }; static const struct v4l2_subdev_pad_ops ov08a10_pad_ops = { .set_fmt = ov08a10_set_format, .get_fmt = ov08a10_get_format, .enum_mbus_code = ov08a10_enum_mbus_code, .enum_frame_size = ov08a10_enum_frame_size, }; static const struct v4l2_subdev_ops ov08a10_subdev_ops = { .video = &ov08a10_video_ops, .pad = &ov08a10_pad_ops, }; static const struct media_entity_operations ov08a10_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov08a10_internal_ops = { .open = ov08a10_open, }; static int ov08a10_identify_module(struct ov08a10 *ov08a10) { struct i2c_client *client = v4l2_get_subdevdata(&ov08a10->sd); int ret; u32 val; if (ov08a10->identified) return 0; ret = ov08a10_read_reg(ov08a10, OV08A10_REG_CHIP_ID, OV08A10_REG_VALUE_24BIT, &val); if (ret) return ret; if (val != OV08A10_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV08A10_CHIP_ID, val); return -ENXIO; } ov08a10->identified = true; return 0; } static int ov08a10_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); unsigned int i, j; int ret; u32 ext_clk; if (!fwnode) return -ENXIO; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { dev_err(dev, "can't get clock frequency"); return ret; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV08A10_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto out_err; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto out_err; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) break; } if (j == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequency %lld supported", link_freq_menu_items[i]); ret = -EINVAL; goto out_err; } } out_err: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov08a10_remove(struct i2c_client *client) #else static void ov08a10_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov08a10 *ov08a10 = to_ov08a10(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&ov08a10->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static void ov08a10_get_hwcfg(struct ov08a10 *ov08a10, struct device *dev) { ov08a10->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ov08a10->reset_gpio)) { dev_dbg(dev, "could not get gpio reset: %ld", PTR_ERR(ov08a10->reset_gpio)); ov08a10->reset_gpio = NULL; } ov08a10->imgclk = devm_clk_get_optional(dev, NULL); if (IS_ERR(ov08a10->imgclk)) { dev_dbg(dev, "could not get imgclk: %ld", PTR_ERR(ov08a10->imgclk)); ov08a10->imgclk = NULL; } ov08a10->avdd = devm_regulator_get_optional(dev, "avdd"); if (IS_ERR(ov08a10->avdd)) { dev_dbg(dev, "could not get regulator avdd: %ld", PTR_ERR(ov08a10->avdd)); ov08a10->avdd = NULL; } } static int 
ov08a10_probe(struct i2c_client *client) { struct ov08a10 *ov08a10; int ret = 0; /* Check HW config */ ret = ov08a10_check_hwcfg(&client->dev); if (ret) { dev_err(&client->dev, "failed to check hwcfg: %d", ret); return ret; } ov08a10 = devm_kzalloc(&client->dev, sizeof(*ov08a10), GFP_KERNEL); if (!ov08a10) return -ENOMEM; ov08a10->client = client; ov08a10_get_hwcfg(ov08a10, &client->dev); v4l2_i2c_subdev_init(&ov08a10->sd, client, &ov08a10_subdev_ops); ov08a10_power_on(&client->dev); ret = ov08a10_identify_module(ov08a10); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_power_down; } mutex_init(&ov08a10->mutex); ov08a10->cur_mode = &supported_modes[0]; ret = ov08a10_init_controls(ov08a10); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov08a10->sd.internal_ops = &ov08a10_internal_ops; ov08a10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov08a10->sd.entity.ops = &ov08a10_subdev_entity_ops; ov08a10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov08a10->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov08a10->sd.entity, 1, &ov08a10->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ret = v4l2_async_register_subdev_sensor(&ov08a10->sd); if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: dev_err(&client->dev, "media_entity_cleanup , probe error\n"); media_entity_cleanup(&ov08a10->sd.entity); probe_error_v4l2_ctrl_handler_free: dev_err(&client->dev, "v4l2_ctrl_handle_free\n"); v4l2_ctrl_handler_free(ov08a10->sd.ctrl_handler); mutex_destroy(&ov08a10->mutex); probe_error_power_down: ov08a10_power_off(&client->dev); return ret; } static const struct dev_pm_ops ov08a10_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov08a10_suspend, ov08a10_resume) SET_RUNTIME_PM_OPS(ov08a10_power_off, ov08a10_power_on, NULL) }; #ifdef CONFIG_ACPI static const struct acpi_device_id ov08a10_acpi_ids[] = { {"OVTI08A1"}, {} }; MODULE_DEVICE_TABLE(acpi, ov08a10_acpi_ids); #endif static struct i2c_driver ov08a10_i2c_driver = { .driver = { .name = "ov08a10", .pm = &ov08a10_pm_ops, .acpi_match_table = ov08a10_acpi_ids, }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov08a10_probe, #else .probe = ov08a10_probe, #endif .remove = ov08a10_remove, }; module_i2c_driver(ov08a10_i2c_driver); MODULE_AUTHOR("Jason Chen "); MODULE_AUTHOR("Shawn Tu "); MODULE_DESCRIPTION("OmniVision OV08A10 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov13858_intel.c000066400000000000000000001277161471702545500245350ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. 
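/*
 * Register access in this file goes through ov13858_read_reg() and
 * ov13858_write_reg() below: a 16-bit big-endian register address
 * followed by an up-to-4-byte big-endian value, with the byte count
 * given by the OV13858_REG_VALUE_* constants. For example (illustrative
 * call only), reading the 24-bit chip ID looks like:
 *
 *   u32 val;
 *   ret = ov13858_read_reg(ov13858, OV13858_REG_CHIP_ID,
 *                          OV13858_REG_VALUE_24BIT, &val);
 */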
#include #include #include #include #include #include #include #include #define OV13858_REG_VALUE_08BIT 1 #define OV13858_REG_VALUE_16BIT 2 #define OV13858_REG_VALUE_24BIT 3 #define OV13858_REG_MODE_SELECT 0x0100 #define OV13858_MODE_STANDBY 0x00 #define OV13858_MODE_STREAMING 0x01 #define OV13858_REG_SOFTWARE_RST 0x0103 #define OV13858_SOFTWARE_RST 0x01 /* PLL1 generates PCLK and MIPI_PHY_CLK */ #define OV13858_REG_PLL1_CTRL_0 0x0300 #define OV13858_REG_PLL1_CTRL_1 0x0301 #define OV13858_REG_PLL1_CTRL_2 0x0302 #define OV13858_REG_PLL1_CTRL_3 0x0303 #define OV13858_REG_PLL1_CTRL_4 0x0304 #define OV13858_REG_PLL1_CTRL_5 0x0305 /* PLL2 generates DAC_CLK, SCLK and SRAM_CLK */ #define OV13858_REG_PLL2_CTRL_B 0x030b #define OV13858_REG_PLL2_CTRL_C 0x030c #define OV13858_REG_PLL2_CTRL_D 0x030d #define OV13858_REG_PLL2_CTRL_E 0x030e #define OV13858_REG_PLL2_CTRL_F 0x030f #define OV13858_REG_PLL2_CTRL_12 0x0312 #define OV13858_REG_MIPI_SC_CTRL0 0x3016 #define OV13858_REG_MIPI_SC_CTRL1 0x3022 /* Chip ID */ #define OV13858_REG_CHIP_ID 0x300a #define OV13858_CHIP_ID 0x00d855 /* V_TIMING internal */ #define OV13858_REG_VTS 0x380e #define OV13858_VTS_30FPS 0x0c8e /* 30 fps */ #define OV13858_VTS_60FPS 0x0648 /* 60 fps */ #define OV13858_VTS_MAX 0x7fff /* HBLANK control - read only */ #define OV13858_PPL_270MHZ 2244 #define OV13858_PPL_540MHZ 4488 /* Exposure control */ #define OV13858_REG_EXPOSURE 0x3500 #define OV13858_EXPOSURE_MIN 4 #define OV13858_EXPOSURE_STEP 1 #define OV13858_EXPOSURE_DEFAULT 0x640 /* Format1 control */ #define OV13858_REG_FORMAT1 0x3820 #define OV13858_FORMAT1_DFT 0xA0 /* Analog gain control */ #define OV13858_REG_ANALOG_GAIN 0x3508 #define OV13858_ANA_GAIN_MIN 0 #define OV13858_ANA_GAIN_MAX 0x1fff #define OV13858_ANA_GAIN_STEP 1 #define OV13858_ANA_GAIN_DEFAULT 0x80 /* Digital gain control */ #define OV13858_REG_B_MWB_GAIN 0x5100 #define OV13858_REG_G_MWB_GAIN 0x5102 #define OV13858_REG_R_MWB_GAIN 0x5104 #define OV13858_DGTL_GAIN_MIN 0 #define OV13858_DGTL_GAIN_MAX 16384 /* Max = 16 X */ #define OV13858_DGTL_GAIN_DEFAULT 1024 /* Default gain = 1 X */ #define OV13858_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */ /* Test Pattern Control */ #define OV13858_REG_TEST_PATTERN 0x4503 #define OV13858_TEST_PATTERN_ENABLE BIT(7) #define OV13858_TEST_PATTERN_MASK 0xfc /* Number of frames to skip */ #define OV13858_NUM_OF_SKIP_FRAMES 2 struct ov13858_reg { u16 address; u8 val; }; struct ov13858_reg_list { u32 num_of_regs; const struct ov13858_reg *regs; }; /* Link frequency config */ struct ov13858_link_freq_config { u32 pixels_per_line; /* PLL registers for this link frequency */ struct ov13858_reg_list reg_list; }; /* Mode : resolution and related config&values */ struct ov13858_mode { /* Frame width */ u32 width; /* Frame height */ u32 height; /* V-timing */ u32 vts_def; u32 vts_min; /* Index of Link frequency config to be used */ u32 link_freq_index; /* Default register values */ struct ov13858_reg_list reg_list; }; /* Format1: used for vertical/horizontal flip */ union ov13858_format1 { u8 val; struct { u8 d0 : 3; /* bit[0:2] */ u8 hflip : 1; /* 0 enable, 1 disable */ u8 vflip : 1; /* 0 disable, 1 enable */ u8 d1 : 3; /* bit[5:7] */ } bits; }; /* 4224x3136 needs 1080Mbps/lane, 4 lanes */ static const struct ov13858_reg mipi_data_rate_1080mbps[] = { /* PLL1 registers */ {OV13858_REG_PLL1_CTRL_0, 0x07}, {OV13858_REG_PLL1_CTRL_1, 0x01}, {OV13858_REG_PLL1_CTRL_2, 0xc2}, {OV13858_REG_PLL1_CTRL_3, 0x00}, {OV13858_REG_PLL1_CTRL_4, 0x00}, {OV13858_REG_PLL1_CTRL_5, 0x01}, /* PLL2 registers */ 
{OV13858_REG_PLL2_CTRL_B, 0x05}, {OV13858_REG_PLL2_CTRL_C, 0x01}, {OV13858_REG_PLL2_CTRL_D, 0x0e}, {OV13858_REG_PLL2_CTRL_E, 0x05}, {OV13858_REG_PLL2_CTRL_F, 0x01}, {OV13858_REG_PLL2_CTRL_12, 0x01}, {OV13858_REG_MIPI_SC_CTRL0, 0x72}, {OV13858_REG_MIPI_SC_CTRL1, 0x01}, }; /* * 2112x1568, 2112x1188, 1056x784 need 540Mbps/lane, * 4 lanes */ static const struct ov13858_reg mipi_data_rate_540mbps[] = { /* PLL1 registers */ {OV13858_REG_PLL1_CTRL_0, 0x07}, {OV13858_REG_PLL1_CTRL_1, 0x01}, {OV13858_REG_PLL1_CTRL_2, 0xc2}, {OV13858_REG_PLL1_CTRL_3, 0x01}, {OV13858_REG_PLL1_CTRL_4, 0x00}, {OV13858_REG_PLL1_CTRL_5, 0x01}, /* PLL2 registers */ {OV13858_REG_PLL2_CTRL_B, 0x05}, {OV13858_REG_PLL2_CTRL_C, 0x01}, {OV13858_REG_PLL2_CTRL_D, 0x0e}, {OV13858_REG_PLL2_CTRL_E, 0x05}, {OV13858_REG_PLL2_CTRL_F, 0x01}, {OV13858_REG_PLL2_CTRL_12, 0x01}, {OV13858_REG_MIPI_SC_CTRL0, 0x72}, {OV13858_REG_MIPI_SC_CTRL1, 0x01}, }; static const struct ov13858_reg mode_4224x3136_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x12}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x24}, {0x371a, 0x3e}, {0x3737, 0x04}, {0x3738, 0xcc}, {0x3739, 0x12}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x04}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x04}, {0x37e4, 0x2a}, {0x37e5, 0x03}, {0x37e6, 0x04}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x08}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x57}, {0x3808, 0x10}, {0x3809, 0x80}, {0x380a, 0x0c}, {0x380b, 0x40}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x04}, {0x3813, 0x05}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x11}, {0x3827, 0x1c}, {0x3829, 0x03}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0f}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x0e}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, 
{0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_4096x3072_regs[] = { {0x3012, 0x40}, {0x3013, 0x72}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x3500, 0x00}, {0x3501, 0x80}, {0x3502, 0x00}, {0x3508, 0x02}, {0x3509, 0x00}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x12}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3706, 0x40}, {0x3709, 0x5f}, {0x3714, 0x24}, {0x371a, 0x3e}, {0x3737, 0x04}, {0x3738, 0xcc}, {0x3739, 0x12}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x04}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x04}, {0x37e4, 0x2a}, {0x37e5, 0x03}, {0x37e6, 0x04}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x08}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x57}, {0x3808, 0x10}, {0x3809, 0x00}, {0x380a, 0x0c}, {0x380b, 0x00}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3810, 0x00}, {0x3811, 0x50}, {0x3812, 0x00}, {0x3813, 0x29}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x11}, {0x3827, 0x1c}, {0x3829, 0x03}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0f}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x0e}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, 
{0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_2112x1568_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x10}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x28}, {0x371a, 0x3e}, {0x3737, 0x08}, {0x3738, 0xcc}, {0x3739, 0x20}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x14}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x08}, {0x37e4, 0x38}, {0x37e5, 0x03}, {0x37e6, 0x08}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x5f}, {0x3808, 0x08}, {0x3809, 0x40}, {0x380a, 0x06}, {0x380b, 0x20}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x04}, {0x3813, 0x05}, {0x3814, 0x03}, {0x3815, 0x01}, {0x3816, 0x03}, {0x3817, 0x01}, {0x3820, 0xab}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x04}, {0x3827, 0x90}, {0x3829, 0x07}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0d}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x1c}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_2112x1188_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 
0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x10}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x28}, {0x371a, 0x3e}, {0x3737, 0x08}, {0x3738, 0xcc}, {0x3739, 0x20}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x14}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x08}, {0x37e4, 0x38}, {0x37e5, 0x03}, {0x37e6, 0x08}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x01}, {0x3803, 0x84}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0a}, {0x3807, 0xd3}, {0x3808, 0x08}, {0x3809, 0x40}, {0x380a, 0x04}, {0x380b, 0xa4}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x08}, {0x3813, 0x03}, {0x3814, 0x03}, {0x3815, 0x01}, {0x3816, 0x03}, {0x3817, 0x01}, {0x3820, 0xab}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x04}, {0x3827, 0x90}, {0x3829, 0x07}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0d}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x1c}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_1056x784_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x08}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x30}, {0x371a, 0x3e}, {0x3737, 0x08}, {0x3738, 0xcc}, {0x3739, 0x20}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x2c}, {0x37c3, 0xf1}, {0x37c5, 0x00}, 
{0x37d8, 0x03}, {0x37d9, 0x06}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x08}, {0x37e4, 0x36}, {0x37e5, 0x03}, {0x37e6, 0x08}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x5f}, {0x3808, 0x04}, {0x3809, 0x20}, {0x380a, 0x03}, {0x380b, 0x10}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x04}, {0x3813, 0x05}, {0x3814, 0x07}, {0x3815, 0x01}, {0x3816, 0x07}, {0x3817, 0x01}, {0x3820, 0xac}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x04}, {0x3827, 0x48}, {0x3829, 0x03}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x05}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x02}, {0x4051, 0x05}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x1e}, {0x4902, 0x02}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const char * const ov13858_test_pattern_menu[] = { "Disabled", "Vertical Color Bar Type 1", "Vertical Color Bar Type 2", "Vertical Color Bar Type 3", "Vertical Color Bar Type 4" }; /* Configurations for supported link frequencies */ #define OV13858_NUM_OF_LINK_FREQS 2 #define OV13858_LINK_FREQ_540MHZ 540000000ULL #define OV13858_LINK_FREQ_270MHZ 270000000ULL #define OV13858_LINK_FREQ_INDEX_0 0 #define OV13858_LINK_FREQ_INDEX_1 1 /* * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample * data rate => double data rate; number of lanes => 4; bits per pixel => 10 */ static u64 link_freq_to_pixel_rate(u64 f) { f *= 2 * 4; do_div(f, 10); return f; } /* Menu items for LINK_FREQ V4L2 control */ static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = { OV13858_LINK_FREQ_540MHZ, OV13858_LINK_FREQ_270MHZ }; /* Link frequency configs */ static const struct ov13858_link_freq_config link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = { { .pixels_per_line = OV13858_PPL_540MHZ, .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps), .regs = mipi_data_rate_1080mbps, } }, { .pixels_per_line = OV13858_PPL_270MHZ, .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps), .regs = mipi_data_rate_540mbps, } } }; /* Mode configs */ static const struct ov13858_mode supported_modes[] = { { .width = 4096, .height = 
3072, .vts_def = OV13858_VTS_30FPS, .vts_min = OV13858_VTS_30FPS, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_4096x3072_regs), .regs = mode_4096x3072_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_0, }, { .width = 4224, .height = 3136, .vts_def = OV13858_VTS_30FPS, .vts_min = OV13858_VTS_30FPS, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_4224x3136_regs), .regs = mode_4224x3136_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_0, }, { .width = 2112, .height = 1568, .vts_def = OV13858_VTS_30FPS, .vts_min = 1608, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2112x1568_regs), .regs = mode_2112x1568_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_1, }, { .width = 2112, .height = 1188, .vts_def = OV13858_VTS_30FPS, .vts_min = 1608, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2112x1188_regs), .regs = mode_2112x1188_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_1, }, { .width = 1056, .height = 784, .vts_def = OV13858_VTS_30FPS, .vts_min = 804, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1056x784_regs), .regs = mode_1056x784_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_1, } }; struct ov13858 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; /* Current mode */ const struct ov13858_mode *cur_mode; /* Current format1 */ union ov13858_format1 fmt1; /* Mutex for serialized access */ struct mutex mutex; /* Streaming on/off */ bool streaming; }; #define to_ov13858(_sd) container_of(_sd, struct ov13858, sd) /* Read registers up to 4 at a time */ static int ov13858_read_reg(struct ov13858 *ov13858, u16 reg, u32 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); struct i2c_msg msgs[2]; u8 *data_be_p; int ret; __be32 data_be = 0; __be16 reg_addr_be = cpu_to_be16(reg); if (len > 4) return -EINVAL; data_be_p = (u8 *)&data_be; /* Write register address */ msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 2; msgs[0].buf = (u8 *)®_addr_be; /* Read data from register */ msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_be_p[4 - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return -EIO; *val = be32_to_cpu(data_be); return 0; } /* Write registers up to 4 at a time */ static int ov13858_write_reg(struct ov13858 *ov13858, u16 reg, u32 len, u32 __val) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); int buf_i, val_i; u8 buf[6], *val_p; __be32 val; if (len > 4) return -EINVAL; buf[0] = reg >> 8; buf[1] = reg & 0xff; val = cpu_to_be32(__val); val_p = (u8 *)&val; buf_i = 2; val_i = 4 - len; while (val_i < 4) buf[buf_i++] = val_p[val_i++]; if (i2c_master_send(client, buf, len + 2) != len + 2) return -EIO; return 0; } /* Write a list of registers */ static int ov13858_write_regs(struct ov13858 *ov13858, const struct ov13858_reg *regs, u32 len) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); int ret; u32 i; for (i = 0; i < len; i++) { ret = ov13858_write_reg(ov13858, regs[i].address, 1, regs[i].val); if (ret) { dev_err_ratelimited( &client->dev, "Failed to write reg 0x%4.4x. 
error = %d\n", regs[i].address, ret); return ret; } } return 0; } static int ov13858_write_reg_list(struct ov13858 *ov13858, const struct ov13858_reg_list *r_list) { return ov13858_write_regs(ov13858, r_list->regs, r_list->num_of_regs); } /* Open sub-device */ static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov13858 *ov13858 = to_ov13858(sd); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd, fh->pad, 0); #else struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_state_get_format( fh->pad, 0); #endif mutex_lock(&ov13858->mutex); /* Initialize try_fmt */ try_fmt->width = ov13858->cur_mode->width; try_fmt->height = ov13858->cur_mode->height; try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; try_fmt->field = V4L2_FIELD_NONE; /* No crop or compose */ mutex_unlock(&ov13858->mutex); return 0; } static int ov13858_update_digital_gain(struct ov13858 *ov13858, u32 d_gain) { int ret; ret = ov13858_write_reg(ov13858, OV13858_REG_B_MWB_GAIN, OV13858_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = ov13858_write_reg(ov13858, OV13858_REG_G_MWB_GAIN, OV13858_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = ov13858_write_reg(ov13858, OV13858_REG_R_MWB_GAIN, OV13858_REG_VALUE_16BIT, d_gain); return ret; } static int ov13858_enable_test_pattern(struct ov13858 *ov13858, u32 pattern) { int ret; u32 val; ret = ov13858_read_reg(ov13858, OV13858_REG_TEST_PATTERN, OV13858_REG_VALUE_08BIT, &val); if (ret) return ret; if (pattern) { val &= OV13858_TEST_PATTERN_MASK; val |= (pattern - 1) | OV13858_TEST_PATTERN_ENABLE; } else { val &= ~OV13858_TEST_PATTERN_ENABLE; } return ov13858_write_reg(ov13858, OV13858_REG_TEST_PATTERN, OV13858_REG_VALUE_08BIT, val); } static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov13858 *ov13858 = container_of(ctrl->handler, struct ov13858, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); s64 max; int ret; /* Propagate change of current control to all related controls */ switch (ctrl->id) { case V4L2_CID_VBLANK: /* Update max exposure while meeting expected vblanking */ max = ov13858->cur_mode->height + ctrl->val - 8; __v4l2_ctrl_modify_range(ov13858->exposure, ov13858->exposure->minimum, max, ov13858->exposure->step, max); break; } /* * Applying V4L2 control value only happens * when power is up for streaming */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov13858_write_reg(ov13858, OV13858_REG_ANALOG_GAIN, OV13858_REG_VALUE_16BIT, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = ov13858_update_digital_gain(ov13858, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = ov13858_write_reg(ov13858, OV13858_REG_EXPOSURE, OV13858_REG_VALUE_24BIT, ctrl->val << 4); break; case V4L2_CID_VBLANK: /* Update VTS that meets expected vertical blanking */ ret = ov13858_write_reg(ov13858, OV13858_REG_VTS, OV13858_REG_VALUE_16BIT, ov13858->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov13858_enable_test_pattern(ov13858, ctrl->val); break; case V4L2_CID_HFLIP: ov13858->fmt1.bits.hflip = !ctrl->val; ret = ov13858_write_reg(ov13858, OV13858_REG_FORMAT1, OV13858_REG_VALUE_08BIT, ov13858->fmt1.val); break; case V4L2_CID_VFLIP: ov13858->fmt1.bits.vflip = ctrl->val; ret = ov13858_write_reg(ov13858, OV13858_REG_FORMAT1, OV13858_REG_VALUE_08BIT, ov13858->fmt1.val); break; default: dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled\n", ctrl->id, ctrl->val); break; } 
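/* Drop the runtime PM reference taken by pm_runtime_get_if_in_use() above. */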
pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov13858_ctrl_ops = { .s_ctrl = ov13858_set_ctrl, }; static int ov13858_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) { /* Only one bayer order(GRBG) is supported */ if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov13858_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static void ov13858_update_pad_format(const struct ov13858_mode *mode, struct v4l2_subdev_format *fmt) { fmt->format.width = mode->width; fmt->format.height = mode->height; fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->format.field = V4L2_FIELD_NONE; } static int ov13858_do_get_pad_format(struct ov13858 *ov13858, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct v4l2_mbus_framefmt *framefmt; struct v4l2_subdev *sd = &ov13858->sd; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); #else framefmt = v4l2_subdev_state_get_format(cfg, fmt->pad); #endif fmt->format = *framefmt; } else { ov13858_update_pad_format(ov13858->cur_mode, fmt); } return 0; } static int ov13858_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct ov13858 *ov13858 = to_ov13858(sd); int ret; mutex_lock(&ov13858->mutex); ret = ov13858_do_get_pad_format(ov13858, cfg, fmt); mutex_unlock(&ov13858->mutex); return ret; } static int ov13858_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct ov13858 *ov13858 = to_ov13858(sd); const struct ov13858_mode *mode; struct v4l2_mbus_framefmt *framefmt; s32 vblank_def; s32 vblank_min; s64 h_blank; s64 pixel_rate; s64 link_freq; mutex_lock(&ov13858->mutex); /* Only one raw bayer(GRBG) order is supported */ if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10) fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); ov13858_update_pad_format(mode, fmt); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); #else framefmt = v4l2_subdev_state_get_format(cfg, fmt->pad); #endif *framefmt = fmt->format; } else { ov13858->cur_mode = mode; __v4l2_ctrl_s_ctrl(ov13858->link_freq, mode->link_freq_index); link_freq = link_freq_menu_items[mode->link_freq_index]; pixel_rate = link_freq_to_pixel_rate(link_freq); __v4l2_ctrl_s_ctrl_int64(ov13858->pixel_rate, pixel_rate); /* Update limits and set FPS to default */ vblank_def = ov13858->cur_mode->vts_def - ov13858->cur_mode->height; vblank_min = ov13858->cur_mode->vts_min - ov13858->cur_mode->height; __v4l2_ctrl_modify_range( ov13858->vblank, vblank_min, OV13858_VTS_MAX - ov13858->cur_mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov13858->vblank, vblank_def); h_blank = link_freq_configs[mode->link_freq_index].pixels_per_line - ov13858->cur_mode->width; 
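/* HBLANK is fixed by the link frequency: min, max and default all collapse to pixels_per_line minus the active width. */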
__v4l2_ctrl_modify_range(ov13858->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov13858->mutex); return 0; } static int ov13858_get_skip_frames(struct v4l2_subdev *sd, u32 *frames) { *frames = OV13858_NUM_OF_SKIP_FRAMES; return 0; } /* Start streaming */ static int ov13858_start_streaming(struct ov13858 *ov13858) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); const struct ov13858_reg_list *reg_list; int ret, link_freq_index; /* Get out of from software reset */ ret = ov13858_write_reg(ov13858, OV13858_REG_SOFTWARE_RST, OV13858_REG_VALUE_08BIT, OV13858_SOFTWARE_RST); if (ret) { dev_err(&client->dev, "%s failed to set powerup registers\n", __func__); return ret; } /* Setup PLL */ link_freq_index = ov13858->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov13858_write_reg_list(ov13858, reg_list); if (ret) { dev_err(&client->dev, "%s failed to set plls\n", __func__); return ret; } /* Apply default values of current mode */ reg_list = &ov13858->cur_mode->reg_list; ret = ov13858_write_reg_list(ov13858, reg_list); if (ret) { dev_err(&client->dev, "%s failed to set mode\n", __func__); return ret; } /* Apply customized values from user */ ret = __v4l2_ctrl_handler_setup(ov13858->sd.ctrl_handler); if (ret) return ret; return ov13858_write_reg(ov13858, OV13858_REG_MODE_SELECT, OV13858_REG_VALUE_08BIT, OV13858_MODE_STREAMING); } /* Stop streaming */ static int ov13858_stop_streaming(struct ov13858 *ov13858) { return ov13858_write_reg(ov13858, OV13858_REG_MODE_SELECT, OV13858_REG_VALUE_08BIT, OV13858_MODE_STANDBY); } static int ov13858_set_stream(struct v4l2_subdev *sd, int enable) { struct ov13858 *ov13858 = to_ov13858(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; mutex_lock(&ov13858->mutex); if (ov13858->streaming == enable) { mutex_unlock(&ov13858->mutex); return 0; } if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); goto err_unlock; } /* * Apply default & customized values * and then start streaming. 
*/ ret = ov13858_start_streaming(ov13858); if (ret) goto err_rpm_put; } else { ov13858_stop_streaming(ov13858); pm_runtime_put(&client->dev); } ov13858->streaming = enable; mutex_unlock(&ov13858->mutex); return ret; err_rpm_put: pm_runtime_put(&client->dev); err_unlock: mutex_unlock(&ov13858->mutex); return ret; } static int __maybe_unused ov13858_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov13858 *ov13858 = to_ov13858(sd); if (ov13858->streaming) ov13858_stop_streaming(ov13858); return 0; } static int __maybe_unused ov13858_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov13858 *ov13858 = to_ov13858(sd); int ret; if (ov13858->streaming) { ret = ov13858_start_streaming(ov13858); if (ret) goto error; } return 0; error: ov13858_stop_streaming(ov13858); ov13858->streaming = false; return ret; } /* Verify chip ID */ static int ov13858_identify_module(struct ov13858 *ov13858) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); int ret; u32 val; ret = ov13858_read_reg(ov13858, OV13858_REG_CHIP_ID, OV13858_REG_VALUE_24BIT, &val); if (ret) return ret; if (val != OV13858_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x\n", OV13858_CHIP_ID, val); return -EIO; } return 0; } static const struct v4l2_subdev_video_ops ov13858_video_ops = { .s_stream = ov13858_set_stream, }; static const struct v4l2_subdev_pad_ops ov13858_pad_ops = { .enum_mbus_code = ov13858_enum_mbus_code, .get_fmt = ov13858_get_pad_format, .set_fmt = ov13858_set_pad_format, .enum_frame_size = ov13858_enum_frame_size, }; static const struct v4l2_subdev_sensor_ops ov13858_sensor_ops = { .g_skip_frames = ov13858_get_skip_frames, }; static const struct v4l2_subdev_ops ov13858_subdev_ops = { .video = &ov13858_video_ops, .pad = &ov13858_pad_ops, .sensor = &ov13858_sensor_ops, }; static const struct media_entity_operations ov13858_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov13858_internal_ops = { .open = ov13858_open, }; /* Initialize control handlers */ static int ov13858_init_controls(struct ov13858 *ov13858) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); struct v4l2_fwnode_device_properties props; struct v4l2_ctrl_handler *ctrl_hdlr; s64 exposure_max; s64 vblank_def; s64 vblank_min; s64 hblank; s64 pixel_rate_min; s64 pixel_rate_max; const struct ov13858_mode *mode; int ret; ctrl_hdlr = &ov13858->ctrl_handler; ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10); if (ret) return ret; mutex_init(&ov13858->mutex); ctrl_hdlr->lock = &ov13858->mutex; ov13858->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_LINK_FREQ, OV13858_NUM_OF_LINK_FREQS - 1, 0, link_freq_menu_items); if (ov13858->link_freq) ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]); pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]); /* By default, PIXEL_RATE is read only */ ov13858->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_PIXEL_RATE, pixel_rate_min, pixel_rate_max, 1, pixel_rate_max); mode = ov13858->cur_mode; vblank_def = mode->vts_def - mode->height; vblank_min = mode->vts_min - mode->height; ov13858->vblank = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK, vblank_min, OV13858_VTS_MAX - mode->height, 1, vblank_def); hblank = 
link_freq_configs[mode->link_freq_index].pixels_per_line - mode->width; ov13858->hblank = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK, hblank, hblank, 1, hblank); if (ov13858->hblank) ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; exposure_max = mode->vts_def - 8; ov13858->exposure = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN, exposure_max, OV13858_EXPOSURE_STEP, OV13858_EXPOSURE_DEFAULT); ov13858->hflip = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); ov13858->vflip = v4l2_ctrl_new_std( ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); ov13858->fmt1.val = OV13858_FORMAT1_DFT; v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV13858_ANA_GAIN_MIN, OV13858_ANA_GAIN_MAX, OV13858_ANA_GAIN_STEP, OV13858_ANA_GAIN_DEFAULT); /* Digital gain */ v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV13858_DGTL_GAIN_MIN, OV13858_DGTL_GAIN_MAX, OV13858_DGTL_GAIN_STEP, OV13858_DGTL_GAIN_DEFAULT); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov13858_test_pattern_menu) - 1, 0, 0, ov13858_test_pattern_menu); if (ctrl_hdlr->error) { ret = ctrl_hdlr->error; dev_err(&client->dev, "%s control init failed (%d)\n", __func__, ret); goto error; } ret = v4l2_fwnode_device_parse(&client->dev, &props); if (ret) goto error; ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov13858_ctrl_ops, &props); if (ret) goto error; ov13858->sd.ctrl_handler = ctrl_hdlr; return 0; error: v4l2_ctrl_handler_free(ctrl_hdlr); mutex_destroy(&ov13858->mutex); return ret; } static void ov13858_free_controls(struct ov13858 *ov13858) { v4l2_ctrl_handler_free(ov13858->sd.ctrl_handler); mutex_destroy(&ov13858->mutex); } static int ov13858_probe(struct i2c_client *client, const struct i2c_device_id *devid) { struct ov13858 *ov13858; int ret; u32 val = 0; device_property_read_u32(&client->dev, "clock-frequency", &val); if (val != 19200000) return -EINVAL; ov13858 = devm_kzalloc(&client->dev, sizeof(*ov13858), GFP_KERNEL); if (!ov13858) return -ENOMEM; /* Initialize subdev */ v4l2_i2c_subdev_init(&ov13858->sd, client, &ov13858_subdev_ops); /* Check module identity */ ret = ov13858_identify_module(ov13858); if (ret) { dev_err(&client->dev, "failed to find sensor: %d\n", ret); return ret; } /* Set default mode to max resolution */ ov13858->cur_mode = &supported_modes[0]; ret = ov13858_init_controls(ov13858); if (ret) return ret; /* Initialize subdev */ ov13858->sd.internal_ops = &ov13858_internal_ops; ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov13858->sd.entity.ops = &ov13858_subdev_entity_ops; ov13858->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; /* Initialize source pad */ ov13858->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov13858->sd.entity, 1, &ov13858->pad); if (ret) { dev_err(&client->dev, "%s failed:%d\n", __func__, ret); goto error_handler_free; } ret = v4l2_async_register_subdev_sensor_common(&ov13858->sd); if (ret < 0) goto error_media_entity; /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. 
*/ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; error_media_entity: media_entity_cleanup(&ov13858->sd.entity); error_handler_free: ov13858_free_controls(ov13858); dev_err(&client->dev, "%s failed:%d\n", __func__, ret); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov13858_remove(struct i2c_client *client) #else static void ov13858_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov13858 *ov13858 = to_ov13858(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); ov13858_free_controls(ov13858); pm_runtime_disable(&client->dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static const struct i2c_device_id ov13858_id_table[] = { {"ov13858", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, ov13858_id_table); static const struct dev_pm_ops ov13858_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov13858_suspend, ov13858_resume) }; #ifdef CONFIG_ACPI static const struct acpi_device_id ov13858_acpi_ids[] = { {"OVTID858"}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(acpi, ov13858_acpi_ids); #endif static struct i2c_driver ov13858_i2c_driver = { .driver = { .name = "ov13858", .pm = &ov13858_pm_ops, .acpi_match_table = ACPI_PTR(ov13858_acpi_ids), }, .probe = ov13858_probe, .remove = ov13858_remove, .id_table = ov13858_id_table, }; module_i2c_driver(ov13858_i2c_driver); MODULE_AUTHOR("Kan, Chris "); MODULE_AUTHOR("Rapolu, Chiranjeevi "); MODULE_AUTHOR("Yang, Hyungwoo "); MODULE_DESCRIPTION("Omnivision ov13858 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov2740.c000066400000000000000000001122441471702545500232340ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2022 Intel Corporation. 
#include <linux/version.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include <asm/unaligned.h> #else #include <linux/unaligned.h> #endif #include <linux/acpi.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/nvmem-provider.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #define OV2740_LINK_FREQ_360MHZ 360000000ULL #define OV2740_LINK_FREQ_180MHZ 180000000ULL #define OV2740_SCLK 72000000LL #define OV2740_MCLK 19200000 #define OV2740_DATA_LANES 2 #define OV2740_RGB_DEPTH 10 #define OV2740_REG_CHIP_ID 0x300a #define OV2740_CHIP_ID 0x2740 #define OV2740_REG_MODE_SELECT 0x0100 #define OV2740_MODE_STANDBY 0x00 #define OV2740_MODE_STREAMING 0x01 #define OV2740_REG_DELAY 0xffff /* vertical-timings from sensor */ #define OV2740_REG_VTS 0x380e /* horizontal-timings from sensor */ #define OV2740_REG_HTS 0x380c /* Exposure controls from sensor */ #define OV2740_REG_EXPOSURE 0x3500 #define OV2740_EXPOSURE_MIN 4 #define OV2740_EXPOSURE_MAX_MARGIN 8 #define OV2740_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV2740_REG_ANALOG_GAIN 0x3508 #define OV2740_ANAL_GAIN_MIN 128 #define OV2740_ANAL_GAIN_MAX 1983 #define OV2740_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define OV2740_REG_MWB_R_GAIN 0x500a #define OV2740_REG_MWB_G_GAIN 0x500c #define OV2740_REG_MWB_B_GAIN 0x500e #define OV2740_DGTL_GAIN_MIN 1024 #define OV2740_DGTL_GAIN_MAX 4095 #define OV2740_DGTL_GAIN_STEP 1 #define OV2740_DGTL_GAIN_DEFAULT 1024 /* Test Pattern Control */ #define OV2740_REG_TEST_PATTERN 0x5040 #define OV2740_TEST_PATTERN_ENABLE BIT(7) #define OV2740_TEST_PATTERN_BAR_SHIFT 2 /* Group Access */ #define OV2740_REG_GROUP_ACCESS 0x3208 #define OV2740_GROUP_HOLD_START 0x0 #define OV2740_GROUP_HOLD_END 0x10 #define OV2740_GROUP_HOLD_LAUNCH 0xa0 /* ISP CTRL00 */ #define OV2740_REG_ISP_CTRL00 0x5000 /* ISP CTRL01 */ #define OV2740_REG_ISP_CTRL01 0x5001 /* Customer Addresses: 0x7010 - 0x710F */ #define CUSTOMER_USE_OTP_SIZE 0x100 /* OTP registers from sensor */ #define OV2740_REG_OTP_CUSTOMER 0x7010 struct nvm_data { struct i2c_client *client; struct nvmem_device *nvmem; struct regmap *regmap; char *nvm_buffer; }; enum { OV2740_LINK_FREQ_360MHZ_INDEX, OV2740_LINK_FREQ_180MHZ_INDEX, }; struct ov2740_reg { u16 address; u8 val; }; struct ov2740_reg_list { u32 num_of_regs; const struct ov2740_reg *regs; }; struct ov2740_link_freq_config { const struct ov2740_reg_list reg_list; }; struct ov2740_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timing size */ u32 hts; /* Default vertical timing size */ u32 vts_def; /* Min vertical timing size */ u32 vts_min; /* Max vertical timing size */ u32 vts_max; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct ov2740_reg_list reg_list; }; /* * 822ace8f-2814-4174-a56b-5f029fe079ee * This _DSM GUID returns a string from the sensor device, which acts as a * module identifier. 
*/ static const guid_t cio2_sensor_module_guid = GUID_INIT(0x822ace8f, 0x2814, 0x4174, 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee); static const struct ov2740_reg mipi_data_rate_720mbps[] = { {0x0103, 0x01}, {0xffff, 0x10}, {0x0302, 0x4b}, {0x030d, 0x4b}, {0x030e, 0x02}, {0x030a, 0x01}, {0x0312, 0x11}, }; static const struct ov2740_reg mipi_data_rate_360mbps[] = { {0x0103, 0x01}, {0xffff, 0x10}, {0x0302, 0x4b}, {0x0303, 0x01}, {0x030d, 0x4b}, {0x030e, 0x02}, {0x030a, 0x01}, {0x0312, 0x11}, {0x4837, 0x2c}, }; static const struct ov2740_reg mode_1932x1092_regs[] = { {0x3000, 0x00}, {0x3018, 0x32}, {0x3031, 0x0a}, {0x3080, 0x08}, {0x3083, 0xB4}, {0x3103, 0x00}, {0x3104, 0x01}, {0x3106, 0x01}, {0x3500, 0x00}, {0x3501, 0x44}, {0x3502, 0x40}, {0x3503, 0x88}, {0x3507, 0x00}, {0x3508, 0x00}, {0x3509, 0x80}, {0x350c, 0x00}, {0x350d, 0x80}, {0x3510, 0x00}, {0x3511, 0x00}, {0x3512, 0x20}, {0x3632, 0x00}, {0x3633, 0x10}, {0x3634, 0x10}, {0x3635, 0x10}, {0x3645, 0x13}, {0x3646, 0x81}, {0x3636, 0x10}, {0x3651, 0x0a}, {0x3656, 0x02}, {0x3659, 0x04}, {0x365a, 0xda}, {0x365b, 0xa2}, {0x365c, 0x04}, {0x365d, 0x1d}, {0x365e, 0x1a}, {0x3662, 0xd7}, {0x3667, 0x78}, {0x3669, 0x0a}, {0x366a, 0x92}, {0x3700, 0x54}, {0x3702, 0x10}, {0x3706, 0x42}, {0x3709, 0x30}, {0x370b, 0xc2}, {0x3714, 0x63}, {0x3715, 0x01}, {0x3716, 0x00}, {0x371a, 0x3e}, {0x3732, 0x0e}, {0x3733, 0x10}, {0x375f, 0x0e}, {0x3768, 0x30}, {0x3769, 0x44}, {0x376a, 0x22}, {0x377b, 0x20}, {0x377c, 0x00}, {0x377d, 0x0c}, {0x3798, 0x00}, {0x37a1, 0x55}, {0x37a8, 0x6d}, {0x37c2, 0x04}, {0x37c5, 0x00}, {0x37c8, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x07}, {0x3805, 0x8f}, {0x3806, 0x04}, {0x3807, 0x47}, {0x3808, 0x07}, {0x3809, 0x88}, {0x380a, 0x04}, {0x380b, 0x40}, {0x380c, 0x04}, {0x380d, 0x38}, {0x380e, 0x04}, {0x380f, 0x60}, {0x3810, 0x00}, {0x3811, 0x04}, {0x3812, 0x00}, {0x3813, 0x04}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3820, 0x80}, {0x3821, 0x46}, {0x3822, 0x84}, {0x3829, 0x00}, {0x382a, 0x01}, {0x382b, 0x01}, {0x3830, 0x04}, {0x3836, 0x01}, {0x3837, 0x08}, {0x3839, 0x01}, {0x383a, 0x00}, {0x383b, 0x08}, {0x383c, 0x00}, {0x3f0b, 0x00}, {0x4001, 0x20}, {0x4009, 0x07}, {0x4003, 0x10}, {0x4010, 0xe0}, {0x4016, 0x00}, {0x4017, 0x10}, {0x4044, 0x02}, {0x4304, 0x08}, {0x4307, 0x30}, {0x4320, 0x80}, {0x4322, 0x00}, {0x4323, 0x00}, {0x4324, 0x00}, {0x4325, 0x00}, {0x4326, 0x00}, {0x4327, 0x00}, {0x4328, 0x00}, {0x4329, 0x00}, {0x432c, 0x03}, {0x432d, 0x81}, {0x4501, 0x84}, {0x4502, 0x40}, {0x4503, 0x18}, {0x4504, 0x04}, {0x4508, 0x02}, {0x4601, 0x10}, {0x4800, 0x00}, {0x4816, 0x52}, {0x4837, 0x16}, {0x5000, 0x7f}, {0x5001, 0x00}, {0x5005, 0x38}, {0x501e, 0x0d}, {0x5040, 0x00}, {0x5901, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x07}, {0x3805, 0x8f}, {0x3806, 0x04}, {0x3807, 0x47}, {0x3808, 0x07}, {0x3809, 0x8c}, {0x380a, 0x04}, {0x380b, 0x44}, {0x3810, 0x00}, {0x3811, 0x00}, {0x3812, 0x00}, {0x3813, 0x01}, }; static const struct ov2740_reg mode_cjfle23_1932x1092_regs[] = { {0x3000, 0x00}, {0x3018, 0x32}, //12(2 lane for 32; 1lane for 12) {0x3031, 0x0a}, {0x3080, 0x08}, {0x3083, 0xB4}, {0x3103, 0x00}, {0x3104, 0x01}, {0x3106, 0x01}, {0x3500, 0x00}, {0x3501, 0x44}, {0x3502, 0x40}, {0x3503, 0x88}, {0x3507, 0x00}, {0x3508, 0x00}, {0x3509, 0x80}, {0x350c, 0x00}, {0x350d, 0x80}, {0x3510, 0x00}, {0x3511, 0x00}, {0x3512, 0x20}, {0x3632, 0x00}, {0x3633, 0x10}, {0x3634, 0x10}, {0x3635, 0x10}, {0x3645, 0x13}, {0x3646, 0x81}, {0x3636, 0x10}, {0x3651, 0x0a}, {0x3656, 0x02}, {0x3659, 
0x04}, {0x365a, 0xda}, {0x365b, 0xa2}, {0x365c, 0x04}, {0x365d, 0x1d}, {0x365e, 0x1a}, {0x3662, 0xd7}, {0x3667, 0x78}, {0x3669, 0x0a}, {0x366a, 0x92}, {0x3700, 0x54}, {0x3702, 0x10}, {0x3706, 0x42}, {0x3709, 0x30}, {0x370b, 0xc2}, {0x3714, 0x63}, {0x3715, 0x01}, {0x3716, 0x00}, {0x371a, 0x3e}, {0x3732, 0x0e}, {0x3733, 0x10}, {0x375f, 0x0e}, {0x3768, 0x30}, {0x3769, 0x44}, {0x376a, 0x22}, {0x377b, 0x20}, {0x377c, 0x00}, {0x377d, 0x0c}, {0x3798, 0x00}, {0x37a1, 0x55}, {0x37a8, 0x6d}, {0x37c2, 0x04}, {0x37c5, 0x00}, {0x37c8, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x07}, {0x3805, 0x8f}, {0x3806, 0x04}, {0x3807, 0x47}, {0x3808, 0x07}, {0x3809, 0x88}, {0x380a, 0x04}, {0x380b, 0x40}, {0x380c, 0x08}, {0x380d, 0x70}, {0x380e, 0x04}, {0x380f, 0x56}, {0x3810, 0x00}, {0x3811, 0x04}, {0x3812, 0x00}, {0x3813, 0x04}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3820, 0x80}, {0x3821, 0x46}, {0x3822, 0x84}, {0x3829, 0x00}, {0x382a, 0x01}, {0x382b, 0x01}, {0x3830, 0x04}, {0x3836, 0x01}, {0x3837, 0x08}, {0x3839, 0x01}, {0x383a, 0x00}, {0x383b, 0x08}, {0x383c, 0x00}, {0x3f0b, 0x00}, {0x4001, 0x20}, {0x4009, 0x07}, {0x4003, 0x10}, {0x4010, 0xe0}, {0x4016, 0x00}, {0x4017, 0x10}, {0x4044, 0x02}, {0x4304, 0x08}, {0x4307, 0x30}, {0x4320, 0x80}, {0x4322, 0x00}, {0x4323, 0x00}, {0x4324, 0x00}, {0x4325, 0x00}, {0x4326, 0x00}, {0x4327, 0x00}, {0x4328, 0x00}, {0x4329, 0x00}, {0x432c, 0x03}, {0x432d, 0x81}, {0x4501, 0x84}, {0x4502, 0x40}, {0x4503, 0x18}, {0x4504, 0x04}, {0x4508, 0x02}, {0x4601, 0x10}, {0x4800, 0x00}, {0x4816, 0x52}, {0x5000, 0x73}, //0x7f enable DPC {0x5001, 0x00}, {0x5005, 0x38}, {0x501e, 0x0d}, {0x5040, 0x00}, {0x5901, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x07}, {0x3805, 0x8f}, {0x3806, 0x04}, {0x3807, 0x47}, {0x3808, 0x07}, {0x3809, 0x8c}, {0x380a, 0x04}, {0x380b, 0x44}, {0x3810, 0x00}, {0x3811, 0x00}, {0x3812, 0x00}, {0x3813, 0x01}, {0x4003, 0x40}, //set Black level to 0x40 }; static const char * const ov2740_test_pattern_menu[] = { "Disabled", "Color Bar", "Top-Bottom Darker Color Bar", "Right-Left Darker Color Bar", "Bottom-Top Darker Color Bar", }; static const char * const ov2740_module_names[] = { "", "CJFLE23", }; static const s64 link_freq_menu_items[] = { OV2740_LINK_FREQ_360MHZ, OV2740_LINK_FREQ_180MHZ, }; static const struct ov2740_link_freq_config link_freq_configs[] = { [OV2740_LINK_FREQ_360MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps), .regs = mipi_data_rate_720mbps, } }, [OV2740_LINK_FREQ_180MHZ_INDEX] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_360mbps), .regs = mipi_data_rate_360mbps, } }, }; static const struct ov2740_mode supported_modes[] = { { .width = 1932, .height = 1092, .hts = 1080, .vts_def = 0x088a, .vts_min = 0x0460, .vts_max = 0x7fff, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1932x1092_regs), .regs = mode_1932x1092_regs, }, .link_freq_index = OV2740_LINK_FREQ_360MHZ_INDEX, }, }; static const struct ov2740_mode supported_modes_360mbps[] = { { .width = 1932, .height = 1092, .hts = 2160, .vts_def = 0x0456, .vts_min = 0x0456, .vts_max = 0x07ff, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_cjfle23_1932x1092_regs), .regs = mode_cjfle23_1932x1092_regs, }, .link_freq_index = OV2740_LINK_FREQ_180MHZ_INDEX, }, }; struct ov2740 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl 
*exposure; /* Current mode */ const struct ov2740_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; /* NVM data inforamtion */ struct nvm_data *nvm; /* i2c client */ struct i2c_client *client; /* GPIO for reset */ struct gpio_desc *reset_gpio; /* Clock provider */ struct clk *clk; /* Module name index */ u8 module_name_index; }; static inline struct ov2740 *to_ov2740(struct v4l2_subdev *subdev) { return container_of(subdev, struct ov2740, sd); } static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV2740_DATA_LANES; do_div(pixel_rate, OV2740_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, OV2740_SCLK); return ppl; } static int ov2740_read_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 *val) { struct i2c_client *client = ov2740->client; struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret = 0; if (len > sizeof(data_buf)) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[sizeof(data_buf) - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return ret < 0 ? ret : -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov2740_write_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 val) { struct i2c_client *client = ov2740->client; u8 buf[6]; int ret = 0; if (len > 4) return -EINVAL; if (reg == OV2740_REG_DELAY) { msleep(val); return 0; } put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); ret = i2c_master_send(client, buf, len + 2); if (ret != len + 2) return ret < 0 ? 
ret : -EIO; return 0; } static int ov2740_write_reg_list(struct ov2740 *ov2740, const struct ov2740_reg_list *r_list) { struct i2c_client *client = ov2740->client; unsigned int i; int ret = 0; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov2740_write_reg(ov2740, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "write reg 0x%4.4x return err = %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov2740_update_digital_gain(struct ov2740 *ov2740, u32 d_gain) { int ret = 0; ret = ov2740_write_reg(ov2740, OV2740_REG_GROUP_ACCESS, 1, OV2740_GROUP_HOLD_START); if (ret) return ret; ret = ov2740_write_reg(ov2740, OV2740_REG_MWB_R_GAIN, 2, d_gain); if (ret) return ret; ret = ov2740_write_reg(ov2740, OV2740_REG_MWB_G_GAIN, 2, d_gain); if (ret) return ret; ret = ov2740_write_reg(ov2740, OV2740_REG_MWB_B_GAIN, 2, d_gain); if (ret) return ret; ret = ov2740_write_reg(ov2740, OV2740_REG_GROUP_ACCESS, 1, OV2740_GROUP_HOLD_END); if (ret) return ret; ret = ov2740_write_reg(ov2740, OV2740_REG_GROUP_ACCESS, 1, OV2740_GROUP_HOLD_LAUNCH); return ret; } static int ov2740_test_pattern(struct ov2740 *ov2740, u32 pattern) { if (pattern) pattern = (pattern - 1) << OV2740_TEST_PATTERN_BAR_SHIFT | OV2740_TEST_PATTERN_ENABLE; return ov2740_write_reg(ov2740, OV2740_REG_TEST_PATTERN, 1, pattern); } static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov2740 *ov2740 = container_of(ctrl->handler, struct ov2740, ctrl_handler); struct i2c_client *client = ov2740->client; s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov2740->cur_mode->height + ctrl->val - OV2740_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov2740->exposure, ov2740->exposure->minimum, exposure_max, ov2740->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov2740_write_reg(ov2740, OV2740_REG_ANALOG_GAIN, 2, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = ov2740_update_digital_gain(ov2740, ctrl->val); break; case V4L2_CID_EXPOSURE: /* 4 least significant bits of expsoure are fractional part */ ret = ov2740_write_reg(ov2740, OV2740_REG_EXPOSURE, 3, ctrl->val << 4); break; case V4L2_CID_VBLANK: ret = ov2740_write_reg(ov2740, OV2740_REG_VTS, 2, ov2740->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov2740_test_pattern(ov2740, ctrl->val); break; default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov2740_ctrl_ops = { .s_ctrl = ov2740_set_ctrl, }; static int ov2740_init_controls(struct ov2740 *ov2740) { struct v4l2_ctrl_handler *ctrl_hdlr; const struct ov2740_mode *cur_mode; s64 exposure_max, h_blank, pixel_rate; u32 vblank_min, vblank_max, vblank_default; int size; int ret = 0; ctrl_hdlr = &ov2740->ctrl_handler; ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); if (ret) return ret; ctrl_hdlr->lock = &ov2740->mutex; cur_mode = ov2740->cur_mode; size = ARRAY_SIZE(link_freq_menu_items); ov2740->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_LINK_FREQ, size - 1, 0, link_freq_menu_items); if (ov2740->link_freq) ov2740->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; pixel_rate = to_pixel_rate(OV2740_LINK_FREQ_360MHZ_INDEX); ov2740->pixel_rate = 
v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, pixel_rate, 1, pixel_rate); vblank_min = cur_mode->vts_min - cur_mode->height; vblank_max = cur_mode->vts_max - cur_mode->height; vblank_default = cur_mode->vts_def - cur_mode->height; ov2740->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_VBLANK, vblank_min, vblank_max, 1, vblank_default); h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index); h_blank -= cur_mode->width; ov2740->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov2740->hblank) ov2740->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV2740_ANAL_GAIN_MIN, OV2740_ANAL_GAIN_MAX, OV2740_ANAL_GAIN_STEP, OV2740_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV2740_DGTL_GAIN_MIN, OV2740_DGTL_GAIN_MAX, OV2740_DGTL_GAIN_STEP, OV2740_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - OV2740_EXPOSURE_MAX_MARGIN; ov2740->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_EXPOSURE, OV2740_EXPOSURE_MIN, exposure_max, OV2740_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov2740_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov2740_test_pattern_menu) - 1, 0, 0, ov2740_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov2740->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov2740_update_pad_format(const struct ov2740_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } static int ov2740_load_otp_data(struct nvm_data *nvm) { struct i2c_client *client; struct ov2740 *ov2740; u32 isp_ctrl00 = 0; u32 isp_ctrl01 = 0; int ret; if (!nvm) return -EINVAL; if (nvm->nvm_buffer) return 0; client = nvm->client; ov2740 = to_ov2740(i2c_get_clientdata(client)); nvm->nvm_buffer = kzalloc(CUSTOMER_USE_OTP_SIZE, GFP_KERNEL); if (!nvm->nvm_buffer) return -ENOMEM; ret = ov2740_read_reg(ov2740, OV2740_REG_ISP_CTRL00, 1, &isp_ctrl00); if (ret) { dev_err(&client->dev, "failed to read ISP CTRL00\n"); goto err; } ret = ov2740_read_reg(ov2740, OV2740_REG_ISP_CTRL01, 1, &isp_ctrl01); if (ret) { dev_err(&client->dev, "failed to read ISP CTRL01\n"); goto err; } /* Clear bit 5 of ISP CTRL00 */ ret = ov2740_write_reg(ov2740, OV2740_REG_ISP_CTRL00, 1, isp_ctrl00 & ~BIT(5)); if (ret) { dev_err(&client->dev, "failed to set ISP CTRL00\n"); goto err; } /* Clear bit 7 of ISP CTRL01 */ ret = ov2740_write_reg(ov2740, OV2740_REG_ISP_CTRL01, 1, isp_ctrl01 & ~BIT(7)); if (ret) { dev_err(&client->dev, "failed to set ISP CTRL01\n"); goto err; } ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1, OV2740_MODE_STREAMING); if (ret) { dev_err(&client->dev, "failed to set streaming mode\n"); goto err; } /* * Users are not allowed to access OTP-related registers and memory * during the 20 ms period after streaming starts (0x100 = 0x01). 
*/ msleep(20); ret = regmap_bulk_read(nvm->regmap, OV2740_REG_OTP_CUSTOMER, nvm->nvm_buffer, CUSTOMER_USE_OTP_SIZE); if (ret) { dev_err(&client->dev, "failed to read OTP data, ret %d\n", ret); goto err; } ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1, OV2740_MODE_STANDBY); if (ret) { dev_err(&client->dev, "failed to set streaming mode\n"); goto err; } ret = ov2740_write_reg(ov2740, OV2740_REG_ISP_CTRL01, 1, isp_ctrl01); if (ret) { dev_err(&client->dev, "failed to set ISP CTRL01\n"); goto err; } ret = ov2740_write_reg(ov2740, OV2740_REG_ISP_CTRL00, 1, isp_ctrl00); if (ret) { dev_err(&client->dev, "failed to set ISP CTRL00\n"); goto err; } return 0; err: kfree(nvm->nvm_buffer); nvm->nvm_buffer = NULL; return ret; } static int ov2740_start_streaming(struct ov2740 *ov2740) { struct i2c_client *client = ov2740->client; struct nvm_data *nvm = ov2740->nvm; const struct ov2740_reg_list *reg_list; int link_freq_index; int ret = 0; ov2740_load_otp_data(nvm); link_freq_index = ov2740->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov2740_write_reg_list(ov2740, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &ov2740->cur_mode->reg_list; ret = ov2740_write_reg_list(ov2740, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(ov2740->sd.ctrl_handler); if (ret) return ret; ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1, OV2740_MODE_STREAMING); if (ret) dev_err(&client->dev, "failed to start streaming"); return ret; } static void ov2740_stop_streaming(struct ov2740 *ov2740) { struct i2c_client *client = ov2740->client; if (ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1, OV2740_MODE_STANDBY)) dev_err(&client->dev, "failed to stop streaming"); } static int ov2740_set_stream(struct v4l2_subdev *sd, int enable) { struct ov2740 *ov2740 = to_ov2740(sd); struct i2c_client *client = ov2740->client; int ret = 0; if (ov2740->streaming == enable) return 0; mutex_lock(&ov2740->mutex); if (enable) { ret = pm_runtime_resume_and_get(&client->dev); if (ret < 0) { mutex_unlock(&ov2740->mutex); return ret; } ret = ov2740_start_streaming(ov2740); if (ret) { enable = 0; ov2740_stop_streaming(ov2740); pm_runtime_put(&client->dev); } } else { ov2740_stop_streaming(ov2740); pm_runtime_put(&client->dev); } ov2740->streaming = enable; mutex_unlock(&ov2740->mutex); return ret; } static int ov2740_power_off(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov2740 *ov2740 = to_ov2740(sd); int ret = 0; gpiod_set_value_cansleep(ov2740->reset_gpio, 1); clk_disable_unprepare(ov2740->clk); msleep(20); return ret; } static int ov2740_power_on(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov2740 *ov2740 = to_ov2740(sd); int ret = 0; ret = clk_prepare_enable(ov2740->clk); gpiod_set_value_cansleep(ov2740->reset_gpio, 0); msleep(20); return ret; } static int __maybe_unused ov2740_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov2740 *ov2740 = to_ov2740(sd); mutex_lock(&ov2740->mutex); if (ov2740->streaming) ov2740_stop_streaming(ov2740); mutex_unlock(&ov2740->mutex); return 0; } static int __maybe_unused ov2740_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov2740 *ov2740 = to_ov2740(sd); int ret = 0; mutex_lock(&ov2740->mutex); if (!ov2740->streaming) goto exit; ret = ov2740_start_streaming(ov2740); if (ret) { ov2740->streaming = false; 
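/* Restart failed: leave the streaming flag cleared and force the sensor back to standby. */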
ov2740_stop_streaming(ov2740); } exit: mutex_unlock(&ov2740->mutex); return ret; } static int ov2740_set_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov2740 *ov2740 = to_ov2740(sd); const struct ov2740_mode *mode; s32 vblank_def, h_blank; if (ov2740->module_name_index == 1) { mode = v4l2_find_nearest_size(supported_modes_360mbps, ARRAY_SIZE(supported_modes_360mbps), width, height, fmt->format.width, fmt->format.height); } else { mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); } mutex_lock(&ov2740->mutex); ov2740_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov2740->cur_mode = mode; __v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov2740->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov2740->vblank, mode->vts_min - mode->height, mode->vts_max - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov2740->mutex); return 0; } static int ov2740_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov2740 *ov2740 = to_ov2740(sd); mutex_lock(&ov2740->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov2740_update_pad_format(ov2740->cur_mode, &fmt->format); mutex_unlock(&ov2740->mutex); return 0; } static int ov2740_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov2740_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { struct ov2740 *ov2740 = to_ov2740(sd); if (ov2740->module_name_index == 1) { if (fse->index >= ARRAY_SIZE(supported_modes_360mbps)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes_360mbps[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes_360mbps[fse->index].height; fse->max_height = fse->min_height; } else { if (fse->index >= 
ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; } return 0; } static int ov2740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov2740 *ov2740 = to_ov2740(sd); mutex_lock(&ov2740->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ov2740_update_pad_format(ov2740->cur_mode, v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) ov2740_update_pad_format(ov2740->cur_mode, v4l2_subdev_get_try_format(sd, fh->state, 0)); #else ov2740_update_pad_format(ov2740->cur_mode, v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov2740->mutex); return 0; } static const struct v4l2_subdev_video_ops ov2740_video_ops = { .s_stream = ov2740_set_stream, }; static const struct v4l2_subdev_pad_ops ov2740_pad_ops = { .set_fmt = ov2740_set_format, .get_fmt = ov2740_get_format, .enum_mbus_code = ov2740_enum_mbus_code, .enum_frame_size = ov2740_enum_frame_size, }; static const struct v4l2_subdev_ops ov2740_subdev_ops = { .video = &ov2740_video_ops, .pad = &ov2740_pad_ops, }; static const struct media_entity_operations ov2740_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov2740_internal_ops = { .open = ov2740_open, }; static int ov2740_identify_module(struct ov2740 *ov2740) { struct i2c_client *client = ov2740->client; int ret; u32 val; ret = ov2740_read_reg(ov2740, OV2740_REG_CHIP_ID, 3, &val); if (ret) return ret; if (val != OV2740_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV2740_CHIP_ID, val); return -ENXIO; } return 0; } static int ov2740_check_hwcfg(struct device *dev) { struct fwnode_handle *ep; struct fwnode_handle *fwnode = dev_fwnode(dev); struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY }; bool found = false; u32 mclk; int ret; unsigned int i, j; ep = fwnode_graph_get_next_endpoint(fwnode, NULL); if (!ep) return -EPROBE_DEFER; ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk); if (ret) return ret; if (mclk != OV2740_MCLK) { dev_err(dev, "external clock %d is not supported", mclk); return -EINVAL; } ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) return ret; if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV2740_DATA_LANES) { dev_err(dev, "number of CSI2 data lanes %d is not supported", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto check_hwcfg_error; } if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; goto check_hwcfg_error; } for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { if (link_freq_menu_items[i] == bus_cfg.link_frequencies[j]) { found = true; break; } } } if (!found) { dev_err(dev, "no supported link frequencies\n"); ret = -EINVAL; } check_hwcfg_error: v4l2_fwnode_endpoint_free(&bus_cfg); return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov2740_remove(struct i2c_client *client) #else static void ov2740_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov2740 *ov2740 = to_ov2740(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); 
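/* No further callbacks can run at this point, so the serialization mutex can be destroyed. */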
mutex_destroy(&ov2740->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int ov2740_nvmem_read(void *priv, unsigned int off, void *val, size_t count) { struct nvm_data *nvm = priv; struct v4l2_subdev *sd = i2c_get_clientdata(nvm->client); struct device *dev = &nvm->client->dev; struct ov2740 *ov2740 = to_ov2740(sd); int ret = 0; mutex_lock(&ov2740->mutex); if (nvm->nvm_buffer) { memcpy(val, nvm->nvm_buffer + off, count); goto exit; } ret = pm_runtime_resume_and_get(dev); if (ret < 0) goto exit; ret = ov2740_load_otp_data(nvm); if (!ret) memcpy(val, nvm->nvm_buffer + off, count); pm_runtime_put(dev); exit: mutex_unlock(&ov2740->mutex); return ret; } static int ov2740_register_nvmem(struct i2c_client *client, struct ov2740 *ov2740) { struct nvm_data *nvm; struct regmap_config regmap_config = { }; struct nvmem_config nvmem_config = { }; struct regmap *regmap; struct device *dev = &client->dev; int ret; nvm = devm_kzalloc(dev, sizeof(*nvm), GFP_KERNEL); if (!nvm) return -ENOMEM; regmap_config.val_bits = 8; regmap_config.reg_bits = 16; regmap_config.disable_locking = true; regmap = devm_regmap_init_i2c(client, ®map_config); if (IS_ERR(regmap)) return PTR_ERR(regmap); nvm->regmap = regmap; nvm->client = client; nvmem_config.name = dev_name(dev); nvmem_config.dev = dev; nvmem_config.read_only = true; nvmem_config.root_only = true; nvmem_config.owner = THIS_MODULE; nvmem_config.compat = true; nvmem_config.base_dev = dev; nvmem_config.reg_read = ov2740_nvmem_read; nvmem_config.reg_write = NULL; nvmem_config.priv = nvm; nvmem_config.stride = 1; nvmem_config.word_size = 1; nvmem_config.size = CUSTOMER_USE_OTP_SIZE; nvm->nvmem = devm_nvmem_register(dev, &nvmem_config); ret = PTR_ERR_OR_ZERO(nvm->nvmem); if (!ret) ov2740->nvm = nvm; return ret; } static int ov2740_read_module_name(struct ov2740 *ov2740) { struct device *dev = &ov2740->client->dev; int i = 0; union acpi_object *obj; struct acpi_device *adev = ACPI_COMPANION(dev); ov2740->module_name_index = 0; if (!adev) return 0; obj = acpi_evaluate_dsm_typed(adev->handle, &cio2_sensor_module_guid, 0x00, 0x01, NULL, ACPI_TYPE_STRING); if (obj && obj->string.type == ACPI_TYPE_STRING) { dev_dbg(dev, "module name: %s", obj->string.pointer); for (i = 1; i < ARRAY_SIZE(ov2740_module_names); i++) { if (!strcmp(ov2740_module_names[i], obj->string.pointer)) { ov2740->module_name_index = i; break; } } } ACPI_FREE(obj); return 0; } static int ov2740_parse_power(struct ov2740 *ov2740) { struct device *dev = &ov2740->client->dev; long ret; ov2740->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ov2740->reset_gpio)) { ret = PTR_ERR(ov2740->reset_gpio); dev_err(dev, "error while getting reset gpio: %ld\n", ret); ov2740->reset_gpio = NULL; return (int)ret; } ov2740->clk = devm_clk_get_optional(dev, "clk"); if (IS_ERR(ov2740->clk)) { ret = PTR_ERR(ov2740->clk); dev_err(dev, "error while getting clk: %ld\n", ret); ov2740->clk = NULL; return (int)ret; } return 0; } static int ov2740_probe(struct i2c_client *client) { struct ov2740 *ov2740; int ret = 0; ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL); if (!ov2740) return -ENOMEM; ov2740->client = client; ov2740_read_module_name(ov2740); ret = ov2740_check_hwcfg(&client->dev); if (ret) return dev_err_probe(&client->dev, ret, "failed to check HW configuration: %d", ret); v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops); ret = ov2740_parse_power(ov2740); if (ret) return ret; ov2740_power_on(&client->dev); ret = 
ov2740_identify_module(ov2740); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); goto probe_error_power_down; } mutex_init(&ov2740->mutex); if (ov2740->module_name_index >= ARRAY_SIZE(ov2740_module_names)) { ov2740->module_name_index = 0; dev_err(&client->dev, "unknown module_name_index: %d", ov2740->module_name_index); } if (ov2740->module_name_index == 1) ov2740->cur_mode = &supported_modes_360mbps[0]; else ov2740->cur_mode = &supported_modes[0]; ret = ov2740_init_controls(ov2740); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov2740->sd.internal_ops = &ov2740_internal_ops; ov2740->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov2740->sd.entity.ops = &ov2740_subdev_entity_ops; ov2740->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov2740->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov2740->sd.entity, 1, &ov2740->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) ret = v4l2_async_register_subdev_sensor_common(&ov2740->sd); #else ret = v4l2_async_register_subdev_sensor(&ov2740->sd); #endif if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } ret = ov2740_register_nvmem(client, ov2740); if (ret) dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret); /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&ov2740->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler); mutex_destroy(&ov2740->mutex); probe_error_power_down: ov2740_power_off(&client->dev); return ret; } static const struct dev_pm_ops ov2740_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov2740_suspend, ov2740_resume) SET_RUNTIME_PM_OPS(ov2740_power_off, ov2740_power_on, NULL) }; static const struct acpi_device_id ov2740_acpi_ids[] = { {"INT3474"}, {} }; MODULE_DEVICE_TABLE(acpi, ov2740_acpi_ids); static struct i2c_driver ov2740_i2c_driver = { .driver = { .name = "ov2740", .pm = &ov2740_pm_ops, .acpi_match_table = ov2740_acpi_ids, }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov2740_probe, #else .probe = ov2740_probe, #endif .remove = ov2740_remove, }; module_i2c_driver(ov2740_i2c_driver); MODULE_AUTHOR("Qiu, Tianshu "); MODULE_AUTHOR("Shawn Tu "); MODULE_AUTHOR("Bingbu Cao "); MODULE_DESCRIPTION("OmniVision OV2740 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/ov8856.c000066400000000000000000000732451471702545500232610ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2021-2022 Intel Corporation. 
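/*
 * OmniVision OV8856 sensor driver, out-of-tree IPU6 variant. The
 * LINUX_VERSION_CODE guards in this file keep a single source buildable
 * across the kernel versions supported by this tree.
 */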
#include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #include #else #include #endif #include #include #include #include #include #include #include #include #define OV8856_REG_VALUE_08BIT 1 #define OV8856_REG_VALUE_16BIT 2 #define OV8856_REG_VALUE_24BIT 3 #define OV8856_LINK_FREQ_360MHZ 360000000ULL #define OV8856_LINK_FREQ_264MHZ 264000000ULL #define OV8856_SCLK 144000000ULL #define OV8856_MCLK 19200000 #define OV8856_DATA_LANES 4 #define OV8856_RGB_DEPTH 10 #define OV8856_REG_CHIP_ID 0x300a #define OV8856_CHIP_ID 0x00885a #define OV8856_REG_MODE_SELECT 0x0100 #define OV8856_MODE_STANDBY 0x00 #define OV8856_MODE_STREAMING 0x01 /* vertical-timings from sensor */ #define OV8856_REG_VTS 0x380e #define OV8856_VTS_MAX 0x7fff /* horizontal-timings from sensor */ #define OV8856_REG_HTS 0x380c /* Exposure controls from sensor */ #define OV8856_REG_EXPOSURE 0x3500 #define OV8856_EXPOSURE_MIN 6 #define OV8856_EXPOSURE_MAX_MARGIN 6 #define OV8856_EXPOSURE_STEP 1 /* Analog gain controls from sensor */ #define OV8856_REG_ANALOG_GAIN 0x3508 #define OV8856_ANAL_GAIN_MIN 128 #define OV8856_ANAL_GAIN_MAX 2047 #define OV8856_ANAL_GAIN_STEP 1 /* Digital gain controls from sensor */ #define OV8856_REG_MWB_R_GAIN 0x5019 #define OV8856_REG_MWB_G_GAIN 0x501b #define OV8856_REG_MWB_B_GAIN 0x501d #define OV8856_DGTL_GAIN_MIN 0 #define OV8856_DGTL_GAIN_MAX 4095 #define OV8856_DGTL_GAIN_STEP 1 #define OV8856_DGTL_GAIN_DEFAULT 1024 /* Test Pattern Control */ #define OV8856_REG_TEST_PATTERN 0x5e00 #define OV8856_TEST_PATTERN_ENABLE BIT(7) #define OV8856_TEST_PATTERN_BAR_SHIFT 2 /* Flip Mirror Controls from sensor */ #define OV8856_REG_FORMAT1 0x3820 #define OV8856_REG_FORMAT2 0x3821 #define OV8856_REG_FORMAT1_A_VFLIP BIT(1) #define OV8856_REG_FORMAT1_D_VFLIP BIT(2) #define OV8856_REG_FORMAT1_VFLIP_BLC BIT(6) #define OV8856_REG_FORMAT2_A_HFLIP BIT(1) #define OV8856_REG_FORMAT2_D_HFLIP BIT(2) #define OV8856_REG_ISP_CTRL_1 0x376b #define OV8856_REG_ISP_CTRL_2 0x5001 #define OV8856_REG_ISP_CTRL_3 0x5004 #define OV8856_REG_ISP_CTRL_4 0x502e #define to_ov8856(_sd) container_of(_sd, struct ov8856, sd) enum { OV8856_LINK_FREQ_720MBPS, OV8856_LINK_FREQ_528MBPS, }; struct ov8856_reg { u16 address; u8 val; }; struct ov8856_reg_list { u32 num_of_regs; const struct ov8856_reg *regs; }; struct ov8856_link_freq_config { const struct ov8856_reg_list reg_list; }; struct ov8856_mode { /* Frame width in pixels */ u32 width; /* Frame height in pixels */ u32 height; /* Horizontal timining size */ u32 hts; /* Default vertical timining size */ u32 vts_def; /* Min vertical timining size */ u32 vts_min; /* Link frequency needed for this resolution */ u32 link_freq_index; /* Sensor register settings for this resolution */ const struct ov8856_reg_list reg_list; }; static const struct ov8856_reg mipi_data_rate_720mbps[] = { {0x0103, 0x01}, {0x0100, 0x00}, {0x0302, 0x4b}, {0x0303, 0x01}, {0x030b, 0x02}, {0x030d, 0x4b}, {0x031e, 0x0c} }; static const struct ov8856_reg mipi_data_rate_528mbps[] = { {0x0103, 0x01}, {0x0100, 0x00}, {0x0302, 0x37}, {0x0303, 0x01}, {0x030b, 0x02}, {0x030d, 0x4b}, {0x031e, 0x0c} }; static const struct ov8856_reg mode_3280x2464_regs[] = { {0x0303, 0x01}, {0x031e, 0x0c}, {0x3000, 0x00}, {0x300e, 0x00}, {0x3010, 0x00}, {0x3015, 0x84}, {0x3018, 0x72}, {0x3021, 0x23}, {0x3033, 0x24}, {0x3500, 0x00}, {0x3501, 0x9c}, {0x3502, 0x80}, {0x3503, 0x08}, {0x3505, 0x83}, {0x3508, 0x01}, {0x3509, 0x80}, {0x350c, 0x00}, {0x350d, 0x80}, {0x350e, 0x04}, {0x350f, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x72}, 
{0x3601, 0x40}, {0x3602, 0x30}, {0x3610, 0xc5}, {0x3611, 0x58}, {0x3612, 0x5c}, {0x3613, 0xca}, {0x3614, 0x20}, {0x3628, 0xff}, {0x3629, 0xff}, {0x362a, 0xff}, {0x3633, 0x10}, {0x3634, 0x10}, {0x3635, 0x10}, {0x3636, 0x10}, {0x3663, 0x08}, {0x3669, 0x34}, {0x366e, 0x10}, {0x3706, 0x86}, {0x370b, 0x7e}, {0x3714, 0x23}, {0x3730, 0x12}, {0x3733, 0x10}, {0x3764, 0x00}, {0x3765, 0x00}, {0x3769, 0x62}, {0x376a, 0x2a}, {0x376b, 0x30}, {0x3780, 0x00}, {0x3781, 0x24}, {0x3782, 0x00}, {0x3783, 0x23}, {0x3798, 0x2f}, {0x37a1, 0x60}, {0x37a8, 0x6a}, {0x37ab, 0x3f}, {0x37c2, 0x04}, {0x37c3, 0xf1}, {0x37c9, 0x80}, {0x37cb, 0x16}, {0x37cc, 0x16}, {0x37cd, 0x16}, {0x37ce, 0x16}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x0c}, {0x3805, 0xdf}, {0x3806, 0x09}, {0x3807, 0xaf}, {0x3808, 0x0c}, {0x3809, 0xe0}, {0x380a, 0x09}, {0x380b, 0xb0}, {0x380c, 0x07}, {0x380d, 0x90}, {0x380e, 0x09}, {0x380f, 0xd8}, {0x3810, 0x00}, {0x3811, 0x00}, {0x3812, 0x00}, {0x3813, 0x00}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x00}, {0x3817, 0x00}, {0x3818, 0x00}, {0x3819, 0x00}, {0x3820, 0x80}, {0x3821, 0x46}, {0x382a, 0x01}, {0x382b, 0x01}, {0x3830, 0x06}, {0x3836, 0x02}, {0x3837, 0x10}, {0x3862, 0x04}, {0x3863, 0x08}, {0x3cc0, 0x33}, {0x3d85, 0x17}, {0x3d8c, 0x73}, {0x3d8d, 0xde}, {0x4001, 0xe0}, {0x4003, 0x40}, {0x4008, 0x00}, {0x4009, 0x0b}, {0x400a, 0x00}, {0x400b, 0x84}, {0x400f, 0x80}, {0x4010, 0xf0}, {0x4011, 0xff}, {0x4012, 0x02}, {0x4013, 0x01}, {0x4014, 0x01}, {0x4015, 0x01}, {0x4042, 0x00}, {0x4043, 0x80}, {0x4044, 0x00}, {0x4045, 0x80}, {0x4046, 0x00}, {0x4047, 0x80}, {0x4048, 0x00}, {0x4049, 0x80}, {0x4041, 0x03}, {0x404c, 0x20}, {0x404d, 0x00}, {0x404e, 0x20}, {0x4203, 0x80}, {0x4307, 0x30}, {0x4317, 0x00}, {0x4503, 0x08}, {0x4601, 0x80}, {0x4800, 0x44}, {0x4816, 0x53}, {0x481b, 0x58}, {0x481f, 0x27}, {0x4837, 0x16}, {0x483c, 0x0f}, {0x484b, 0x05}, {0x5000, 0x57}, {0x5001, 0x0a}, {0x5004, 0x00}, {0x502e, 0x03}, {0x5030, 0x41}, {0x5795, 0x02}, {0x5796, 0x20}, {0x5797, 0x20}, {0x5798, 0x2a}, {0x5799, 0x2a}, {0x579a, 0x00}, {0x579b, 0x50}, {0x579c, 0x00}, {0x579d, 0x38}, {0x579e, 0x0c}, {0x579f, 0x40}, {0x57a0, 0x09}, {0x57a1, 0x40}, {0x5780, 0x14}, {0x5781, 0x0f}, {0x5782, 0x44}, {0x5783, 0x02}, {0x5784, 0x01}, {0x5785, 0x01}, {0x5786, 0x00}, {0x5787, 0x04}, {0x5788, 0x02}, {0x5789, 0x0f}, {0x578a, 0xfd}, {0x578b, 0xf5}, {0x578c, 0xf5}, {0x578d, 0x03}, {0x578e, 0x08}, {0x578f, 0x0c}, {0x5790, 0x08}, {0x5791, 0x04}, {0x5792, 0x00}, {0x5793, 0x52}, {0x5794, 0xa3}, {0x59f8, 0x3d}, {0x5a08, 0x02}, {0x5b00, 0x02}, {0x5b01, 0x10}, {0x5b02, 0x03}, {0x5b03, 0xcf}, {0x5b05, 0x6c}, {0x5e00, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x06}, {0x3804, 0x0c}, {0x3805, 0xdf}, {0x3806, 0x09}, {0x3807, 0xa7}, {0x3808, 0x0c}, {0x3809, 0xd0}, {0x380a, 0x09}, {0x380b, 0xa0}, {0x380c, 0x07}, {0x380d, 0x8c}, {0x380e, 0x09}, {0x380f, 0xca}, {0x3810, 0x00}, {0x3811, 0x08}, {0x3812, 0x00}, {0x3813, 0x01} }; static const struct ov8856_reg mode_1640x1232_regs[] = { {0x3000, 0x00}, {0x300e, 0x00}, {0x3010, 0x00}, {0x3015, 0x84}, {0x3018, 0x72}, {0x3021, 0x23}, {0x3033, 0x24}, {0x3500, 0x00}, {0x3501, 0x4c}, {0x3502, 0xe0}, {0x3503, 0x08}, {0x3505, 0x83}, {0x3508, 0x01}, {0x3509, 0x80}, {0x350c, 0x00}, {0x350d, 0x80}, {0x350e, 0x04}, {0x350f, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x72}, {0x3601, 0x40}, {0x3602, 0x30}, {0x3610, 0xc5}, {0x3611, 0x58}, {0x3612, 0x5c}, {0x3613, 0xca}, {0x3614, 0x20}, {0x3628, 0xff}, {0x3629, 0xff}, {0x362a, 0xff}, {0x3633, 0x10}, 
{0x3634, 0x10}, {0x3635, 0x10}, {0x3636, 0x10}, {0x3663, 0x08}, {0x3669, 0x34}, {0x366e, 0x08}, {0x3706, 0x86}, {0x370b, 0x7e}, {0x3714, 0x27}, {0x3730, 0x12}, {0x3733, 0x10}, {0x3764, 0x00}, {0x3765, 0x00}, {0x3769, 0x62}, {0x376a, 0x2a}, {0x376b, 0x30}, {0x3780, 0x00}, {0x3781, 0x24}, {0x3782, 0x00}, {0x3783, 0x23}, {0x3798, 0x2f}, {0x37a1, 0x60}, {0x37a8, 0x6a}, {0x37ab, 0x3f}, {0x37c2, 0x14}, {0x37c3, 0xf1}, {0x37c9, 0x80}, {0x37cb, 0x16}, {0x37cc, 0x16}, {0x37cd, 0x16}, {0x37ce, 0x16}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x0c}, {0x3804, 0x0c}, {0x3805, 0xdf}, {0x3806, 0x09}, {0x3807, 0xa3}, {0x3808, 0x06}, {0x3809, 0x60}, {0x380a, 0x04}, {0x380b, 0xc8}, {0x380c, 0x07}, {0x380d, 0x8c}, {0x380e, 0x04}, {0x380f, 0xde}, {0x3810, 0x00}, {0x3811, 0x08}, {0x3812, 0x00}, {0x3813, 0x02}, {0x3814, 0x03}, {0x3815, 0x01}, {0x3816, 0x00}, {0x3817, 0x00}, {0x3818, 0x00}, {0x3819, 0x00}, {0x3820, 0x90}, {0x3821, 0x67}, {0x382a, 0x03}, {0x382b, 0x01}, {0x3830, 0x06}, {0x3836, 0x02}, {0x3837, 0x10}, {0x3862, 0x04}, {0x3863, 0x08}, {0x3cc0, 0x33}, {0x3d85, 0x17}, {0x3d8c, 0x73}, {0x3d8d, 0xde}, {0x4001, 0xe0}, {0x4003, 0x40}, {0x4008, 0x00}, {0x4009, 0x05}, {0x400a, 0x00}, {0x400b, 0x84}, {0x400f, 0x80}, {0x4010, 0xf0}, {0x4011, 0xff}, {0x4012, 0x02}, {0x4013, 0x01}, {0x4014, 0x01}, {0x4015, 0x01}, {0x4042, 0x00}, {0x4043, 0x80}, {0x4044, 0x00}, {0x4045, 0x80}, {0x4046, 0x00}, {0x4047, 0x80}, {0x4048, 0x00}, {0x4049, 0x80}, {0x4041, 0x03}, {0x404c, 0x20}, {0x404d, 0x00}, {0x404e, 0x20}, {0x4203, 0x80}, {0x4307, 0x30}, {0x4317, 0x00}, {0x4503, 0x08}, {0x4601, 0x80}, {0x4800, 0x44}, {0x4816, 0x53}, {0x481b, 0x58}, {0x481f, 0x27}, {0x4837, 0x16}, {0x483c, 0x0f}, {0x484b, 0x05}, {0x5000, 0x57}, {0x5001, 0x0a}, {0x5004, 0x04}, {0x502e, 0x03}, {0x5030, 0x41}, {0x5795, 0x00}, {0x5796, 0x10}, {0x5797, 0x10}, {0x5798, 0x73}, {0x5799, 0x73}, {0x579a, 0x00}, {0x579b, 0x28}, {0x579c, 0x00}, {0x579d, 0x16}, {0x579e, 0x06}, {0x579f, 0x20}, {0x57a0, 0x04}, {0x57a1, 0xa0}, {0x5780, 0x14}, {0x5781, 0x0f}, {0x5782, 0x44}, {0x5783, 0x02}, {0x5784, 0x01}, {0x5785, 0x01}, {0x5786, 0x00}, {0x5787, 0x04}, {0x5788, 0x02}, {0x5789, 0x0f}, {0x578a, 0xfd}, {0x578b, 0xf5}, {0x578c, 0xf5}, {0x578d, 0x03}, {0x578e, 0x08}, {0x578f, 0x0c}, {0x5790, 0x08}, {0x5791, 0x04}, {0x5792, 0x00}, {0x5793, 0x52}, {0x5794, 0xa3}, {0x59f8, 0x3d}, {0x5a08, 0x02}, {0x5b00, 0x02}, {0x5b01, 0x10}, {0x5b02, 0x03}, {0x5b03, 0xcf}, {0x5b05, 0x6c}, {0x5e00, 0x00}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x0c}, {0x3805, 0xdf}, {0x3806, 0x09}, {0x3807, 0xaf}, {0x3808, 0x06}, {0x3809, 0x68}, {0x380a, 0x04}, {0x380b, 0xd0}, {0x380e, 0x04}, {0x380f, 0xee}, {0x3810, 0x00}, {0x3811, 0x04}, {0x3812, 0x00}, {0x3813, 0x05}, {0x3500, 0x00}, {0x3501, 0x4e}, {0x3502, 0x80}, {0x3820, 0x90}, {0x3821, 0x67}, {0x502e, 0x03}, {0x5001, 0x0a}, {0x5004, 0x04}, {0x376b, 0x30}, {0x030d, 0x4b}, {0x030b, 0x02}, {0x4837, 0x1e}, {0x380c, 0x0c}, {0x380d, 0x60}, {0x380e, 0x05}, {0x380f, 0xea} }; static const char * const ov8856_test_pattern_menu[] = { "Disabled", "Standard Color Bar", "Top-Bottom Darker Color Bar", "Right-Left Darker Color Bar", "Bottom-Top Darker Color Bar" }; static const s64 link_freq_menu_items[] = { OV8856_LINK_FREQ_360MHZ, OV8856_LINK_FREQ_264MHZ }; static const struct ov8856_link_freq_config link_freq_configs[] = { [OV8856_LINK_FREQ_720MBPS] = { .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps), .regs = mipi_data_rate_720mbps, } }, [OV8856_LINK_FREQ_528MBPS] = { .reg_list = { .num_of_regs = 
ARRAY_SIZE(mipi_data_rate_528mbps), .regs = mipi_data_rate_528mbps, } } }; static const struct ov8856_mode supported_modes[] = { { .width = 3280, .height = 2464, .hts = 1928, .vts_def = 2488, .vts_min = 2488, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), .regs = mode_3280x2464_regs, }, .link_freq_index = OV8856_LINK_FREQ_720MBPS, }, { .width = 1640, .height = 1232, .hts = 3820, .vts_def = 1256, .vts_min = 1256, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs), .regs = mode_1640x1232_regs, }, .link_freq_index = OV8856_LINK_FREQ_528MBPS, } }; struct ov8856 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; /* Current mode */ const struct ov8856_mode *cur_mode; /* To serialize asynchronus callbacks */ struct mutex mutex; /* Streaming on/off */ bool streaming; }; static u64 to_pixel_rate(u32 f_index) { u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV8856_DATA_LANES; do_div(pixel_rate, OV8856_RGB_DEPTH); return pixel_rate; } static u64 to_pixels_per_line(u32 hts, u32 f_index) { u64 ppl = hts * to_pixel_rate(f_index); do_div(ppl, OV8856_SCLK); return ppl; } static int ov8856_read_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); struct i2c_msg msgs[2]; u8 addr_buf[2]; u8 data_buf[4] = {0}; int ret; if (len > 4) return -EINVAL; put_unaligned_be16(reg, addr_buf); msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = sizeof(addr_buf); msgs[0].buf = addr_buf; msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = &data_buf[4 - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return -EIO; *val = get_unaligned_be32(data_buf); return 0; } static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); u8 buf[6]; if (len > 4) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_be32(val << 8 * (4 - len), buf + 2); if (i2c_master_send(client, buf, len + 2) != len + 2) return -EIO; return 0; } static int ov8856_write_reg_list(struct ov8856 *ov8856, const struct ov8856_reg_list *r_list) { struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); unsigned int i; int ret; for (i = 0; i < r_list->num_of_regs; i++) { ret = ov8856_write_reg(ov8856, r_list->regs[i].address, 1, r_list->regs[i].val); if (ret) { dev_err_ratelimited(&client->dev, "failed to W reg 0x%4.4x err %d", r_list->regs[i].address, ret); return ret; } } return 0; } static int ov8856_update_digital_gain(struct ov8856 *ov8856, u32 d_gain) { int ret; ret = ov8856_write_reg(ov8856, OV8856_REG_MWB_R_GAIN, OV8856_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = ov8856_write_reg(ov8856, OV8856_REG_MWB_G_GAIN, OV8856_REG_VALUE_16BIT, d_gain); if (ret) return ret; return ov8856_write_reg(ov8856, OV8856_REG_MWB_B_GAIN, OV8856_REG_VALUE_16BIT, d_gain); } static int ov8856_test_pattern(struct ov8856 *ov8856, u32 pattern) { if (pattern) pattern = (pattern - 1) << OV8856_TEST_PATTERN_BAR_SHIFT | OV8856_TEST_PATTERN_ENABLE; return ov8856_write_reg(ov8856, OV8856_REG_TEST_PATTERN, OV8856_REG_VALUE_08BIT, pattern); } static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov8856 *ov8856 = container_of(ctrl->handler, struct ov8856, ctrl_handler); struct i2c_client *client = 
v4l2_get_subdevdata(&ov8856->sd); s64 exposure_max; int ret = 0; /* Propagate change of current control to all related controls */ if (ctrl->id == V4L2_CID_VBLANK) { /* Update max exposure while meeting expected vblanking */ exposure_max = ov8856->cur_mode->height + ctrl->val - OV8856_EXPOSURE_MAX_MARGIN; __v4l2_ctrl_modify_range(ov8856->exposure, ov8856->exposure->minimum, exposure_max, ov8856->exposure->step, exposure_max); } /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov8856_write_reg(ov8856, OV8856_REG_ANALOG_GAIN, OV8856_REG_VALUE_16BIT, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = ov8856_update_digital_gain(ov8856, ctrl->val); break; case V4L2_CID_EXPOSURE: /* 4 least significant bits of expsoure are fractional part */ ret = ov8856_write_reg(ov8856, OV8856_REG_EXPOSURE, OV8856_REG_VALUE_24BIT, ctrl->val << 4); break; case V4L2_CID_VBLANK: ret = ov8856_write_reg(ov8856, OV8856_REG_VTS, OV8856_REG_VALUE_16BIT, ov8856->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov8856_test_pattern(ov8856, ctrl->val); break; default: ret = -EINVAL; break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov8856_ctrl_ops = { .s_ctrl = ov8856_set_ctrl, }; static int ov8856_init_controls(struct ov8856 *ov8856) { struct v4l2_ctrl_handler *ctrl_hdlr; s64 exposure_max, h_blank; const struct ov8856_mode *cur_mode = ov8856->cur_mode; int ret; ctrl_hdlr = &ov8856->ctrl_handler; ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); if (ret) return ret; ctrl_hdlr->lock = &ov8856->mutex; ov8856->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_LINK_FREQ, ARRAY_SIZE(link_freq_menu_items) - 1, 0, link_freq_menu_items); if (ov8856->link_freq) ov8856->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; ov8856->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_PIXEL_RATE, 0, to_pixel_rate(OV8856_LINK_FREQ_720MBPS), 1, to_pixel_rate(OV8856_LINK_FREQ_720MBPS)); ov8856->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_VBLANK, cur_mode->vts_min - cur_mode->height, OV8856_VTS_MAX - cur_mode->height, 1, cur_mode->vts_def - cur_mode->height); h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index) - cur_mode->width; ov8856->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_HBLANK, h_blank, h_blank, 1, h_blank); if (ov8856->hblank) ov8856->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, OV8856_ANAL_GAIN_MIN, OV8856_ANAL_GAIN_MAX, OV8856_ANAL_GAIN_STEP, OV8856_ANAL_GAIN_MIN); v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_DIGITAL_GAIN, OV8856_DGTL_GAIN_MIN, OV8856_DGTL_GAIN_MAX, OV8856_DGTL_GAIN_STEP, OV8856_DGTL_GAIN_DEFAULT); exposure_max = cur_mode->vts_def - OV8856_EXPOSURE_MAX_MARGIN; ov8856->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_EXPOSURE, OV8856_EXPOSURE_MIN, exposure_max, OV8856_EXPOSURE_STEP, exposure_max); v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov8856_test_pattern_menu) - 1, 0, 0, ov8856_test_pattern_menu); if (ctrl_hdlr->error) return ctrl_hdlr->error; ov8856->sd.ctrl_handler = ctrl_hdlr; return 0; } static void ov8856_update_pad_format(const struct ov8856_mode *mode, struct v4l2_mbus_framefmt *fmt) { fmt->width = mode->width; fmt->height = mode->height; fmt->code = 
MEDIA_BUS_FMT_SGRBG10_1X10; fmt->field = V4L2_FIELD_NONE; } static int ov8856_start_streaming(struct ov8856 *ov8856) { struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); const struct ov8856_reg_list *reg_list; int link_freq_index, ret; link_freq_index = ov8856->cur_mode->link_freq_index; reg_list = &link_freq_configs[link_freq_index].reg_list; ret = ov8856_write_reg_list(ov8856, reg_list); if (ret) { dev_err(&client->dev, "failed to set plls"); return ret; } reg_list = &ov8856->cur_mode->reg_list; ret = ov8856_write_reg_list(ov8856, reg_list); if (ret) { dev_err(&client->dev, "failed to set mode"); return ret; } ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler); if (ret) return ret; ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT, OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING); if (ret) { dev_err(&client->dev, "failed to set stream"); return ret; } return 0; } static void ov8856_stop_streaming(struct ov8856 *ov8856) { struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); if (ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT, OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY)) dev_err(&client->dev, "failed to set stream"); } static int ov8856_set_stream(struct v4l2_subdev *sd, int enable) { struct ov8856 *ov8856 = to_ov8856(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (ov8856->streaming == enable) return 0; mutex_lock(&ov8856->mutex); if (enable) { ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); mutex_unlock(&ov8856->mutex); return ret; } ret = ov8856_start_streaming(ov8856); if (ret) { enable = 0; ov8856_stop_streaming(ov8856); pm_runtime_put(&client->dev); } } else { ov8856_stop_streaming(ov8856); pm_runtime_put(&client->dev); } ov8856->streaming = enable; mutex_unlock(&ov8856->mutex); return ret; } static int __maybe_unused ov8856_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov8856 *ov8856 = to_ov8856(sd); mutex_lock(&ov8856->mutex); if (ov8856->streaming) ov8856_stop_streaming(ov8856); mutex_unlock(&ov8856->mutex); return 0; } static int __maybe_unused ov8856_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov8856 *ov8856 = to_ov8856(sd); int ret; mutex_lock(&ov8856->mutex); if (ov8856->streaming) { ret = ov8856_start_streaming(ov8856); if (ret) { ov8856->streaming = false; ov8856_stop_streaming(ov8856); mutex_unlock(&ov8856->mutex); return ret; } } mutex_unlock(&ov8856->mutex); return 0; } static int ov8856_set_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov8856 *ov8856 = to_ov8856(sd); const struct ov8856_mode *mode; s32 vblank_def, h_blank; mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); mutex_lock(&ov8856->mutex); ov8856_update_pad_format(mode, &fmt->format); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; #else *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; #endif } else { ov8856->cur_mode = mode; 
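		/*
		 * ACTIVE format: the selected mode becomes current, so the
		 * read-only link-frequency and pixel-rate controls are
		 * refreshed and the vblank/hblank limits are re-derived from
		 * the new mode timings below.
		 */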
__v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index); __v4l2_ctrl_s_ctrl_int64(ov8856->pixel_rate, to_pixel_rate(mode->link_freq_index)); /* Update limits and set FPS to default */ vblank_def = mode->vts_def - mode->height; __v4l2_ctrl_modify_range(ov8856->vblank, mode->vts_min - mode->height, OV8856_VTS_MAX - mode->height, 1, vblank_def); __v4l2_ctrl_s_ctrl(ov8856->vblank, vblank_def); h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - mode->width; __v4l2_ctrl_modify_range(ov8856->hblank, h_blank, h_blank, 1, h_blank); } mutex_unlock(&ov8856->mutex); return 0; } static int ov8856_get_format(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_format *fmt) { struct ov8856 *ov8856 = to_ov8856(sd); mutex_lock(&ov8856->mutex); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd, cfg, fmt->pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd, sd_state, fmt->pad); #else fmt->format = *v4l2_subdev_state_get_format( sd_state, fmt->pad); #endif else ov8856_update_pad_format(ov8856->cur_mode, &fmt->format); mutex_unlock(&ov8856->mutex); return 0; } static int ov8856_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_mbus_code_enum *code) { /* Only one bayer order GRBG is supported */ if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov8856_enum_frame_size(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *sd_state, #endif struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov8856 *ov8856 = to_ov8856(sd); mutex_lock(&ov8856->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ov8856_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->pad, 0)); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) ov8856_update_pad_format(&supported_modes[0], v4l2_subdev_get_try_format(sd, fh->state, 0)); #else ov8856_update_pad_format(&supported_modes[0], v4l2_subdev_state_get_format(fh->state, 0)); #endif mutex_unlock(&ov8856->mutex); return 0; } static const struct v4l2_subdev_video_ops ov8856_video_ops = { .s_stream = ov8856_set_stream, }; static const struct v4l2_subdev_pad_ops ov8856_pad_ops = { .set_fmt = ov8856_set_format, .get_fmt = ov8856_get_format, .enum_mbus_code = ov8856_enum_mbus_code, .enum_frame_size = ov8856_enum_frame_size, }; static const struct v4l2_subdev_ops ov8856_subdev_ops = { .video = &ov8856_video_ops, .pad = &ov8856_pad_ops, }; static const struct media_entity_operations ov8856_subdev_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_internal_ops ov8856_internal_ops = { .open = ov8856_open, }; static int ov8856_identify_module(struct ov8856 *ov8856) { struct i2c_client 
*client = v4l2_get_subdevdata(&ov8856->sd); int ret; u32 val; ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID, OV8856_REG_VALUE_24BIT, &val); if (ret) return ret; if (val != OV8856_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x", OV8856_CHIP_ID, val); return -ENXIO; } return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) static int ov8856_remove(struct i2c_client *client) #else static void ov8856_remove(struct i2c_client *client) #endif { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov8856 *ov8856 = to_ov8856(sd); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); v4l2_ctrl_handler_free(sd->ctrl_handler); pm_runtime_disable(&client->dev); mutex_destroy(&ov8856->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) return 0; #endif } static int ov8856_probe(struct i2c_client *client) { struct ov8856 *ov8856; int ret; ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL); if (!ov8856) return -ENOMEM; v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops); ret = ov8856_identify_module(ov8856); if (ret) { dev_err(&client->dev, "failed to find sensor: %d", ret); return ret; } mutex_init(&ov8856->mutex); ov8856->cur_mode = &supported_modes[0]; ret = ov8856_init_controls(ov8856); if (ret) { dev_err(&client->dev, "failed to init controls: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } ov8856->sd.internal_ops = &ov8856_internal_ops; ov8856->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; ov8856->sd.entity.ops = &ov8856_subdev_entity_ops; ov8856->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ov8856->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad); if (ret) { dev_err(&client->dev, "failed to init entity pads: %d", ret); goto probe_error_v4l2_ctrl_handler_free; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) ret = v4l2_async_register_subdev_sensor_common(&ov8856->sd); #else ret = v4l2_async_register_subdev_sensor(&ov8856->sd); #endif if (ret < 0) { dev_err(&client->dev, "failed to register V4L2 subdev: %d", ret); goto probe_error_media_entity_cleanup; } /* * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. 
*/ pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; probe_error_media_entity_cleanup: media_entity_cleanup(&ov8856->sd.entity); probe_error_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(ov8856->sd.ctrl_handler); mutex_destroy(&ov8856->mutex); return ret; } static const struct dev_pm_ops ov8856_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ov8856_suspend, ov8856_resume) }; static const struct i2c_device_id ov8856_id_table[] = { { "ov8856", 0 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(i2c, ov8856_id_table); #ifdef CONFIG_ACPI static const struct acpi_device_id ov8856_acpi_ids[] = { {"OVTI8856"}, {} }; MODULE_DEVICE_TABLE(acpi, ov8856_acpi_ids); #endif static struct i2c_driver ov8856_i2c_driver = { .driver = { .name = "ov8856", .pm = &ov8856_pm_ops, .acpi_match_table = ACPI_PTR(ov8856_acpi_ids), }, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) .probe_new = ov8856_probe, #else .probe = ov8856_probe, #endif .remove = ov8856_remove, .id_table = ov8856_id_table, }; module_i2c_driver(ov8856_i2c_driver); MODULE_AUTHOR("Ben Kao "); MODULE_AUTHOR("Qiu, Tianshu "); MODULE_DESCRIPTION("OmniVision OV8856 sensor driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/power_ctrl_logic.c000066400000000000000000000100001471702545500256130ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020-2021 Intel Corporation. #include #include #include #include #include #include #define PCL_DRV_NAME "power_ctrl_logic" struct power_ctrl_logic { /* gpio resource*/ struct gpio_desc *reset_gpio; struct gpio_desc *powerdn_gpio; struct gpio_desc *clocken_gpio; struct gpio_desc *indled_gpio; /* status */ struct mutex status_lock; bool power_on; bool gpio_ready; }; struct power_ctrl_gpio { const char *name; struct gpio_desc **pin; }; /* mcu gpio resources*/ static const struct acpi_gpio_params camreset_gpio = { 0, 0, false }; static const struct acpi_gpio_params campwdn_gpio = { 1, 0, false }; static const struct acpi_gpio_params midmclken_gpio = { 2, 0, false }; static const struct acpi_gpio_params led_gpio = { 3, 0, false }; static const struct acpi_gpio_mapping dsc1_acpi_gpios[] = { { "camreset-gpios", &camreset_gpio, 1 }, { "campwdn-gpios", &campwdn_gpio, 1 }, { "midmclken-gpios", &midmclken_gpio, 1 }, { "indled-gpios", &led_gpio, 1 }, { } }; static struct power_ctrl_logic pcl = { .reset_gpio = NULL, .powerdn_gpio = NULL, .clocken_gpio = NULL, .indled_gpio = NULL, .power_on = false, .gpio_ready = false, }; static struct power_ctrl_gpio pcl_gpios[] = { { "camreset", &pcl.reset_gpio }, { "campwdn", &pcl.powerdn_gpio }, { "midmclken", &pcl.clocken_gpio}, { "indled", &pcl.indled_gpio}, }; static int power_ctrl_logic_add(struct acpi_device *adev) { int i, ret; dev_dbg(&adev->dev, "@%s, enter\n", __func__); set_primary_fwnode(&adev->dev, &adev->fwnode); ret = acpi_dev_add_driver_gpios(adev, dsc1_acpi_gpios); if (ret) { dev_err(&adev->dev, "@%s: --111---fail to add gpio. 
ret %d\n", __func__, ret); return -EBUSY; } for (i = 0; i < ARRAY_SIZE(pcl_gpios); i++) { *pcl_gpios[i].pin = gpiod_get(&adev->dev, pcl_gpios[i].name, GPIOD_OUT_LOW); if (IS_ERR(*pcl_gpios[i].pin)) { dev_dbg(&adev->dev, "failed to get gpio %s\n", pcl_gpios[i].name); return -EPROBE_DEFER; } } mutex_lock(&pcl.status_lock); pcl.gpio_ready = true; mutex_unlock(&pcl.status_lock); #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0) acpi_dev_clear_dependencies(adev); #endif dev_dbg(&adev->dev, "@%s, exit\n", __func__); return ret; } static int power_ctrl_logic_remove(struct acpi_device *adev) { dev_dbg(&adev->dev, "@%s, enter\n", __func__); mutex_lock(&pcl.status_lock); pcl.gpio_ready = false; gpiod_set_value_cansleep(pcl.reset_gpio, 0); gpiod_put(pcl.reset_gpio); gpiod_set_value_cansleep(pcl.powerdn_gpio, 0); gpiod_put(pcl.powerdn_gpio); gpiod_set_value_cansleep(pcl.clocken_gpio, 0); gpiod_put(pcl.clocken_gpio); gpiod_set_value_cansleep(pcl.indled_gpio, 0); gpiod_put(pcl.indled_gpio); mutex_unlock(&pcl.status_lock); dev_dbg(&adev->dev, "@%s, exit\n", __func__); return 0; } static struct acpi_device_id acpi_ids[] = { { "INT3472", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, acpi_ids); static struct acpi_driver _driver = { .name = PCL_DRV_NAME, .class = PCL_DRV_NAME, .ids = acpi_ids, .ops = { .add = power_ctrl_logic_add, .remove = power_ctrl_logic_remove, }, }; module_acpi_driver(_driver); int power_ctrl_logic_set_power(int on) { mutex_lock(&pcl.status_lock); if (!pcl.gpio_ready) { pr_debug("@%s,failed to set power, gpio_ready=%d, on=%d\n", __func__, pcl.gpio_ready, on); mutex_unlock(&pcl.status_lock); return -EPROBE_DEFER; } if (pcl.power_on != on) { gpiod_set_value_cansleep(pcl.reset_gpio, on); gpiod_set_value_cansleep(pcl.powerdn_gpio, on); gpiod_set_value_cansleep(pcl.clocken_gpio, on); gpiod_set_value_cansleep(pcl.indled_gpio, on); pcl.power_on = on; } mutex_unlock(&pcl.status_lock); return 0; } EXPORT_SYMBOL_GPL(power_ctrl_logic_set_power); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Qiu, Tianshu "); MODULE_AUTHOR("Xu, Chongyang "); MODULE_DESCRIPTION("Power Control Logic Driver"); MODULE_LICENSE("GPL v2"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/i2c/power_ctrl_logic.h000066400000000000000000000003551471702545500256340ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2020-2021 Intel Corporation. */ #ifndef _POWER_CTRL_LOGIC_H_ #define _POWER_CTRL_LOGIC_H_ /* exported function for extern module */ int power_ctrl_logic_set_power(int on); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/000077500000000000000000000000001471702545500222215ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/000077500000000000000000000000001471702545500233345ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/cio2-bridge.c000066400000000000000000000355111471702545500255730ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* Author: Dan Scally */ #include #include #include #include #include #include #include #include "cio2-bridge.h" /* * Extend this array with ACPI Hardware IDs of devices known to be working * plus the number of link-frequencies expected by their drivers, along with * the frequency values in hertz. This is somewhat opportunistic way of adding * support for this for now in the hopes of a better source for the information * (possibly some encoded value in the SSDB buffer that we're unaware of) * becoming apparent in the future. 
* * Do not add an entry for a sensor that is not actually supported. */ static const struct cio2_sensor_config cio2_supported_sensors[] = { /* Omnivision OV5693 */ CIO2_SENSOR_CONFIG("INT33BE", 1, 419200000), /* Omnivision OV8865 */ CIO2_SENSOR_CONFIG("INT347A", 1, 360000000), /* Omnivision OV7251 */ CIO2_SENSOR_CONFIG("INT347E", 1, 319200000), /* Omnivision OV2680 */ CIO2_SENSOR_CONFIG("OVTI2680", 1, 331200000), /* Omnivision ov02c10 */ CIO2_SENSOR_CONFIG("OVTI02C1", 1, 400000000), /* Omnivision ov01a10 */ CIO2_SENSOR_CONFIG("OVTI01A0", 1, 400000000), /* Omnivision ov01a1s */ CIO2_SENSOR_CONFIG("OVTI01AS", 1, 400000000), /* Omnivision ov8856 */ CIO2_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000), /* Omnivision ov2740 */ CIO2_SENSOR_CONFIG("INT3474", 2, 360000000, 180000000), /* Hynix hi556 */ CIO2_SENSOR_CONFIG("INT3537", 1, 437000000), /* Himax hm2170 */ CIO2_SENSOR_CONFIG("HIMX2170", 1, 384000000), /* Himax hm2172 */ CIO2_SENSOR_CONFIG("HIMX2172", 1, 384000000), /* Omnivision ov02e10 */ CIO2_SENSOR_CONFIG("OVTI02E1", 1, 360000000), /* Himax hm11b1 */ CIO2_SENSOR_CONFIG("HIMX11B1", 1, 384000000), /* Omnivision ov13b10 */ CIO2_SENSOR_CONFIG("OVTIDB10", 1, 560000000), CIO2_SENSOR_CONFIG("OVTI13B1", 1, 560000000), /* Omnivision ov08a10 */ CIO2_SENSOR_CONFIG("OVTI08A1", 1, 500000000), /* Omnivision ov08x40 */ CIO2_SENSOR_CONFIG("OVTI08F4", 1, 400000000), }; static const struct cio2_property_names prop_names = { .clock_frequency = "clock-frequency", .rotation = "rotation", .orientation = "orientation", .bus_type = "bus-type", .data_lanes = "data-lanes", .remote_endpoint = "remote-endpoint", .link_frequencies = "link-frequencies", }; static const char * const cio2_vcm_types[] = { "ad5823", "dw9714", "ad5816", "dw9719", "dw9718", "dw9806b", "wv517s", "lc898122xa", "lc898212axb", }; static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id, void *data, u32 size) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; int ret = 0; status = acpi_evaluate_object(adev->handle, id, NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; obj = buffer.pointer; if (!obj) { dev_err(&adev->dev, "Couldn't locate ACPI buffer\n"); return -ENODEV; } if (obj->type != ACPI_TYPE_BUFFER) { dev_err(&adev->dev, "Not an ACPI buffer\n"); ret = -ENODEV; goto out_free_buff; } if (obj->buffer.length > size) { dev_err(&adev->dev, "Given buffer is too small\n"); ret = -EINVAL; goto out_free_buff; } memcpy(data, obj->buffer.pointer, obj->buffer.length); out_free_buff: kfree(buffer.pointer); return ret; } static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor) { switch (sensor->ssdb.degree) { case CIO2_SENSOR_ROTATION_NORMAL: return 0; case CIO2_SENSOR_ROTATION_INVERTED: return 180; default: dev_warn(&sensor->adev->dev, "Unknown rotation %d. 
Assume 0 degree rotation\n", sensor->ssdb.degree); return 0; } } static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor) { switch (sensor->pld->panel) { case ACPI_PLD_PANEL_FRONT: return V4L2_FWNODE_ORIENTATION_FRONT; case ACPI_PLD_PANEL_BACK: return V4L2_FWNODE_ORIENTATION_BACK; case ACPI_PLD_PANEL_TOP: case ACPI_PLD_PANEL_LEFT: case ACPI_PLD_PANEL_RIGHT: case ACPI_PLD_PANEL_UNKNOWN: return V4L2_FWNODE_ORIENTATION_EXTERNAL; default: dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n", sensor->pld->panel); return V4L2_FWNODE_ORIENTATION_EXTERNAL; } } static void cio2_bridge_create_fwnode_properties(struct cio2_sensor *sensor, struct cio2_bridge *bridge, const struct cio2_sensor_config *cfg) { u32 rotation; enum v4l2_fwnode_orientation orientation; struct cio2_property_names *prop_name; rotation = cio2_bridge_parse_rotation(sensor); orientation = cio2_bridge_parse_orientation(sensor); sensor->prop_names = prop_names; prop_name = &sensor->prop_names; sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]); sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]); sensor->dev_properties[0] = PROPERTY_ENTRY_U32(prop_name->clock_frequency, sensor->ssdb.mclkspeed); sensor->dev_properties[1] = PROPERTY_ENTRY_U32(prop_name->rotation, rotation); sensor->dev_properties[2] = PROPERTY_ENTRY_U32(prop_name->orientation, orientation); if (sensor->ssdb.vcmtype) { sensor->vcm_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]); sensor->dev_properties[3] = PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref); } sensor->ep_properties[0] = PROPERTY_ENTRY_U32(prop_name->bus_type, V4L2_FWNODE_BUS_TYPE_CSI2_DPHY); sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(prop_name->data_lanes, bridge->data_lanes, sensor->ssdb.lanes); sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(prop_name->remote_endpoint, sensor->local_ref); if (cfg->nr_link_freqs > 0) sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(prop_name->link_frequencies, cfg->link_freqs, cfg->nr_link_freqs); sensor->cio2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(prop_name->data_lanes, bridge->data_lanes, sensor->ssdb.lanes); sensor->cio2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(prop_name->remote_endpoint, sensor->remote_ref); } static void cio2_bridge_init_swnode_names(struct cio2_sensor *sensor) { snprintf(sensor->node_names.remote_port, sizeof(sensor->node_names.remote_port), SWNODE_GRAPH_PORT_NAME_FMT, sensor->ssdb.link); snprintf(sensor->node_names.port, sizeof(sensor->node_names.port), SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */ snprintf(sensor->node_names.endpoint, sizeof(sensor->node_names.endpoint), SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */ } static void cio2_bridge_init_swnode_group(struct cio2_sensor *sensor) { struct software_node *nodes = sensor->swnodes; sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID]; sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT]; sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT]; sensor->group[SWNODE_CIO2_PORT] = &nodes[SWNODE_CIO2_PORT]; sensor->group[SWNODE_CIO2_ENDPOINT] = &nodes[SWNODE_CIO2_ENDPOINT]; if (sensor->ssdb.vcmtype && sensor->ssdb.vcmtype <= ARRAY_SIZE(cio2_vcm_types)) sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM]; } static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge, struct cio2_sensor *sensor) { struct software_node *nodes = sensor->swnodes; 
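	/*
	 * Build the software-node graph for this sensor: a sensor device
	 * node with one port and endpoint, the matching CIO2 port/endpoint
	 * it connects to, and, when the SSDB reports a known VCM type, an
	 * additional VCM node.
	 */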
cio2_bridge_init_swnode_names(sensor); snprintf(sensor->node_names.sensor_name, sizeof(sensor->node_names.sensor_name), "%s-%u", sensor->name, sensor->ssdb.link); nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->node_names.sensor_name, sensor->dev_properties); nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port, &nodes[SWNODE_SENSOR_HID]); nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT( sensor->node_names.endpoint, &nodes[SWNODE_SENSOR_PORT], sensor->ep_properties); nodes[SWNODE_CIO2_PORT] = NODE_PORT(sensor->node_names.remote_port, &bridge->cio2_hid_node); nodes[SWNODE_CIO2_ENDPOINT] = NODE_ENDPOINT(sensor->node_names.endpoint, &nodes[SWNODE_CIO2_PORT], sensor->cio2_properties); if (sensor->ssdb.vcmtype && sensor->ssdb.vcmtype <= ARRAY_SIZE(cio2_vcm_types)) { snprintf(sensor->node_names.vcm_name, sizeof(sensor->node_names.vcm_name), "%s-%u", cio2_vcm_types[sensor->ssdb.vcmtype - 1], sensor->ssdb.link); nodes[SWNODE_VCM] = NODE_VCM(sensor->node_names.vcm_name); } cio2_bridge_init_swnode_group(sensor); } static void cio2_bridge_instantiate_vcm_i2c_client(struct cio2_sensor *sensor) { struct i2c_board_info board_info = { }; char name[16]; if (!sensor->ssdb.vcmtype || sensor->ssdb.vcmtype > ARRAY_SIZE(cio2_vcm_types)) return; snprintf(name, sizeof(name), "%s-VCM", acpi_dev_name(sensor->adev)); board_info.dev_name = name; strscpy(board_info.type, cio2_vcm_types[sensor->ssdb.vcmtype - 1], ARRAY_SIZE(board_info.type)); board_info.swnode = &sensor->swnodes[SWNODE_VCM]; sensor->vcm_i2c_client = #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(sensor->adev), 1, &board_info); #else i2c_acpi_new_device(&sensor->adev->dev, 1, &board_info); #endif if (IS_ERR(sensor->vcm_i2c_client)) { dev_warn(&sensor->adev->dev, "Error instantiation VCM i2c-client: %ld\n", PTR_ERR(sensor->vcm_i2c_client)); sensor->vcm_i2c_client = NULL; } } static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge) { struct cio2_sensor *sensor; unsigned int i; for (i = 0; i < bridge->n_sensors; i++) { sensor = &bridge->sensors[i]; software_node_unregister_node_group(sensor->group); ACPI_FREE(sensor->pld); acpi_dev_put(sensor->adev); i2c_unregister_device(sensor->vcm_i2c_client); } } static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, struct cio2_bridge *bridge, struct pci_dev *cio2) { struct fwnode_handle *fwnode, *current_fwnode; struct cio2_sensor *sensor; struct acpi_device *adev; acpi_status status; int ret; for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) { if (!adev->status.enabled) continue; if (bridge->n_sensors >= CIO2_NUM_PORTS) { acpi_dev_put(adev); dev_err(&cio2->dev, "Exceeded available CIO2 ports\n"); return -EINVAL; } sensor = &bridge->sensors[bridge->n_sensors]; strscpy(sensor->name, cfg->hid, sizeof(sensor->name)); ret = cio2_bridge_read_acpi_buffer(adev, "SSDB", &sensor->ssdb, sizeof(sensor->ssdb)); if (ret) goto err_put_adev; if (sensor->ssdb.vcmtype > ARRAY_SIZE(cio2_vcm_types)) { dev_warn(&adev->dev, "Unknown VCM type %d\n", sensor->ssdb.vcmtype); sensor->ssdb.vcmtype = 0; } status = acpi_get_physical_device_location(adev->handle, &sensor->pld); if (ACPI_FAILURE(status)) { ret = -ENODEV; goto err_put_adev; } if (sensor->ssdb.lanes > CIO2_MAX_LANES) { dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n"); ret = -EINVAL; goto err_free_pld; } cio2_bridge_create_fwnode_properties(sensor, bridge, cfg); cio2_bridge_create_connection_swnodes(bridge, sensor); ret = software_node_register_node_group(sensor->group); if (ret) 
goto err_free_pld; fwnode = software_node_fwnode(&sensor->swnodes[ SWNODE_SENSOR_HID]); if (!fwnode) { ret = -ENODEV; goto err_free_swnodes; } sensor->adev = acpi_dev_get(adev); current_fwnode = acpi_fwnode_handle(adev); current_fwnode->secondary = fwnode; cio2_bridge_instantiate_vcm_i2c_client(sensor); dev_info(&cio2->dev, "Found supported sensor %s\n", acpi_dev_name(adev)); bridge->n_sensors++; } return 0; err_free_swnodes: software_node_unregister_node_group(sensor->group); err_free_pld: ACPI_FREE(sensor->pld); err_put_adev: acpi_dev_put(adev); return ret; } static int cio2_bridge_connect_sensors(struct cio2_bridge *bridge, struct pci_dev *cio2) { unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) { const struct cio2_sensor_config *cfg = &cio2_supported_sensors[i]; ret = cio2_bridge_connect_sensor(cfg, bridge, cio2); if (ret) goto err_unregister_sensors; } return 0; err_unregister_sensors: cio2_bridge_unregister_sensors(bridge); return ret; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) /* * The VCM cannot be probed until the PMIC is completely setup. We cannot rely * on -EPROBE_DEFER for this, since the consumer<->supplier relations between * the VCM and regulators/clks are not described in ACPI, instead they are * passed as board-data to the PMIC drivers. Since -PROBE_DEFER does not work * for the clks/regulators the VCM i2c-clients must not be instantiated until * the PMIC is fully setup. * * The sensor/VCM ACPI device has an ACPI _DEP on the PMIC, check this using the * acpi_dev_ready_for_enumeration() helper, like the i2c-core-acpi code does * for the sensors. */ static int cio2_bridge_sensors_are_ready(void) { struct acpi_device *adev; bool ready = true; unsigned int i; for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) { const struct cio2_sensor_config *cfg = &cio2_supported_sensors[i]; for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) { if (!adev->status.enabled) continue; if (!acpi_dev_ready_for_enumeration(adev)) ready = false; } } return ready; } #endif int cio2_bridge_init(struct pci_dev *cio2) { struct device *dev = &cio2->dev; struct fwnode_handle *fwnode; struct cio2_bridge *bridge; unsigned int i; int ret; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) if (!cio2_bridge_sensors_are_ready()) return -EPROBE_DEFER; #endif bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) return -ENOMEM; strscpy(bridge->cio2_node_name, CIO2_HID, sizeof(bridge->cio2_node_name)); bridge->cio2_hid_node.name = bridge->cio2_node_name; ret = software_node_register(&bridge->cio2_hid_node); if (ret < 0) { dev_err(dev, "Failed to register the CIO2 HID node\n"); goto err_free_bridge; } /* * Map the lane arrangement, which is fixed for the IPU3 (meaning we * only need one, rather than one per sensor). We include it as a * member of the struct cio2_bridge rather than a global variable so * that it survives if the module is unloaded along with the rest of * the struct. 
*/ for (i = 0; i < ARRAY_SIZE(bridge->data_lanes); i++) bridge->data_lanes[i] = i + 1; ret = cio2_bridge_connect_sensors(bridge, cio2); if (ret || bridge->n_sensors == 0) goto err_unregister_cio2; dev_info(dev, "Connected %d cameras\n", bridge->n_sensors); fwnode = software_node_fwnode(&bridge->cio2_hid_node); if (!fwnode) { dev_err(dev, "Error getting fwnode from cio2 software_node\n"); ret = -ENODEV; goto err_unregister_sensors; } set_secondary_fwnode(dev, fwnode); return 0; err_unregister_sensors: cio2_bridge_unregister_sensors(bridge); err_unregister_cio2: software_node_unregister(&bridge->cio2_hid_node); err_free_bridge: kfree(bridge); return ret; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/cio2-bridge.h000066400000000000000000000063431471702545500256010ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Author: Dan Scally */ #ifndef __CIO2_BRIDGE_H #define __CIO2_BRIDGE_H #include #include #include //#include "ipu3-cio2.h" struct i2c_client; #define CIO2_HID "INT343E" #define CIO2_MAX_LANES 4 #define MAX_NUM_LINK_FREQS 3 #define CIO2_NUM_PORTS 4 /* Values are educated guesses as we don't have a spec */ #define CIO2_SENSOR_ROTATION_NORMAL 0 #define CIO2_SENSOR_ROTATION_INVERTED 1 #define CIO2_SENSOR_CONFIG(_HID, _NR, ...) \ ((const struct cio2_sensor_config) { \ .hid = (_HID), \ .nr_link_freqs = (_NR), \ .link_freqs = { __VA_ARGS__ } \ }) #define NODE_SENSOR(_HID, _PROPS) \ ((const struct software_node) { \ .name = (_HID), \ .properties = (_PROPS), \ }) #define NODE_PORT(_PORT, _SENSOR_NODE) \ ((const struct software_node) { \ .name = (_PORT), \ .parent = (_SENSOR_NODE), \ }) #define NODE_ENDPOINT(_EP, _PORT, _PROPS) \ ((const struct software_node) { \ .name = (_EP), \ .parent = (_PORT), \ .properties = (_PROPS), \ }) #define NODE_VCM(_TYPE) \ ((const struct software_node) { \ .name = (_TYPE), \ }) enum cio2_sensor_swnodes { SWNODE_SENSOR_HID, SWNODE_SENSOR_PORT, SWNODE_SENSOR_ENDPOINT, SWNODE_CIO2_PORT, SWNODE_CIO2_ENDPOINT, /* Must be last because it is optional / maybe empty */ SWNODE_VCM, SWNODE_COUNT }; /* Data representation as it is in ACPI SSDB buffer */ struct cio2_sensor_ssdb { u8 version; u8 sku; u8 guid_csi2[16]; u8 devfunction; u8 bus; u32 dphylinkenfuses; u32 clockdiv; u8 link; u8 lanes; u32 csiparams[10]; u32 maxlanespeed; u8 sensorcalibfileidx; u8 sensorcalibfileidxInMBZ[3]; u8 romtype; u8 vcmtype; u8 platforminfo; u8 platformsubinfo; u8 flash; u8 privacyled; u8 degree; u8 mipilinkdefined; u32 mclkspeed; u8 controllogicid; u8 reserved1[3]; u8 mclkport; u8 reserved2[13]; } __packed; struct cio2_property_names { char clock_frequency[16]; char rotation[9]; char orientation[12]; char bus_type[9]; char data_lanes[11]; char remote_endpoint[16]; char link_frequencies[17]; }; struct cio2_node_names { char port[7]; char endpoint[11]; char remote_port[7]; char sensor_name[ACPI_ID_LEN + 2]; char vcm_name[ACPI_ID_LEN + 2]; }; struct cio2_sensor_config { const char *hid; const u8 nr_link_freqs; const u64 link_freqs[MAX_NUM_LINK_FREQS]; }; struct cio2_sensor { char name[ACPI_ID_LEN]; struct acpi_device *adev; struct i2c_client *vcm_i2c_client; /* SWNODE_COUNT + 1 for terminating NULL */ const struct software_node *group[SWNODE_COUNT + 1]; struct software_node swnodes[SWNODE_COUNT]; struct cio2_node_names node_names; struct cio2_sensor_ssdb ssdb; struct acpi_pld_info *pld; struct cio2_property_names prop_names; struct property_entry ep_properties[5]; struct property_entry dev_properties[5]; struct property_entry cio2_properties[3]; struct 
software_node_ref_args local_ref[1]; struct software_node_ref_args remote_ref[1]; struct software_node_ref_args vcm_ref[1]; }; struct cio2_bridge { char cio2_node_name[ACPI_ID_LEN]; struct software_node cio2_hid_node; u32 data_lanes[CIO2_MAX_LANES]; unsigned int n_sensors; struct cio2_sensor sensors[CIO2_NUM_PORTS]; }; #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-bus.c000066400000000000000000000142621471702545500250710ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-platform.h" #include "ipu-dma.h" #ifdef CONFIG_PM static struct bus_type ipu_bus; static int bus_pm_runtime_suspend(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); int rval; rval = pm_generic_runtime_suspend(dev); if (rval) return rval; rval = ipu_buttress_power(dev, adev->ctrl, false); dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval); if (!rval) return 0; dev_err(dev, "power down failed!\n"); /* Powering down failed, attempt to resume device now */ rval = pm_generic_runtime_resume(dev); if (!rval) return -EBUSY; return -EIO; } static int bus_pm_runtime_resume(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); int rval; rval = ipu_buttress_power(dev, adev->ctrl, true); dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval); if (rval) return rval; rval = pm_generic_runtime_resume(dev); dev_dbg(dev, "%s: resume %d\n", __func__, rval); if (rval) goto out_err; return 0; out_err: ipu_buttress_power(dev, adev->ctrl, false); return -EBUSY; } static const struct dev_pm_ops ipu_bus_pm_ops = { .runtime_suspend = bus_pm_runtime_suspend, .runtime_resume = bus_pm_runtime_resume, }; #define IPU_BUS_PM_OPS (&ipu_bus_pm_ops) #else #define IPU_BUS_PM_OPS NULL #endif static int ipu_bus_match(struct device *dev, struct device_driver *drv) { struct ipu_bus_driver *adrv = to_ipu_bus_driver(drv); dev_dbg(dev, "bus match: \"%s\" --- \"%s\"\n", dev_name(dev), adrv->wanted); return !strncmp(dev_name(dev), adrv->wanted, strlen(adrv->wanted)); } static int ipu_bus_probe(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver); int rval; if (!adev->isp->ipu_bus_ready_to_probe) return -EPROBE_DEFER; dev_dbg(dev, "bus probe dev %s\n", dev_name(dev)); adev->adrv = adrv; if (!adrv->probe) { rval = -ENODEV; goto out_err; } rval = pm_runtime_resume_and_get(&adev->dev); if (rval < 0) { dev_err(&adev->dev, "Failed to get runtime PM\n"); goto out_err; } rval = adrv->probe(adev); pm_runtime_put(&adev->dev); if (rval) goto out_err; return 0; out_err: ipu_bus_set_drvdata(adev, NULL); adev->adrv = NULL; return rval; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) static int ipu_bus_remove(struct device *dev) #else static void ipu_bus_remove(struct device *dev) #endif { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver); if (adrv->remove) adrv->remove(adev); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) return 0; #endif } static struct bus_type ipu_bus = { .name = IPU_BUS_NAME, .match = ipu_bus_match, .probe = ipu_bus_probe, .remove = ipu_bus_remove, .pm = IPU_BUS_PM_OPS, }; static struct mutex ipu_bus_mutex; static void ipu_bus_release(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); kfree(adev->pdata); kfree(adev); } 
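/*
 * Allocate and initialize a child device on the IPU bus. The device is
 * registered later via ipu_bus_add_device(); runtime PM is enabled here
 * but kept forbidden until the device has actually been added.
 */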
struct ipu_bus_device *ipu_bus_initialize_device(struct pci_dev *pdev, struct device *parent, void *pdata, struct ipu_buttress_ctrl *ctrl, char *name, unsigned int nr) { struct ipu_bus_device *adev; struct ipu_device *isp = pci_get_drvdata(pdev); adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) return ERR_PTR(-ENOMEM); adev->dev.parent = parent; adev->dev.bus = &ipu_bus; adev->dev.release = ipu_bus_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 16) adev->dev.dma_ops = &ipu_dma_ops; #else adev->dev.archdata.dma_ops = &ipu_dma_ops; #endif adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ? IPU_MMU_ADDRESS_BITS : IPU_MMU_ADDRESS_BITS_NON_SECURE); adev->dev.dma_mask = &adev->dma_mask; adev->dev.dma_parms = pdev->dev.dma_parms; adev->dev.coherent_dma_mask = adev->dma_mask; adev->ctrl = ctrl; adev->pdata = pdata; adev->isp = isp; mutex_init(&adev->resume_lock); dev_set_name(&adev->dev, "%s%d", name, nr); device_initialize(&adev->dev); pm_runtime_forbid(&adev->dev); pm_runtime_enable(&adev->dev); return adev; } int ipu_bus_add_device(struct ipu_bus_device *adev) { int rval; rval = device_add(&adev->dev); if (rval) { put_device(&adev->dev); return rval; } mutex_lock(&ipu_bus_mutex); list_add(&adev->list, &adev->isp->devices); mutex_unlock(&ipu_bus_mutex); pm_runtime_allow(&adev->dev); return 0; } void ipu_bus_del_devices(struct pci_dev *pdev) { struct ipu_device *isp = pci_get_drvdata(pdev); struct ipu_bus_device *adev, *save; mutex_lock(&ipu_bus_mutex); list_for_each_entry_safe(adev, save, &isp->devices, list) { pm_runtime_disable(&adev->dev); list_del(&adev->list); device_unregister(&adev->dev); } mutex_unlock(&ipu_bus_mutex); } int ipu_bus_register_driver(struct ipu_bus_driver *adrv) { adrv->drv.bus = &ipu_bus; return driver_register(&adrv->drv); } EXPORT_SYMBOL(ipu_bus_register_driver); int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv) { driver_unregister(&adrv->drv); return 0; } EXPORT_SYMBOL(ipu_bus_unregister_driver); int ipu_bus_register(void) { mutex_init(&ipu_bus_mutex); return bus_register(&ipu_bus); } void ipu_bus_unregister(void) { mutex_destroy(&ipu_bus_mutex); return bus_unregister(&ipu_bus); } static int flr_rpm_recovery(struct device *dev, void *p) { dev_dbg(dev, "FLR recovery call\n"); /* * We are not necessarily going through device from child to * parent. runtime PM refuses to change state for parent if the child * is still active. At FLR (full reset for whole IPU) that doesn't * matter. Everything has been power gated by HW during the FLR cycle * and we are just cleaning up SW state. Thus, ignore child during * set_suspended. 
*/ pm_suspend_ignore_children(dev, true); pm_runtime_set_suspended(dev); pm_suspend_ignore_children(dev, false); return 0; } int ipu_bus_flr_recovery(void) { bus_for_each_dev(&ipu_bus, NULL, NULL, flr_rpm_recovery); return 0; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-bus.h000066400000000000000000000035751471702545500251030ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_BUS_H #define IPU_BUS_H #include #include #include #include #include #define IPU_BUS_NAME IPU_NAME "-bus" struct ipu_buttress_ctrl; struct ipu_subsystem_trace_config; struct ipu_bus_device { struct device dev; struct list_head list; void *pdata; struct ipu_bus_driver *adrv; struct ipu_mmu *mmu; struct ipu_device *isp; struct ipu_subsystem_trace_config *trace_cfg; struct ipu_buttress_ctrl *ctrl; u64 dma_mask; /* Protect runtime_resume calls on the dev */ struct mutex resume_lock; }; #define to_ipu_bus_device(_dev) container_of(_dev, struct ipu_bus_device, dev) struct ipu_bus_driver { struct device_driver drv; const char *wanted; int (*probe)(struct ipu_bus_device *adev); void (*remove)(struct ipu_bus_device *adev); irqreturn_t (*isr)(struct ipu_bus_device *adev); irqreturn_t (*isr_threaded)(struct ipu_bus_device *adev); bool wake_isr_thread; }; #define to_ipu_bus_driver(_drv) container_of(_drv, struct ipu_bus_driver, drv) struct ipu_bus_device *ipu_bus_initialize_device(struct pci_dev *pdev, struct device *parent, void *pdata, struct ipu_buttress_ctrl *ctrl, char *name, unsigned int nr); int ipu_bus_add_device(struct ipu_bus_device *adev); void ipu_bus_del_devices(struct pci_dev *pdev); int ipu_bus_register_driver(struct ipu_bus_driver *adrv); int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv); int ipu_bus_register(void); void ipu_bus_unregister(void); #define module_ipu_bus_driver(drv) \ module_driver(drv, ipu_bus_register_driver, \ ipu_bus_unregister_driver) #define ipu_bus_set_drvdata(adev, data) dev_set_drvdata(&(adev)->dev, data) #define ipu_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->dev) int ipu_bus_flr_recovery(void); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-buttress.c000066400000000000000000001101201471702545500261410ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-bus.h" #include "ipu-buttress.h" #include "ipu-platform-buttress-regs.h" #include "ipu-cpd.h" #define BOOTLOADER_STATUS_OFFSET 0x15c #define BOOTLOADER_MAGIC_KEY 0xb00710ad #define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 #define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 #define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE #define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 #define BUTTRESS_CSE_BOOTLOAD_TIMEOUT 5000000 #define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT 10000000 #define BUTTRESS_CSE_FWRESET_TIMEOUT 100000 #define BUTTRESS_IPC_TX_TIMEOUT 1000 #define BUTTRESS_IPC_RESET_TIMEOUT 2000 #define BUTTRESS_IPC_RX_TIMEOUT 1000 #define BUTTRESS_IPC_VALIDITY_TIMEOUT 1000000 #define BUTTRESS_TSC_SYNC_TIMEOUT 5000 #define IPU_BUTTRESS_TSC_LIMIT 500 /* 26 us @ 19.2 MHz */ #define IPU_BUTTRESS_TSC_RETRY 10 #define BUTTRESS_CSE_IPC_RESET_RETRY 4 #define BUTTRESS_IPC_CMD_SEND_RETRY 1 static const u32 ipu_adev_irq_mask[] = { 
BUTTRESS_ISR_IS_IRQ, BUTTRESS_ISR_PS_IRQ }; int ipu_buttress_ipc_reset(struct ipu_device *isp, struct ipu_buttress_ipc *ipc) { struct ipu_buttress *b = &isp->buttress; unsigned int retries = BUTTRESS_IPC_RESET_TIMEOUT; u32 val = 0, csr_in_clr; if (!isp->secure_mode) { dev_info(&isp->pdev->dev, "Skip ipc reset for non-secure mode"); return 0; } mutex_lock(&b->ipc_mutex); /* Clear-by-1 CSR (all bits), corresponding internal states. */ val = readl(isp->base + ipc->csr_in); writel(val, isp->base + ipc->csr_in); /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ writel(ENTRY, isp->base + ipc->csr_out); /* * Clear-by-1 all CSR bits EXCEPT following * bits: * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. * C. Possibly custom bits, depending on * their role. */ csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; while (retries--) { usleep_range(400, 500); val = readl(isp->base + ipc->csr_in); switch (val) { case (ENTRY | EXIT): case (ENTRY | EXIT | QUERY): dev_dbg(&isp->pdev->dev, "%s:%s & %s\n", __func__, "IPC_PEER_COMP_ACTIONS_RST_PHASE1", "IPC_PEER_COMP_ACTIONS_RST_PHASE2"); /* * 1) Clear-by-1 CSR bits * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, * IPC_PEER_COMP_ACTIONS_RST_PHASE2). * 2) Set peer CSR bit * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. */ writel(ENTRY | EXIT, isp->base + ipc->csr_in); writel(QUERY, isp->base + ipc->csr_out); break; case ENTRY: case (ENTRY | QUERY): dev_dbg(&isp->pdev->dev, "%s:IPC_PEER_COMP_ACTIONS_RST_PHASE1\n", __func__); /* * 1) Clear-by-1 CSR bits * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). * 2) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE1. */ writel(ENTRY | QUERY, isp->base + ipc->csr_in); writel(ENTRY, isp->base + ipc->csr_out); break; case EXIT: case (EXIT | QUERY): dev_dbg(&isp->pdev->dev, "%s: IPC_PEER_COMP_ACTIONS_RST_PHASE2\n", __func__); /* * Clear-by-1 CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE2. * 1) Clear incoming doorbell. * 2) Clear-by-1 all CSR bits EXCEPT following * bits: * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. * C. Possibly custom bits, depending on * their role. * 3) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE2. */ writel(EXIT, isp->base + ipc->csr_in); writel(0, isp->base + ipc->db0_in); writel(csr_in_clr, isp->base + ipc->csr_in); writel(EXIT, isp->base + ipc->csr_out); /* * Read csr_in again to make sure if RST_PHASE2 is done. * If csr_in is QUERY, it should be handled again. */ usleep_range(200, 300); val = readl(isp->base + ipc->csr_in); if (val & QUERY) { dev_dbg(&isp->pdev->dev, "%s: RST_PHASE2 retry csr_in = %x\n", __func__, val); break; } mutex_unlock(&b->ipc_mutex); return 0; case QUERY: dev_dbg(&isp->pdev->dev, "%s: %s\n", __func__, "IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE"); /* * 1) Clear-by-1 CSR bit * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. 
* 2) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ writel(QUERY, isp->base + ipc->csr_in); writel(ENTRY, isp->base + ipc->csr_out); break; default: dev_dbg_ratelimited(&isp->pdev->dev, "%s: unexpected CSR 0x%x\n", __func__, val); break; } } mutex_unlock(&b->ipc_mutex); dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n"); return -ETIMEDOUT; } static void ipu_buttress_ipc_validity_close(struct ipu_device *isp, struct ipu_buttress_ipc *ipc) { /* Set bit 5 in CSE CSR */ writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, isp->base + ipc->csr_out); } static int ipu_buttress_ipc_validity_open(struct ipu_device *isp, struct ipu_buttress_ipc *ipc) { unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; unsigned int tout = BUTTRESS_IPC_VALIDITY_TIMEOUT; void __iomem *addr; int ret; u32 val; /* Set bit 3 in CSE CSR */ writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, isp->base + ipc->csr_out); addr = isp->base + ipc->csr_in; ret = readl_poll_timeout(addr, val, val & mask, 200, tout); if (ret) { val = readl(addr); dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val); ipu_buttress_ipc_validity_close(isp, ipc); } return ret; } static void ipu_buttress_ipc_recv(struct ipu_device *isp, struct ipu_buttress_ipc *ipc, u32 *ipc_msg) { if (ipc_msg) *ipc_msg = readl(isp->base + ipc->data0_in); writel(0, isp->base + ipc->db0_in); } static int ipu_buttress_ipc_send_bulk(struct ipu_device *isp, enum ipu_buttress_ipc_domain ipc_domain, struct ipu_ipc_buttress_bulk_msg *msgs, u32 size) { struct ipu_buttress *b = &isp->buttress; struct ipu_buttress_ipc *ipc; unsigned long tx_timeout_jiffies, rx_timeout_jiffies; u32 val; int ret; int tout; unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; ipc = ipc_domain == IPU_BUTTRESS_IPC_CSE ? &b->cse : &b->ish; mutex_lock(&b->ipc_mutex); ret = ipu_buttress_ipc_validity_open(isp, ipc); if (ret) { dev_err(&isp->pdev->dev, "IPC validity open failed\n"); goto out; } tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT); rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT); for (i = 0; i < size; i++) { reinit_completion(&ipc->send_complete); if (msgs[i].require_resp) reinit_completion(&ipc->recv_complete); dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", msgs[i].cmd); writel(msgs[i].cmd, isp->base + ipc->data0_out); val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size; writel(val, isp->base + ipc->db0_out); tout = wait_for_completion_timeout(&ipc->send_complete, tx_timeout_jiffies); if (!tout) { dev_err(&isp->pdev->dev, "send IPC response timeout\n"); if (!retry--) { ret = -ETIMEDOUT; goto out; } /* * WORKAROUND: Sometimes CSE is not * responding on first try, try again. 
*/ writel(0, isp->base + ipc->db0_out); i--; continue; } retry = BUTTRESS_IPC_CMD_SEND_RETRY; if (!msgs[i].require_resp) continue; tout = wait_for_completion_timeout(&ipc->recv_complete, rx_timeout_jiffies); if (!tout) { dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); ret = -ETIMEDOUT; goto out; } if (ipc->nack_mask && (ipc->recv_data & ipc->nack_mask) == ipc->nack) { dev_err(&isp->pdev->dev, "IPC NACK for cmd 0x%x\n", msgs[i].cmd); ret = -ENODEV; goto out; } if (ipc->recv_data != msgs[i].expected_resp) { dev_err(&isp->pdev->dev, "expected resp: 0x%x, IPC response: 0x%x ", msgs[i].expected_resp, ipc->recv_data); ret = -EIO; goto out; } } dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n"); out: ipu_buttress_ipc_validity_close(isp, ipc); mutex_unlock(&b->ipc_mutex); return ret; } static int ipu_buttress_ipc_send(struct ipu_device *isp, enum ipu_buttress_ipc_domain ipc_domain, u32 ipc_msg, u32 size, bool require_resp, u32 expected_resp) { struct ipu_ipc_buttress_bulk_msg msg = { .cmd = ipc_msg, .cmd_size = size, .require_resp = require_resp, .expected_resp = expected_resp, }; return ipu_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); } static irqreturn_t ipu_buttress_call_isr(struct ipu_bus_device *adev) { irqreturn_t ret = IRQ_WAKE_THREAD; if (!adev || !adev->adrv) return IRQ_NONE; if (adev->adrv->isr) ret = adev->adrv->isr(adev); if (ret == IRQ_WAKE_THREAD && !adev->adrv->isr_threaded) ret = IRQ_NONE; adev->adrv->wake_isr_thread = (ret == IRQ_WAKE_THREAD); return ret; } irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr) { struct ipu_device *isp = isp_ptr; struct ipu_bus_device *adev[] = { isp->isys, isp->psys }; struct ipu_buttress *b = &isp->buttress; irqreturn_t ret = IRQ_NONE; u32 disable_irqs = 0; u32 irq_status; u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; unsigned int i; pm_runtime_get(&isp->pdev->dev); if (!pm_runtime_active(&isp->pdev->dev)) { irq_status = readl(isp->base + reg_irq_sts); writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); pm_runtime_put(&isp->pdev->dev); return IRQ_HANDLED; } irq_status = readl(isp->base + reg_irq_sts); if (!irq_status) { pm_runtime_put(&isp->pdev->dev); return IRQ_NONE; } do { writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) { if (irq_status & ipu_adev_irq_mask[i]) { irqreturn_t r = ipu_buttress_call_isr(adev[i]); if (r == IRQ_WAKE_THREAD) { ret = IRQ_WAKE_THREAD; disable_irqs |= ipu_adev_irq_mask[i]; } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { ret = IRQ_HANDLED; } } } if (irq_status & (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | BUTTRESS_ISR_SAI_VIOLATION) && ret == IRQ_NONE) ret = IRQ_HANDLED; if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) { dev_dbg(&isp->pdev->dev, "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n"); ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); complete(&b->cse.recv_complete); } if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) { dev_dbg(&isp->pdev->dev, "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n"); ipu_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data); complete(&b->ish.recv_complete); } if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) { dev_dbg(&isp->pdev->dev, "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); complete(&b->cse.send_complete); } if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) { dev_dbg(&isp->pdev->dev, "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n"); complete(&b->ish.send_complete); } if (irq_status & BUTTRESS_ISR_SAI_VIOLATION 
&& ipu_buttress_get_secure_mode(isp)) { dev_err(&isp->pdev->dev, "BUTTRESS_ISR_SAI_VIOLATION\n"); WARN_ON(1); } if (irq_status & (BUTTRESS_ISR_IS_FATAL_MEM_ERR | BUTTRESS_ISR_PS_FATAL_MEM_ERR)) { dev_err(&isp->pdev->dev, "BUTTRESS_ISR_FATAL_MEM_ERR\n"); } if (irq_status & BUTTRESS_ISR_UFI_ERROR) dev_err(&isp->pdev->dev, "BUTTRESS_ISR_UFI_ERROR\n"); irq_status = readl(isp->base + reg_irq_sts); } while (irq_status && !isp->flr_done); if (disable_irqs) writel(BUTTRESS_IRQS & ~disable_irqs, isp->base + BUTTRESS_REG_ISR_ENABLE); pm_runtime_put(&isp->pdev->dev); return ret; } irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr) { struct ipu_device *isp = isp_ptr; struct ipu_bus_device *adev[] = { isp->isys, isp->psys }; irqreturn_t ret = IRQ_NONE; unsigned int i; dev_dbg(&isp->pdev->dev, "isr: Buttress threaded interrupt handler\n"); for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) { if (adev[i] && adev[i]->adrv && adev[i]->adrv->wake_isr_thread && adev[i]->adrv->isr_threaded(adev[i]) == IRQ_HANDLED) ret = IRQ_HANDLED; } writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); return ret; } int ipu_buttress_power(struct device *dev, struct ipu_buttress_ctrl *ctrl, bool on) { struct ipu_device *isp = to_ipu_bus_device(dev)->isp; u32 pwr_sts, val; int ret = 0; if (!ctrl) return 0; /* Until FLR completion nothing is expected to work */ if (isp->flr_done) return 0; mutex_lock(&isp->buttress.power_mutex); if (!on) { val = 0; pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; } else { val = BUTTRESS_FREQ_CTL_START | ctrl->divisor << ctrl->divisor_shift | ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | BUTTRESS_FREQ_CTL_ICCMAX_LEVEL; pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; } writel(val, isp->base + ctrl->freq_ctl); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, ((val & ctrl->pwr_sts_mask) == pwr_sts), 100, BUTTRESS_POWER_TIMEOUT); if (ret) dev_err(&isp->pdev->dev, "Change power status timeout with 0x%x\n", val); ctrl->started = !ret && on; mutex_unlock(&isp->buttress.power_mutex); return ret; } static bool secure_mode_enable = 1; module_param(secure_mode_enable, bool, 0660); MODULE_PARM_DESC(secure_mode, "IPU secure mode enable"); void ipu_buttress_set_secure_mode(struct ipu_device *isp) { u8 retry = 100; u32 val, read; /* * HACK to disable possible secure mode. This can be * reverted when CSE is disabling the secure mode */ read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); if (secure_mode_enable) val = read |= BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; else val = read & ~BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; if (val == read) return; writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL); /* In B0, for some registers in buttress, because of a hw bug, write * might not succeed at first attempt. 
Write twice until the * write is successful */ writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL); while (retry--) { read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); if (read == val) break; writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL); if (retry == 0) dev_err(&isp->pdev->dev, "update security control register failed\n"); } } bool ipu_buttress_get_secure_mode(struct ipu_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; } bool ipu_buttress_auth_done(struct ipu_device *isp) { u32 val; if (!isp->secure_mode) return 1; val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); return (val & BUTTRESS_SECURITY_CTL_FW_SETUP_MASK) == BUTTRESS_SECURITY_CTL_AUTH_DONE; } EXPORT_SYMBOL(ipu_buttress_auth_done); static void ipu_buttress_set_psys_ratio(struct ipu_device *isp, unsigned int psys_divisor, unsigned int psys_qos_floor) { struct ipu_buttress_ctrl *ctrl = isp->psys->ctrl; mutex_lock(&isp->buttress.power_mutex); if (ctrl->divisor == psys_divisor && ctrl->qos_floor == psys_qos_floor) goto out_mutex_unlock; ctrl->divisor = psys_divisor; ctrl->qos_floor = psys_qos_floor; if (ctrl->started) { /* * According to documentation driver initiates DVFS * transition by writing wanted ratio, floor ratio and start * bit. No need to stop PS first */ writel(BUTTRESS_FREQ_CTL_START | ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); } out_mutex_unlock: mutex_unlock(&isp->buttress.power_mutex); } static void ipu_buttress_set_isys_ratio(struct ipu_device *isp, unsigned int isys_divisor) { struct ipu_buttress_ctrl *ctrl = isp->isys->ctrl; mutex_lock(&isp->buttress.power_mutex); if (ctrl->divisor == isys_divisor) goto out_mutex_unlock; ctrl->divisor = isys_divisor; if (ctrl->started) { writel(BUTTRESS_FREQ_CTL_START | ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | isys_divisor, isp->base + BUTTRESS_REG_IS_FREQ_CTL); } out_mutex_unlock: mutex_unlock(&isp->buttress.power_mutex); } static void ipu_buttress_set_psys_freq(struct ipu_device *isp, unsigned int freq) { unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; if (isp->buttress.psys_force_ratio) return; ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); } void ipu_buttress_add_psys_constraint(struct ipu_device *isp, struct ipu_buttress_constraint *constraint) { struct ipu_buttress *b = &isp->buttress; mutex_lock(&b->cons_mutex); list_add(&constraint->list, &b->constraints); if (constraint->min_freq > b->psys_min_freq) { isp->buttress.psys_min_freq = min(constraint->min_freq, b->psys_fused_freqs.max_freq); ipu_buttress_set_psys_freq(isp, b->psys_min_freq); } mutex_unlock(&b->cons_mutex); } EXPORT_SYMBOL_GPL(ipu_buttress_add_psys_constraint); void ipu_buttress_remove_psys_constraint(struct ipu_device *isp, struct ipu_buttress_constraint *constraint) { struct ipu_buttress *b = &isp->buttress; struct ipu_buttress_constraint *c; unsigned int min_freq = 0; mutex_lock(&b->cons_mutex); list_del(&constraint->list); if (constraint->min_freq >= b->psys_min_freq) { list_for_each_entry(c, &b->constraints, list) if (c->min_freq > min_freq) min_freq = c->min_freq; b->psys_min_freq = clamp(min_freq, b->psys_fused_freqs.efficient_freq, b->psys_fused_freqs.max_freq); ipu_buttress_set_psys_freq(isp, b->psys_min_freq); } mutex_unlock(&b->cons_mutex); } EXPORT_SYMBOL_GPL(ipu_buttress_remove_psys_constraint); int ipu_buttress_reset_authentication(struct ipu_device *isp) { int ret; u32 val; if (!isp->secure_mode) { 
dev_dbg(&isp->pdev->dev, "Non-secure mode -> skip authentication\n"); return 0; } writel(BUTTRESS_FW_RESET_CTL_START, isp->base + BUTTRESS_REG_FW_RESET_CTL); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, val & BUTTRESS_FW_RESET_CTL_DONE, 500, BUTTRESS_CSE_FWRESET_TIMEOUT); if (ret) { dev_err(&isp->pdev->dev, "Time out while resetting authentication state\n"); } else { dev_info(&isp->pdev->dev, "FW reset for authentication done\n"); writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); /* leave some time for HW restore */ usleep_range(800, 1000); } return ret; } int ipu_buttress_map_fw_image(struct ipu_bus_device *sys, const struct firmware *fw, struct sg_table *sgt) { struct page **pages; const void *addr; unsigned long n_pages; int rval, i; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) int nents; #endif n_pages = PAGE_ALIGN(fw->size) >> PAGE_SHIFT; pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; addr = fw->data; for (i = 0; i < n_pages; i++) { struct page *p = vmalloc_to_page(addr); if (!p) { rval = -ENODEV; goto out; } pages[i] = p; addr += PAGE_SIZE; } rval = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, GFP_KERNEL); if (rval) { rval = -ENOMEM; goto out; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) nents = dma_map_sg(&sys->dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE); if (!nents) { rval = -ENOMEM; sg_free_table(sgt); goto out; } sgt->nents = nents; dma_sync_sg_for_device(&sys->dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE); #else rval = dma_map_sgtable(&sys->dev, sgt, DMA_TO_DEVICE, 0); if (rval < 0) { rval = -ENOMEM; sg_free_table(sgt); goto out; } dma_sync_sgtable_for_device(&sys->dev, sgt, DMA_TO_DEVICE); #endif out: kfree(pages); return rval; } EXPORT_SYMBOL_GPL(ipu_buttress_map_fw_image); int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys, struct sg_table *sgt) { dma_unmap_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); sg_free_table(sgt); return 0; } EXPORT_SYMBOL_GPL(ipu_buttress_unmap_fw_image); int ipu_buttress_authenticate(struct ipu_device *isp) { struct ipu_psys_pdata *psys_pdata; struct ipu_buttress *b = &isp->buttress; u32 data, mask, done, fail; int rval; if (!isp->secure_mode) { dev_dbg(&isp->pdev->dev, "Non-secure mode -> skip authentication\n"); return 0; } psys_pdata = isp->psys->pdata; mutex_lock(&b->auth_mutex); if (ipu_buttress_auth_done(isp)) { rval = 0; goto iunit_power_off; } /* * Write address of FIT table to FW_SOURCE register * Let's use fw address. I.e. 
not using FIT table yet */ data = lower_32_bits(isp->pkg_dir_dma_addr); writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO); data = upper_32_bits(isp->pkg_dir_dma_addr); writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI); /* * Write boot_load into IU2CSEDATA0 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as */ dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n"); rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE, BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, 1, 1, BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); if (rval) { dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); goto iunit_power_off; } mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; rval = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, ((data & mask) == done || (data & mask) == fail), 500, BUTTRESS_CSE_BOOTLOAD_TIMEOUT); if (rval) { dev_err(&isp->pdev->dev, "CSE boot_load timeout\n"); goto iunit_power_off; } data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL) & mask; if (data == fail) { dev_err(&isp->pdev->dev, "CSE auth failed\n"); rval = -EINVAL; goto iunit_power_off; } rval = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET, data, data == BOOTLOADER_MAGIC_KEY, 500, BUTTRESS_CSE_BOOTLOAD_TIMEOUT); if (rval) { dev_err(&isp->pdev->dev, "Expect magic number timeout 0x%x\n", data); goto iunit_power_off; } /* * Write authenticate_run into IU2CSEDATA0 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as */ dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n"); rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE, BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, 1, 1, BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); if (rval) { dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n"); goto iunit_power_off; } done = BUTTRESS_SECURITY_CTL_AUTH_DONE; rval = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, ((data & mask) == done || (data & mask) == fail), 500, BUTTRESS_CSE_AUTHENTICATE_TIMEOUT); if (rval) { dev_err(&isp->pdev->dev, "CSE authenticate timeout\n"); goto iunit_power_off; } data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL) & mask; if (data == fail) { dev_err(&isp->pdev->dev, "CSE boot_load failed\n"); rval = -EINVAL; goto iunit_power_off; } dev_info(&isp->pdev->dev, "CSE authenticate_run done\n"); iunit_power_off: mutex_unlock(&b->auth_mutex); return rval; } static int ipu_buttress_send_tsc_request(struct ipu_device *isp) { u32 val, mask, shift, done; int ret; mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK; shift = BUTTRESS_PWR_STATE_HH_STATUS_SHIFT; writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC, isp->base + BUTTRESS_REG_FABRIC_CMD); val = readl(isp->base + BUTTRESS_REG_PWR_STATE); val = (val & mask) >> shift; if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) { dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); return -EINVAL; } done = BUTTRESS_PWR_STATE_HH_STATE_DONE; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val, ((val & mask) >> shift == done), 500, BUTTRESS_TSC_SYNC_TIMEOUT); if (ret) dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); return ret; } int ipu_buttress_start_tsc_sync(struct ipu_device *isp) { unsigned int i; for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { int ret; ret = ipu_buttress_send_tsc_request(isp); if (ret == -ETIMEDOUT) { u32 val; /* set tsw soft reset */ val = readl(isp->base + BUTTRESS_REG_TSW_CTL); val = val | BUTTRESS_TSW_CTL_SOFT_RESET; writel(val, isp->base + 
BUTTRESS_REG_TSW_CTL); /* clear tsw soft reset */ val = val & (~BUTTRESS_TSW_CTL_SOFT_RESET); writel(val, isp->base + BUTTRESS_REG_TSW_CTL); continue; } return ret; } dev_err(&isp->pdev->dev, "TSC sync failed(timeout)\n"); return -ETIMEDOUT; } EXPORT_SYMBOL(ipu_buttress_start_tsc_sync); struct clk_ipu_sensor { struct ipu_device *isp; struct clk_hw hw; unsigned int id; unsigned long rate; }; #define to_clk_ipu_sensor(_hw) container_of(_hw, struct clk_ipu_sensor, hw) int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val) { u32 tsc_hi_1, tsc_hi_2, tsc_lo; unsigned long flags; local_irq_save(flags); tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI); tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI); if (tsc_hi_1 == tsc_hi_2) { *val = (u64)tsc_hi_1 << 32 | tsc_lo; } else { /* Check if TSC has rolled over */ if (tsc_lo & BIT(31)) *val = (u64)tsc_hi_1 << 32 | tsc_lo; else *val = (u64)tsc_hi_2 << 32 | tsc_lo; } local_irq_restore(flags); return 0; } EXPORT_SYMBOL_GPL(ipu_buttress_tsc_read); int ipu_buttress_isys_freq_set(void *data, u64 val) { struct ipu_device *isp = data; int rval; if (val < BUTTRESS_MIN_FORCE_IS_FREQ || val > BUTTRESS_MAX_FORCE_IS_FREQ) return -EINVAL; rval = pm_runtime_get_sync(&isp->isys->dev); if (rval < 0) { pm_runtime_put(&isp->isys->dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); return rval; } do_div(val, BUTTRESS_IS_FREQ_STEP); if (val) ipu_buttress_set_isys_ratio(isp, val); pm_runtime_put(&isp->isys->dev); return 0; } EXPORT_SYMBOL_GPL(ipu_buttress_isys_freq_set); #ifdef CONFIG_DEBUG_FS static int ipu_buttress_reg_open(struct inode *inode, struct file *file) { if (!inode->i_private) return -EACCES; file->private_data = inode->i_private; return 0; } static ssize_t ipu_buttress_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct debugfs_reg32 *reg = file->private_data; u8 tmp[11]; u32 val = readl((void __iomem *)reg->offset); int len = scnprintf(tmp, sizeof(tmp), "0x%08x", val); return simple_read_from_buffer(buf, len, ppos, &tmp, len); } static ssize_t ipu_buttress_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct debugfs_reg32 *reg = file->private_data; u32 val; int rval; rval = kstrtou32_from_user(buf, count, 0, &val); if (rval) return rval; writel(val, (void __iomem *)reg->offset); return count; } static struct debugfs_reg32 buttress_regs[] = { {"IU2CSEDB0", BUTTRESS_REG_IU2CSEDB0}, {"IU2CSEDATA0", BUTTRESS_REG_IU2CSEDATA0}, {"CSE2IUDB0", BUTTRESS_REG_CSE2IUDB0}, {"CSE2IUDATA0", BUTTRESS_REG_CSE2IUDATA0}, {"CSE2IUCSR", BUTTRESS_REG_CSE2IUCSR}, {"IU2CSECSR", BUTTRESS_REG_IU2CSECSR}, }; static const struct file_operations ipu_buttress_reg_fops = { .owner = THIS_MODULE, .open = ipu_buttress_reg_open, .read = ipu_buttress_reg_read, .write = ipu_buttress_reg_write, }; static int ipu_buttress_start_tsc_sync_set(void *data, u64 val) { struct ipu_device *isp = data; return ipu_buttress_start_tsc_sync(isp); } DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_start_tsc_sync_fops, NULL, ipu_buttress_start_tsc_sync_set, "%llu\n"); static int ipu_buttress_tsc_get(void *data, u64 *val) { return ipu_buttress_tsc_read(data, val); } DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_tsc_fops, ipu_buttress_tsc_get, NULL, "%llu\n"); static int ipu_buttress_psys_freq_get(void *data, u64 *val) { struct ipu_device *isp = data; u32 reg_val; int rval; rval = pm_runtime_get_sync(&isp->psys->dev); if (rval < 0) { pm_runtime_put(&isp->psys->dev); dev_err(&isp->pdev->dev, "Runtime 
PM failed (%d)\n", rval); return rval; } reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL); pm_runtime_put(&isp->psys->dev); *val = IPU_PS_FREQ_RATIO_BASE * (reg_val & IPU_BUTTRESS_PS_FREQ_CTL_DIVISOR_MASK); return 0; } static int ipu_buttress_psys_freq_set(void *data, u64 val) { struct ipu_device *isp = data; if (val && (val < BUTTRESS_MIN_FORCE_PS_FREQ || val > BUTTRESS_MAX_FORCE_PS_FREQ)) return -EINVAL; do_div(val, BUTTRESS_PS_FREQ_STEP); isp->buttress.psys_force_ratio = val; if (isp->buttress.psys_force_ratio) ipu_buttress_set_psys_ratio(isp, isp->buttress.psys_force_ratio, isp->buttress.psys_force_ratio); else ipu_buttress_set_psys_freq(isp, isp->buttress.psys_min_freq); return 0; } static int ipu_buttress_isys_freq_get(void *data, u64 *val) { struct ipu_device *isp = data; u32 reg_val; int rval; rval = pm_runtime_get_sync(&isp->isys->dev); if (rval < 0) { pm_runtime_put(&isp->isys->dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); return rval; } reg_val = readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL); pm_runtime_put(&isp->isys->dev); *val = IPU_IS_FREQ_RATIO_BASE * (reg_val & IPU_BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK); return 0; } DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_freq_fops, ipu_buttress_psys_freq_get, ipu_buttress_psys_freq_set, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_isys_freq_fops, ipu_buttress_isys_freq_get, ipu_buttress_isys_freq_set, "%llu\n"); int ipu_buttress_debugfs_init(struct ipu_device *isp) { struct debugfs_reg32 *reg = devm_kcalloc(&isp->pdev->dev, ARRAY_SIZE(buttress_regs), sizeof(*reg), GFP_KERNEL); struct dentry *dir, *file; int i; if (!reg) return -ENOMEM; dir = debugfs_create_dir("buttress", isp->ipu_dir); if (!dir) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(buttress_regs); i++, reg++) { reg->offset = (unsigned long)isp->base + buttress_regs[i].offset; reg->name = buttress_regs[i].name; file = debugfs_create_file(reg->name, 0700, dir, reg, &ipu_buttress_reg_fops); if (!file) goto err; } file = debugfs_create_file("start_tsc_sync", 0200, dir, isp, &ipu_buttress_start_tsc_sync_fops); if (!file) goto err; file = debugfs_create_file("tsc", 0400, dir, isp, &ipu_buttress_tsc_fops); if (!file) goto err; file = debugfs_create_file("psys_freq", 0700, dir, isp, &ipu_buttress_psys_freq_fops); if (!file) goto err; file = debugfs_create_file("isys_freq", 0700, dir, isp, &ipu_buttress_isys_freq_fops); if (!file) goto err; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif /* CONFIG_DEBUG_FS */ u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu_device *isp) { u64 ns = ticks * 10000; /* * converting TSC tick count to ns is calculated by: * Example (TSC clock frequency is 19.2MHz): * ns = ticks * 1000 000 000 / 19.2Mhz * = ticks * 1000 000 000 / 19200000Hz * = ticks * 10000 / 192 ns */ do_div(ns, isp->buttress.ref_clk); return ns; } EXPORT_SYMBOL_GPL(ipu_buttress_tsc_ticks_to_ns); static ssize_t psys_fused_min_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); return snprintf(buf, PAGE_SIZE, "%u\n", isp->buttress.psys_fused_freqs.min_freq); } static DEVICE_ATTR_RO(psys_fused_min_freq); static ssize_t psys_fused_max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); return snprintf(buf, PAGE_SIZE, "%u\n", isp->buttress.psys_fused_freqs.max_freq); } static DEVICE_ATTR_RO(psys_fused_max_freq); static ssize_t psys_fused_efficient_freq_show(struct device *dev, 
struct device_attribute *attr, char *buf) { struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); return snprintf(buf, PAGE_SIZE, "%u\n", isp->buttress.psys_fused_freqs.efficient_freq); } static DEVICE_ATTR_RO(psys_fused_efficient_freq); int ipu_buttress_restore(struct ipu_device *isp) { struct ipu_buttress *b = &isp->buttress; writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); return 0; } int ipu_buttress_init(struct ipu_device *isp) { struct ipu_buttress *b = &isp->buttress; u32 val; int rval, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; mutex_init(&b->power_mutex); mutex_init(&b->auth_mutex); mutex_init(&b->cons_mutex); mutex_init(&b->ipc_mutex); init_completion(&b->ish.send_complete); init_completion(&b->cse.send_complete); init_completion(&b->ish.recv_complete); init_completion(&b->cse.recv_complete); b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; /* no ISH on IPU6 */ memset(&b->ish, 0, sizeof(b->ish)); INIT_LIST_HEAD(&b->constraints); ipu_buttress_set_secure_mode(isp); isp->secure_mode = ipu_buttress_get_secure_mode(isp); if (isp->secure_mode != secure_mode_enable) dev_warn(&isp->pdev->dev, "Unable to set secure mode\n"); dev_info(&isp->pdev->dev, "IPU in %s mode\n", isp->secure_mode ? "secure" : "non-secure"); dev_info(&isp->pdev->dev, "IPU secure touch = 0x%x\n", readl(isp->base + BUTTRESS_REG_SECURITY_TOUCH)); dev_info(&isp->pdev->dev, "IPU camera mask = 0x%x\n", readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT); writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); /* get ref_clk frequency by reading the indication in btrs control */ val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); val &= BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND; val >>= 8; switch (val) { case 0x0: b->ref_clk = 240; break; case 0x1: b->ref_clk = 192; break; case 0x2: b->ref_clk = 384; break; default: dev_warn(&isp->pdev->dev, "Unsupported ref clock, use 19.2Mhz by default.\n"); b->ref_clk = 192; break; } rval = device_create_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); if (rval) { dev_err(&isp->pdev->dev, "Create min freq file failed\n"); goto err_mutex_destroy; } rval = device_create_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); if (rval) { dev_err(&isp->pdev->dev, "Create max freq file failed\n"); goto err_remove_min_freq_file; } rval = device_create_file(&isp->pdev->dev, &dev_attr_psys_fused_efficient_freq); if (rval) { dev_err(&isp->pdev->dev, "Create efficient freq file failed\n"); goto err_remove_max_freq_file; } /* * We want to retry couple of time in case CSE initialization * is delayed for reason or another. 
*/ do { rval = ipu_buttress_ipc_reset(isp, &b->cse); if (rval) { dev_warn(&isp->pdev->dev, "IPC reset protocol failed, retrying\n"); } else { dev_info(&isp->pdev->dev, "IPC reset done\n"); return 0; } } while (ipc_reset_retry--); dev_err(&isp->pdev->dev, "IPC reset protocol failed\n"); err_remove_max_freq_file: device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); err_remove_min_freq_file: device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); err_mutex_destroy: mutex_destroy(&b->power_mutex); mutex_destroy(&b->auth_mutex); mutex_destroy(&b->cons_mutex); mutex_destroy(&b->ipc_mutex); return rval; } void ipu_buttress_exit(struct ipu_device *isp) { struct ipu_buttress *b = &isp->buttress; writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE); device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_efficient_freq); device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq); device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq); mutex_destroy(&b->power_mutex); mutex_destroy(&b->auth_mutex); mutex_destroy(&b->cons_mutex); mutex_destroy(&b->ipc_mutex); } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-buttress.h000066400000000000000000000070211471702545500261530ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_BUTTRESS_H #define IPU_BUTTRESS_H #include #include #include "ipu.h" #define IPU_BUTTRESS_NUM_OF_SENS_CKS 3 #define IPU_BUTTRESS_NUM_OF_PLL_CKS 3 #define IPU_BUTTRESS_TSC_CLK 19200000 #define BUTTRESS_POWER_TIMEOUT 200000 #define BUTTRESS_PS_FREQ_STEP 25U #define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) #define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32) #define BUTTRESS_IS_FREQ_STEP 25U #define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8) #define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22) struct ipu_buttress_ctrl { u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; union { unsigned int divisor; unsigned int ratio; }; union { unsigned int divisor_shift; unsigned int ratio_shift; }; unsigned int qos_floor; bool started; }; struct ipu_buttress_fused_freqs { unsigned int min_freq; unsigned int max_freq; unsigned int efficient_freq; }; struct ipu_buttress_ipc { struct completion send_complete; struct completion recv_complete; u32 nack; u32 nack_mask; u32 recv_data; u32 csr_out; u32 csr_in; u32 db0_in; u32 db0_out; u32 data0_out; u32 data0_in; }; struct ipu_buttress { struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; struct ipu_buttress_ipc cse; struct ipu_buttress_ipc ish; struct list_head constraints; struct ipu_buttress_fused_freqs psys_fused_freqs; unsigned int psys_min_freq; u32 wdt_cached_value; u8 psys_force_ratio; bool force_suspend; u32 ref_clk; }; struct ipu_buttress_sensor_clk_freq { unsigned int rate; unsigned int val; }; struct firmware; enum ipu_buttress_ipc_domain { IPU_BUTTRESS_IPC_CSE, IPU_BUTTRESS_IPC_ISH, }; struct ipu_buttress_constraint { struct list_head list; unsigned int min_freq; }; struct ipu_ipc_buttress_bulk_msg { u32 cmd; u32 expected_resp; bool require_resp; u8 cmd_size; }; int ipu_buttress_ipc_reset(struct ipu_device *isp, struct ipu_buttress_ipc *ipc); int ipu_buttress_map_fw_image(struct ipu_bus_device *sys, const struct firmware *fw, struct sg_table *sgt); int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys, struct sg_table *sgt); int ipu_buttress_power(struct device *dev, struct ipu_buttress_ctrl *ctrl, bool on); void 
ipu_buttress_add_psys_constraint(struct ipu_device *isp, struct ipu_buttress_constraint *constraint); void ipu_buttress_remove_psys_constraint(struct ipu_device *isp, struct ipu_buttress_constraint *constraint); void ipu_buttress_set_secure_mode(struct ipu_device *isp); bool ipu_buttress_get_secure_mode(struct ipu_device *isp); int ipu_buttress_authenticate(struct ipu_device *isp); int ipu_buttress_reset_authentication(struct ipu_device *isp); bool ipu_buttress_auth_done(struct ipu_device *isp); int ipu_buttress_start_tsc_sync(struct ipu_device *isp); int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val); u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu_device *isp); irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr); irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr); int ipu_buttress_debugfs_init(struct ipu_device *isp); int ipu_buttress_init(struct ipu_device *isp); void ipu_buttress_exit(struct ipu_device *isp); void ipu_buttress_csi_port_config(struct ipu_device *isp, u32 legacy, u32 combo); int ipu_buttress_restore(struct ipu_device *isp); int ipu_buttress_isys_freq_set(void *data, u64 val); #endif /* IPU_BUTTRESS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-cpd.c000066400000000000000000000311371471702545500250460ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2015 - 2024 Intel Corporation #include #include #include "ipu.h" #include "ipu-cpd.h" /* 15 entries + header*/ #define MAX_PKG_DIR_ENT_CNT 16 /* 2 qword per entry/header */ #define PKG_DIR_ENT_LEN 2 /* PKG_DIR size in bytes */ #define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ (PKG_DIR_ENT_LEN) * sizeof(u64)) #define PKG_DIR_ID_SHIFT 48 #define PKG_DIR_ID_MASK 0x7f #define PKG_DIR_VERSION_SHIFT 32 #define PKG_DIR_SIZE_MASK 0xfffff /* _IUPKDR_ */ #define PKG_DIR_HDR_MARK 0x5f4955504b44525f /* $CPD */ #define CPD_HDR_MARK 0x44504324 /* Maximum size is 2K DWORDs */ #define MAX_MANIFEST_SIZE (2 * 1024 * sizeof(u32)) /* Maximum size is 64k */ #define MAX_METADATA_SIZE (64 * 1024) #define MAX_COMPONENT_ID 127 #define MAX_COMPONENT_VERSION 0xffff #define CPD_MANIFEST_IDX 0 #define CPD_METADATA_IDX 1 #define CPD_MODULEDATA_IDX 2 static inline struct ipu_cpd_ent *ipu_cpd_get_entries(const void *cpd) { const struct ipu_cpd_hdr *cpd_hdr = cpd; return (struct ipu_cpd_ent *)((u8 *)cpd + cpd_hdr->hdr_len); } #define ipu_cpd_get_entry(cpd, idx) (&ipu_cpd_get_entries(cpd)[idx]) #define ipu_cpd_get_manifest(cpd) ipu_cpd_get_entry(cpd, CPD_MANIFEST_IDX) #define ipu_cpd_get_metadata(cpd) ipu_cpd_get_entry(cpd, CPD_METADATA_IDX) #define ipu_cpd_get_moduledata(cpd) ipu_cpd_get_entry(cpd, CPD_MODULEDATA_IDX) static const struct ipu_cpd_metadata_cmpnt * ipu_cpd_metadata_get_cmpnt(struct ipu_device *isp, const void *metadata, unsigned int metadata_size, u8 idx) { const struct ipu_cpd_metadata_extn *extn; const struct ipu_cpd_metadata_cmpnt *cmpnts; int cmpnt_count; extn = metadata; cmpnts = metadata + sizeof(*extn); cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts); if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", idx); return ERR_PTR(-EINVAL); } return &cmpnts[idx]; } static u32 ipu_cpd_metadata_cmpnt_version(struct ipu_device *isp, const void *metadata, unsigned int metadata_size, u8 idx) { const struct ipu_cpd_metadata_cmpnt *cmpnt = ipu_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); if (IS_ERR(cmpnt)) return PTR_ERR(cmpnt); return cmpnt->ver; } static int 
ipu_cpd_metadata_get_cmpnt_id(struct ipu_device *isp, const void *metadata, unsigned int metadata_size, u8 idx) { const struct ipu_cpd_metadata_cmpnt *cmpnt = ipu_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); if (IS_ERR(cmpnt)) return PTR_ERR(cmpnt); return cmpnt->id; } static const struct ipu6_cpd_metadata_cmpnt * ipu6_cpd_metadata_get_cmpnt(struct ipu_device *isp, const void *metadata, unsigned int metadata_size, u8 idx) { const struct ipu_cpd_metadata_extn *extn = metadata; const struct ipu6_cpd_metadata_cmpnt *cmpnts = metadata + sizeof(*extn); int cmpnt_count; cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts); if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", idx); return ERR_PTR(-EINVAL); } return &cmpnts[idx]; } static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu_device *isp, const void *metadata, unsigned int metadata_size, u8 idx) { const struct ipu6_cpd_metadata_cmpnt *cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); if (IS_ERR(cmpnt)) return PTR_ERR(cmpnt); return cmpnt->ver; } static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu_device *isp, const void *metadata, unsigned int metadata_size, u8 idx) { const struct ipu6_cpd_metadata_cmpnt *cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx); if (IS_ERR(cmpnt)) return PTR_ERR(cmpnt); return cmpnt->id; } static int ipu_cpd_parse_module_data(struct ipu_device *isp, const void *module_data, unsigned int module_data_size, dma_addr_t dma_addr_module_data, u64 *pkg_dir, const void *metadata, unsigned int metadata_size) { const struct ipu_cpd_module_data_hdr *module_data_hdr; const struct ipu_cpd_hdr *dir_hdr; const struct ipu_cpd_ent *dir_ent; int i; u8 len; if (!module_data) return -EINVAL; module_data_hdr = module_data; dir_hdr = module_data + module_data_hdr->hdr_len; len = dir_hdr->hdr_len; dir_ent = (struct ipu_cpd_ent *)(((u8 *)dir_hdr) + len); pkg_dir[0] = PKG_DIR_HDR_MARK; /* pkg_dir entry count = component count + pkg_dir header */ pkg_dir[1] = dir_hdr->ent_cnt + 1; for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { u64 *p = &pkg_dir[PKG_DIR_ENT_LEN + i * PKG_DIR_ENT_LEN]; int ver, id; *p++ = dma_addr_module_data + dir_ent->offset; if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata, metadata_size, i); else id = ipu_cpd_metadata_get_cmpnt_id(isp, metadata, metadata_size, i); if (id < 0 || id > MAX_COMPONENT_ID) { dev_err(&isp->pdev->dev, "Failed to parse component id\n"); return -EINVAL; } if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata, metadata_size, i); else ver = ipu_cpd_metadata_cmpnt_version(isp, metadata, metadata_size, i); if (ver < 0 || ver > MAX_COMPONENT_VERSION) { dev_err(&isp->pdev->dev, "Failed to parse component version\n"); return -EINVAL; } /* * PKG_DIR Entry (type == id) * 63:56 55 54:48 47:32 31:24 23:0 * Rsvd Rsvd Type Version Rsvd Size */ *p = dir_ent->len | (u64)id << PKG_DIR_ID_SHIFT | (u64)ver << PKG_DIR_VERSION_SHIFT; } return 0; } void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev, const void *src, dma_addr_t dma_addr_src, dma_addr_t *dma_addr, unsigned int *pkg_dir_size) { struct ipu_device *isp = adev->isp; const struct ipu_cpd_ent *ent, *man_ent, *met_ent; u64 *pkg_dir; unsigned int man_sz, met_sz; void *pkg_dir_pos; int ret; man_ent = ipu_cpd_get_manifest(src); man_sz = man_ent->len; met_ent = 
ipu_cpd_get_metadata(src); met_sz = met_ent->len; *pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; pkg_dir = dma_alloc_attrs(&adev->dev, *pkg_dir_size, dma_addr, GFP_KERNEL, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) NULL); #else 0); #endif if (!pkg_dir) return pkg_dir; /* * pkg_dir entry/header: * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 * N Address/Offset/"_IUPKDR_" * N + 1 | rsvd | rsvd | type | ver | rsvd | size * * We can ignore other fields that size in N + 1 qword as they * are 0 anyway. Just setting size for now. */ ent = ipu_cpd_get_moduledata(src); ret = ipu_cpd_parse_module_data(isp, src + ent->offset, ent->len, dma_addr_src + ent->offset, pkg_dir, src + met_ent->offset, met_ent->len); if (ret) { dev_err(&isp->pdev->dev, "Unable to parse module data section!\n"); dma_free_attrs(&adev->dev, *pkg_dir_size, pkg_dir, *dma_addr, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) NULL); #else 0); #endif return NULL; } /* Copy manifest after pkg_dir */ pkg_dir_pos = pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); /* Copy metadata after manifest */ pkg_dir_pos += man_sz; memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); dma_sync_single_range_for_device(&adev->dev, *dma_addr, 0, *pkg_dir_size, DMA_TO_DEVICE); return pkg_dir; } EXPORT_SYMBOL_GPL(ipu_cpd_create_pkg_dir); void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev, u64 *pkg_dir, dma_addr_t dma_addr, unsigned int pkg_dir_size) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, NULL); #else dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, 0); #endif } EXPORT_SYMBOL_GPL(ipu_cpd_free_pkg_dir); static int ipu_cpd_validate_cpd(struct ipu_device *isp, const void *cpd, unsigned long cpd_size, unsigned long data_size) { const struct ipu_cpd_hdr *cpd_hdr = cpd; struct ipu_cpd_ent *ent; unsigned int i; u8 len; len = cpd_hdr->hdr_len; /* Ensure cpd hdr is within moduledata */ if (cpd_size < len) { dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); return -EINVAL; } /* Sanity check for CPD header */ if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { dev_err(&isp->pdev->dev, "Invalid CPD header\n"); return -EINVAL; } /* Ensure that all entries are within moduledata */ ent = (struct ipu_cpd_ent *)(((u8 *)cpd_hdr) + len); for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { if (data_size < ent->offset || data_size - ent->offset < ent->len) { dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); return -EINVAL; } } return 0; } static int ipu_cpd_validate_moduledata(struct ipu_device *isp, const void *moduledata, u32 moduledata_size) { const struct ipu_cpd_module_data_hdr *mod_hdr = moduledata; int rval; /* Ensure moduledata hdr is within moduledata */ if (moduledata_size < sizeof(*mod_hdr) || moduledata_size < mod_hdr->hdr_len) { dev_err(&isp->pdev->dev, "Invalid moduledata size\n"); return -EINVAL; } dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date); rval = ipu_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len, moduledata_size - mod_hdr->hdr_len, moduledata_size); if (rval) { dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n"); return -EINVAL; } return 0; } static int ipu_cpd_validate_metadata(struct ipu_device *isp, const void *metadata, u32 meta_size) { const struct ipu_cpd_metadata_extn *extn = metadata; unsigned int size; /* Sanity check for metadata size */ if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { dev_err(&isp->pdev->dev, "%s: Invalid 
metadata\n", __func__); return -EINVAL; } /* Validate extension and image types */ if (extn->extn_type != IPU_CPD_METADATA_EXTN_TYPE_IUNIT || extn->img_type != IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { dev_err(&isp->pdev->dev, "Invalid metadata descriptor img_type (%d)\n", extn->img_type); return -EINVAL; } /* Validate metadata size multiple of metadata components */ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) size = sizeof(struct ipu6_cpd_metadata_cmpnt); else size = sizeof(struct ipu_cpd_metadata_cmpnt); if ((meta_size - sizeof(*extn)) % size) { dev_err(&isp->pdev->dev, "%s: Invalid metadata size\n", __func__); return -EINVAL; } return 0; } int ipu_cpd_validate_cpd_file(struct ipu_device *isp, const void *cpd_file, unsigned long cpd_file_size) { const struct ipu_cpd_hdr *hdr = cpd_file; struct ipu_cpd_ent *ent; int rval; rval = ipu_cpd_validate_cpd(isp, cpd_file, cpd_file_size, cpd_file_size); if (rval) { dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); return -EINVAL; } /* Check for CPD file marker */ if (hdr->hdr_mark != CPD_HDR_MARK) { dev_err(&isp->pdev->dev, "Invalid CPD header\n"); return -EINVAL; } /* Sanity check for manifest size */ ent = ipu_cpd_get_manifest(cpd_file); if (ent->len > MAX_MANIFEST_SIZE) { dev_err(&isp->pdev->dev, "Invalid manifest size\n"); return -EINVAL; } /* Validate metadata */ ent = ipu_cpd_get_metadata(cpd_file); rval = ipu_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); if (rval) { dev_err(&isp->pdev->dev, "Invalid metadata\n"); return rval; } /* Validate moduledata */ ent = ipu_cpd_get_moduledata(cpd_file); rval = ipu_cpd_validate_moduledata(isp, cpd_file + ent->offset, ent->len); if (rval) { dev_err(&isp->pdev->dev, "Invalid moduledata\n"); return rval; } return 0; } EXPORT_SYMBOL_GPL(ipu_cpd_validate_cpd_file); unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) { return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN]; } EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_address); unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) { return pkg_dir[1]; } EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_num_entries); unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) { return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] & PKG_DIR_SIZE_MASK; } EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_size); unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) { return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] >> PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; } EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_type); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-cpd.h000066400000000000000000000053511471702545500250520ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2015 - 2024 Intel Corporation */ #ifndef IPU_CPD_H #define IPU_CPD_H #define IPU_CPD_SIZE_OF_FW_ARCH_VERSION 7 #define IPU_CPD_SIZE_OF_SYSTEM_VERSION 11 #define IPU_CPD_SIZE_OF_COMPONENT_NAME 12 #define IPU_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 #define IPU_CPD_METADATA_IMAGE_TYPE_RESERVED 0 #define IPU_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 #define IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 #define IPU_CPD_PKG_DIR_PSYS_SERVER_IDX 0 #define IPU_CPD_PKG_DIR_ISYS_SERVER_IDX 1 #define IPU_CPD_PKG_DIR_CLIENT_PG_TYPE 3 #define IPU6_CPD_METADATA_HASH_KEY_SIZE 48 #define IPU_CPD_METADATA_HASH_KEY_SIZE 32 struct __packed ipu_cpd_module_data_hdr { u32 hdr_len; u32 endian; u32 fw_pkg_date; u32 hive_sdk_date; u32 compiler_date; u32 target_platform_type; u8 
sys_ver[IPU_CPD_SIZE_OF_SYSTEM_VERSION]; u8 fw_arch_ver[IPU_CPD_SIZE_OF_FW_ARCH_VERSION]; u8 rsvd[2]; }; /* ipu_cpd_hdr structure updated as the chksum and * sub_partition_name is unused on host side * CSE layout version 1.6 for ipu6se (hdr_len = 0x10) * CSE layout version 1.7 for ipu6 (hdr_len = 0x14) */ struct __packed ipu_cpd_hdr { u32 hdr_mark; u32 ent_cnt; u8 hdr_ver; u8 ent_ver; u8 hdr_len; }; struct __packed ipu_cpd_ent { u8 name[IPU_CPD_SIZE_OF_COMPONENT_NAME]; u32 offset; u32 len; u8 rsvd[4]; }; struct __packed ipu_cpd_metadata_cmpnt { u32 id; u32 size; u32 ver; u8 sha2_hash[IPU_CPD_METADATA_HASH_KEY_SIZE]; u32 entry_point; u32 icache_base_offs; u8 attrs[16]; }; struct __packed ipu6_cpd_metadata_cmpnt { u32 id; u32 size; u32 ver; u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE]; u32 entry_point; u32 icache_base_offs; u8 attrs[16]; }; struct __packed ipu_cpd_metadata_extn { u32 extn_type; u32 len; u32 img_type; u8 rsvd[16]; }; struct __packed ipu_cpd_client_pkg_hdr { u32 prog_list_offs; u32 prog_list_size; u32 prog_desc_offs; u32 prog_desc_size; u32 pg_manifest_offs; u32 pg_manifest_size; u32 prog_bin_offs; u32 prog_bin_size; }; void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev, const void *src, dma_addr_t dma_addr_src, dma_addr_t *dma_addr, unsigned int *pkg_dir_size); void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev, u64 *pkg_dir, dma_addr_t dma_addr, unsigned int pkg_dir_size); int ipu_cpd_validate_cpd_file(struct ipu_device *isp, const void *cpd_file, unsigned long cpd_file_size); unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx); unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir); unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx); unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx); #endif /* IPU_CPD_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-dma.c000066400000000000000000000330651471702545500250430ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) #include #endif #include "ipu-dma.h" #include "ipu-bus.h" #include "ipu-mmu.h" struct vm_info { struct list_head list; struct page **pages; dma_addr_t ipu_iova; void *vaddr; unsigned long size; }; static struct vm_info *get_vm_info(struct ipu_mmu *mmu, dma_addr_t iova) { struct vm_info *info, *save; list_for_each_entry_safe(info, save, &mmu->vma_list, list) { if (iova >= info->ipu_iova && iova < (info->ipu_iova + info->size)) return info; } return NULL; } /* Begin of things adapted from arch/arm/mm/dma-mapping.c */ static void __dma_clear_buffer(struct page *page, size_t size, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { /* * Ensure that the allocated pages are zeroed, and that any data * lurking in the kernel direct-mapped region is invalidated. 
*/ void *ptr = page_address(page); memset(ptr, 0, size); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) clflush_cache_range(ptr, size); #else if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) clflush_cache_range(ptr, size); #endif } static struct page **__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { struct page **pages; int count = size >> PAGE_SHIFT; int array_size = count * sizeof(struct page *); int i = 0; pages = kvzalloc(array_size, GFP_KERNEL); if (!pages) return NULL; gfp |= __GFP_NOWARN; while (count) { int j, order = __fls(count); pages[i] = alloc_pages(gfp, order); while (!pages[i] && order) pages[i] = alloc_pages(gfp, --order); if (!pages[i]) goto error; if (order) { split_page(pages[i], order); j = 1 << order; while (--j) pages[i + j] = pages[i] + j; } __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs); i += 1 << order; count -= 1 << order; } return pages; error: while (i--) if (pages[i]) __free_pages(pages[i], 0); kvfree(pages); return NULL; } static int __dma_free_buffer(struct device *dev, struct page **pages, size_t size, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { int count = size >> PAGE_SHIFT; int i; for (i = 0; i < count; i++) { if (pages[i]) { __free_pages(pages[i], 0); } } kvfree(pages); return 0; } /* End of things adapted from arch/arm/mm/dma-mapping.c */ static void ipu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { void *vaddr; u32 offset; struct vm_info *info; struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; info = get_vm_info(mmu, dma_handle); if (WARN_ON(!info)) return; offset = dma_handle - info->ipu_iova; if (WARN_ON(size > (info->size - offset))) return; vaddr = info->vaddr + offset; clflush_cache_range(vaddr, size); } static void ipu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir) { struct scatterlist *sg; int i; for_each_sg(sglist, sg, nents, i) clflush_cache_range(page_to_virt(sg_page(sg)), sg->length); } static void *ipu_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct pci_dev *pdev = to_ipu_bus_device(dev)->isp->pdev; struct page **pages; struct iova *iova; struct vm_info *info; int i; int rval; unsigned long count; dma_addr_t pci_dma_addr, ipu_iova; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return NULL; size = PAGE_ALIGN(size); count = size >> PAGE_SHIFT; iova = alloc_iova(&mmu->dmap->iovad, count, dma_get_mask(dev) >> PAGE_SHIFT, 0); if (!iova) goto out_kfree; pages = __dma_alloc_buffer(dev, size, gfp, attrs); if (!pages) goto out_free_iova; dev_dbg(dev, "dma_alloc: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, iova->pfn_hi); for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n", &pci_dma_addr); if (dma_mapping_error(&pdev->dev, pci_dma_addr)) { dev_err(dev, "pci_dma_mapping for page[%d] failed", i); goto out_unmap; } rval = ipu_mmu_map(mmu->dmap->mmu_info, (iova->pfn_lo + i) << PAGE_SHIFT, pci_dma_addr, PAGE_SIZE); if (rval) { 
dev_err(dev, "ipu_mmu_map for pci_dma[%d] %pad failed", i, &pci_dma_addr); dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); goto out_unmap; } } info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL); if (!info->vaddr) goto out_unmap; *dma_handle = iova->pfn_lo << PAGE_SHIFT; info->pages = pages; info->ipu_iova = *dma_handle; info->size = size; list_add(&info->list, &mmu->vma_list); return info->vaddr; out_unmap: for (i--; i >= 0; i--) { ipu_iova = (iova->pfn_lo + i) << PAGE_SHIFT; pci_dma_addr = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info, ipu_iova); dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); ipu_mmu_unmap(mmu->dmap->mmu_info, ipu_iova, PAGE_SIZE); } __dma_free_buffer(dev, pages, size, attrs); out_free_iova: __free_iova(&mmu->dmap->iovad, iova); out_kfree: kfree(info); return NULL; } static void ipu_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct pci_dev *pdev = to_ipu_bus_device(dev)->isp->pdev; struct page **pages; struct vm_info *info; struct iova *iova = find_iova(&mmu->dmap->iovad, dma_handle >> PAGE_SHIFT); dma_addr_t pci_dma_addr, ipu_iova; int i; if (WARN_ON(!iova)) return; info = get_vm_info(mmu, dma_handle); if (WARN_ON(!info)) return; if (WARN_ON(!info->vaddr)) return; if (WARN_ON(!info->pages)) return; list_del(&info->list); size = PAGE_ALIGN(size); pages = info->pages; vunmap(vaddr); for (i = 0; i < size >> PAGE_SHIFT; i++) { ipu_iova = (iova->pfn_lo + i) << PAGE_SHIFT; pci_dma_addr = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info, ipu_iova); dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); } ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT, iova_size(iova) << PAGE_SHIFT); __dma_free_buffer(dev, pages, size, attrs); mmu->tlb_invalidate(mmu); __free_iova(&mmu->dmap->iovad, iova); kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *addr, dma_addr_t iova, size_t size, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct vm_info *info; size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; size_t i; int ret; info = get_vm_info(mmu, iova); if (!info) return -EFAULT; if (!info->vaddr) return -EFAULT; if (vma->vm_start & ~PAGE_MASK) return -EINVAL; if (size > info->size) return -EFAULT; for (i = 0; i < count; i++) { ret = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT), info->pages[i]); if (ret < 0) return ret; } return 0; } #else static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *addr, dma_addr_t iova, size_t size, unsigned long attrs) { struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct vm_info *info; size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; info = get_vm_info(mmu, iova); if (!info) return -EFAULT; if (!info->vaddr) return -EFAULT; if (vma->vm_start & ~PAGE_MASK) return -EINVAL; if (size > info->size) return -EFAULT; return vm_insert_pages(vma, vma->vm_start, info->pages, &count); } #endif static void ipu_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { int i, npages, 
count; struct scatterlist *sg; dma_addr_t pci_dma_addr; struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct pci_dev *pdev = to_ipu_bus_device(dev)->isp->pdev; struct iova *iova = find_iova(&mmu->dmap->iovad, sg_dma_address(sglist) >> PAGE_SHIFT); if (!nents) return; if (WARN_ON(!iova)) return; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) #else if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) #endif ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL); /* get the nents as orig_nents given by caller */ count = 0; npages = iova_size(iova); for_each_sg(sglist, sg, nents, i) { if (sg_dma_len(sg) == 0 || sg_dma_address(sg) == DMA_MAPPING_ERROR) break; npages -= PAGE_ALIGN(sg_dma_len(sg)) >> PAGE_SHIFT; count++; if (npages <= 0) break; } /* before ipu mmu unmap, return the pci dma address back to sg * assume the nents is less than orig_nents as the least granule * is 1 SZ_4K page */ dev_dbg(dev, "trying to unmap concatenated %u ents\n", count); for_each_sg(sglist, sg, count, i) { dev_dbg(dev, "ipu_unmap sg[%d] %pad\n", i, &sg_dma_address(sg)); pci_dma_addr = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info, sg_dma_address(sg)); dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n", &pci_dma_addr, i); sg_dma_address(sg) = pci_dma_addr; } dev_dbg(dev, "ipu_mmu_unmap low pfn %lu high pfn %lu\n", iova->pfn_lo, iova->pfn_hi); ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT, iova_size(iova) << PAGE_SHIFT); mmu->tlb_invalidate(mmu); dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs); __free_iova(&mmu->dmap->iovad, iova); } static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct pci_dev *pdev = to_ipu_bus_device(dev)->isp->pdev; struct scatterlist *sg; struct iova *iova; size_t npages = 0; unsigned long iova_addr; int i, count; dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents); count = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs); if (count <= 0) { dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents); return 0; } dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count); for_each_sg(sglist, sg, count, i) npages += PAGE_ALIGN(sg_dma_len(sg)) >> PAGE_SHIFT; iova = alloc_iova(&mmu->dmap->iovad, npages, dma_get_mask(dev) >> PAGE_SHIFT, 0); if (!iova) return 0; dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, iova->pfn_hi); iova_addr = iova->pfn_lo; for_each_sg(sglist, sg, count, i) { int rval; dev_dbg(dev, "mapping entry %d: iova 0x%lx phy %pad size %d\n", i, iova_addr << PAGE_SHIFT, &sg_dma_address(sg), sg_dma_len(sg)); dev_dbg(dev, "mapping entry %d: sg->length = %d\n", i, sg->length); rval = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT, sg_dma_address(sg), PAGE_ALIGN(sg_dma_len(sg))); if (rval) goto out_fail; sg_dma_address(sg) = iova_addr << PAGE_SHIFT; iova_addr += PAGE_ALIGN(sg_dma_len(sg)) >> PAGE_SHIFT; } #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) #else if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) #endif ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL); mmu->tlb_invalidate(mmu); return count; out_fail: ipu_dma_unmap_sg(dev, sglist, i, dir, attrs); return 0; } /* * Create scatter-list for the already allocated DMA buffer */ static int ipu_dma_get_sgtable(struct device *dev, 
struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs *attrs) #else unsigned long attrs) #endif { struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu; struct vm_info *info; int n_pages; int ret = 0; info = get_vm_info(mmu, handle); if (!info) return -EFAULT; if (!info->vaddr) return -EFAULT; if (WARN_ON(!info->pages)) return -ENOMEM; n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size, GFP_KERNEL); if (ret) dev_warn(dev, "IPU get sgt table fail\n"); return ret; } const struct dma_map_ops ipu_dma_ops = { .alloc = ipu_dma_alloc, .free = ipu_dma_free, .mmap = ipu_dma_mmap, .map_sg = ipu_dma_map_sg, .unmap_sg = ipu_dma_unmap_sg, .sync_single_for_cpu = ipu_dma_sync_single_for_cpu, .sync_single_for_device = ipu_dma_sync_single_for_cpu, .sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu, .sync_sg_for_device = ipu_dma_sync_sg_for_cpu, .get_sgtable = ipu_dma_get_sgtable, }; ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-dma.h000066400000000000000000000005351471702545500250440ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_DMA_H #define IPU_DMA_H #include struct ipu_mmu_info; struct ipu_dma_mapping { struct ipu_mmu_info *mmu_info; struct iova_domain iovad; struct kref ref; }; extern const struct dma_map_ops ipu_dma_ops; #endif /* IPU_DMA_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-fw-com.c000066400000000000000000000331211471702545500254630ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include "ipu.h" #include "ipu-trace.h" #include "ipu-fw-com.h" #include "ipu-bus.h" /* * FWCOM layer is a shared resource between FW and driver. It consist * of token queues to both send and receive directions. Queue is simply * an array of structures with read and write indexes to the queue. * There are 1...n queues to both directions. Queues locates in * system ram and are mapped to ISP MMU so that both CPU and ISP can * see the same buffer. Indexes are located in ISP DMEM so that FW code * can poll those with very low latency and cost. CPU access to indexes is * more costly but that happens only at message sending time and * interrupt trigged message handling. CPU doesn't need to poll indexes. * wr_reg / rd_reg are offsets to those dmem location. They are not * the indexes itself. 
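 *
 * Purely as an illustration of the index arithmetic used by
 * num_messages()/num_free() below (a sketch, not part of the shared
 * FW interface): a queue created for N tokens owns a ring of N + 1
 * slots, and occupancy is derived from the two indexes alone:
 *
 *	pending = (wr >= rd) ? wr - rd : wr + size - rd;
 *	free    = size - pending;
 *
 * The send path asks for num_free(wr + 1, rd, size) so one slot always
 * stays empty and a completely full ring cannot be confused with an
 * empty one.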
*/ /* Shared structure between driver and FW - do not modify */ struct ipu_fw_sys_queue { u64 host_address; u32 vied_address; u32 size; u32 token_size; u32 wr_reg; /* reg no in subsystem's regmem */ u32 rd_reg; u32 _align; }; struct ipu_fw_sys_queue_res { u64 host_address; u32 vied_address; u32 reg; }; enum syscom_state { /* Program load or explicit host setting should init to this */ SYSCOM_STATE_UNINIT = 0x57A7E000, /* SP Syscom sets this when it is ready for use */ SYSCOM_STATE_READY = 0x57A7E001, /* SP Syscom sets this when no more syscom accesses will happen */ SYSCOM_STATE_INACTIVE = 0x57A7E002 }; enum syscom_cmd { /* Program load or explicit host setting should init to this */ SYSCOM_COMMAND_UNINIT = 0x57A7F000, /* Host Syscom requests syscom to become inactive */ SYSCOM_COMMAND_INACTIVE = 0x57A7F001 }; /* firmware config: data that sent from the host to SP via DDR */ /* Cell copies data into a context */ struct ipu_fw_syscom_config { u32 firmware_address; u32 num_input_queues; u32 num_output_queues; /* ISP pointers to an array of ipu_fw_sys_queue structures */ u32 input_queue; u32 output_queue; /* ISYS / PSYS private data */ u32 specific_addr; u32 specific_size; }; /* End of shared structures / data */ struct ipu_fw_com_context { struct ipu_bus_device *adev; void __iomem *dmem_addr; int (*cell_ready)(struct ipu_bus_device *adev); void (*cell_start)(struct ipu_bus_device *adev); void *dma_buffer; dma_addr_t dma_addr; unsigned int dma_size; unsigned long attrs; unsigned int num_input_queues; unsigned int num_output_queues; struct ipu_fw_sys_queue *input_queue; /* array of host to SP queues */ struct ipu_fw_sys_queue *output_queue; /* array of SP to host */ void *config_host_addr; void *specific_host_addr; u64 ibuf_host_addr; u64 obuf_host_addr; u32 config_vied_addr; u32 input_queue_vied_addr; u32 output_queue_vied_addr; u32 specific_vied_addr; u32 ibuf_vied_addr; u32 obuf_vied_addr; unsigned int buttress_boot_offset; void __iomem *base_addr; }; #define FW_COM_WR_REG 0 #define FW_COM_RD_REG 4 #define REGMEM_OFFSET 0 #define TUNIT_MAGIC_PATTERN 0x5a5a5a5a enum regmem_id { /* pass pkg_dir address to SPC in non-secure mode */ PKG_DIR_ADDR_REG = 0, /* Tunit CFG blob for secure - provided by host.*/ TUNIT_CFG_DWR_REG = 1, /* syscom commands - modified by the host */ SYSCOM_COMMAND_REG = 2, /* Store interrupt status - updated by SP */ SYSCOM_IRQ_REG = 3, /* first syscom queue pointer register */ SYSCOM_QPR_BASE_REG = 4 }; enum message_direction { DIR_RECV = 0, DIR_SEND }; #define BUTRESS_FW_BOOT_PARAMS_0 0x4000 #define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) ((base) \ + BUTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4) enum buttress_syscom_id { /* pass syscom configuration to SPC */ SYSCOM_CONFIG_ID = 0, /* syscom state - modified by SP */ SYSCOM_STATE_ID = 1, /* syscom vtl0 addr mask */ SYSCOM_VTL0_ADDR_MASK_ID = 2, SYSCOM_ID_MAX }; static unsigned int num_messages(unsigned int wr, unsigned int rd, unsigned int size) { if (wr < rd) wr += size; return wr - rd; } static unsigned int num_free(unsigned int wr, unsigned int rd, unsigned int size) { return size - num_messages(wr, rd, size); } static unsigned int curr_index(void __iomem *q_dmem, enum message_direction dir) { return readl(q_dmem + (dir == DIR_RECV ? FW_COM_RD_REG : FW_COM_WR_REG)); } static unsigned int inc_index(void __iomem *q_dmem, struct ipu_fw_sys_queue *q, enum message_direction dir) { unsigned int index; index = curr_index(q_dmem, dir) + 1; return index >= q->size ? 
0 : index; } static unsigned int ipu_sys_queue_buf_size(unsigned int size, unsigned int token_size) { return (size + 1) * token_size; } static void ipu_sys_queue_init(struct ipu_fw_sys_queue *q, unsigned int size, unsigned int token_size, struct ipu_fw_sys_queue_res *res) { unsigned int buf_size; q->size = size + 1; q->token_size = token_size; buf_size = ipu_sys_queue_buf_size(size, token_size); /* acquire the shared buffer space */ q->host_address = res->host_address; res->host_address += buf_size; q->vied_address = res->vied_address; res->vied_address += buf_size; /* acquire the shared read and writer pointers */ q->wr_reg = res->reg; res->reg++; q->rd_reg = res->reg; res->reg++; } void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg, struct ipu_bus_device *adev, void __iomem *base) { struct ipu_fw_com_context *ctx; struct ipu_fw_syscom_config *fw_cfg; unsigned int i; unsigned int sizeall, offset; unsigned int sizeinput = 0, sizeoutput = 0; unsigned long attrs = 0; struct ipu_fw_sys_queue_res res; /* error handling */ if (!cfg || !cfg->cell_start || !cfg->cell_ready) return NULL; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; ctx->adev = adev; ctx->cell_start = cfg->cell_start; ctx->cell_ready = cfg->cell_ready; ctx->buttress_boot_offset = cfg->buttress_boot_offset; ctx->base_addr = base; ctx->num_input_queues = cfg->num_input_queues; ctx->num_output_queues = cfg->num_output_queues; /* * Allocate DMA mapped memory. Allocate one big chunk. */ sizeall = /* Base cfg for FW */ roundup(sizeof(struct ipu_fw_syscom_config), 8) + /* Descriptions of the queues */ cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue) + cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue) + /* FW specific information structure */ roundup(cfg->specific_size, 8); for (i = 0; i < cfg->num_input_queues; i++) sizeinput += ipu_sys_queue_buf_size(cfg->input[i].queue_size, cfg->input[i].token_size); for (i = 0; i < cfg->num_output_queues; i++) sizeoutput += ipu_sys_queue_buf_size(cfg->output[i].queue_size, cfg->output[i].token_size); sizeall += sizeinput + sizeoutput; ctx->dma_buffer = dma_alloc_attrs(&ctx->adev->dev, sizeall, &ctx->dma_addr, GFP_KERNEL, #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) attrs); ctx->attrs = attrs; #else NULL); #endif if (!ctx->dma_buffer) { dev_err(&ctx->adev->dev, "failed to allocate dma memory\n"); kfree(ctx); return NULL; } ctx->dma_size = sizeall; /* This is the address where FW starts to parse allocations */ ctx->config_host_addr = ctx->dma_buffer; ctx->config_vied_addr = ctx->dma_addr; fw_cfg = (struct ipu_fw_syscom_config *)ctx->config_host_addr; offset = roundup(sizeof(struct ipu_fw_syscom_config), 8); ctx->input_queue = ctx->dma_buffer + offset; ctx->input_queue_vied_addr = ctx->dma_addr + offset; offset += cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue); ctx->output_queue = ctx->dma_buffer + offset; ctx->output_queue_vied_addr = ctx->dma_addr + offset; offset += cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue); ctx->specific_host_addr = ctx->dma_buffer + offset; ctx->specific_vied_addr = ctx->dma_addr + offset; offset += roundup(cfg->specific_size, 8); ctx->ibuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset); ctx->ibuf_vied_addr = ctx->dma_addr + offset; offset += sizeinput; ctx->obuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset); ctx->obuf_vied_addr = ctx->dma_addr + offset; offset += sizeoutput; /* initialize input queues */ res.reg = SYSCOM_QPR_BASE_REG; res.host_address = 
ctx->ibuf_host_addr; res.vied_address = ctx->ibuf_vied_addr; for (i = 0; i < cfg->num_input_queues; i++) { ipu_sys_queue_init(ctx->input_queue + i, cfg->input[i].queue_size, cfg->input[i].token_size, &res); } /* initialize output queues */ res.host_address = ctx->obuf_host_addr; res.vied_address = ctx->obuf_vied_addr; for (i = 0; i < cfg->num_output_queues; i++) { ipu_sys_queue_init(ctx->output_queue + i, cfg->output[i].queue_size, cfg->output[i].token_size, &res); } /* copy firmware specific data */ if (cfg->specific_addr && cfg->specific_size) { memcpy((void *)ctx->specific_host_addr, cfg->specific_addr, cfg->specific_size); } fw_cfg->num_input_queues = cfg->num_input_queues; fw_cfg->num_output_queues = cfg->num_output_queues; fw_cfg->input_queue = ctx->input_queue_vied_addr; fw_cfg->output_queue = ctx->output_queue_vied_addr; fw_cfg->specific_addr = ctx->specific_vied_addr; fw_cfg->specific_size = cfg->specific_size; return ctx; } EXPORT_SYMBOL_GPL(ipu_fw_com_prepare); int ipu_fw_com_open(struct ipu_fw_com_context *ctx) { dma_addr_t trace_buff = TUNIT_MAGIC_PATTERN; /* * Write trace buff start addr to tunit cfg reg. * This feature is used to enable tunit trace in secure mode. */ ipu_trace_buffer_dma_handle(&ctx->adev->dev, &trace_buff); writel(trace_buff, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4); /* Check if SP is in valid state */ if (!ctx->cell_ready(ctx->adev)) return -EIO; /* store syscom uninitialized command */ writel(SYSCOM_COMMAND_UNINIT, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); /* store syscom uninitialized state */ writel(SYSCOM_STATE_UNINIT, BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, ctx->buttress_boot_offset, SYSCOM_STATE_ID)); /* store firmware configuration address */ writel(ctx->config_vied_addr, BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, ctx->buttress_boot_offset, SYSCOM_CONFIG_ID)); ctx->cell_start(ctx->adev); return 0; } EXPORT_SYMBOL_GPL(ipu_fw_com_open); int ipu_fw_com_close(struct ipu_fw_com_context *ctx) { int state; state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, ctx->buttress_boot_offset, SYSCOM_STATE_ID)); if (state != SYSCOM_STATE_READY) return -EBUSY; /* set close request flag */ writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); return 0; } EXPORT_SYMBOL_GPL(ipu_fw_com_close); int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force) { /* check if release is forced, an verify cell state if it is not */ if (!force && !ctx->cell_ready(ctx->adev)) return -EBUSY; dma_free_attrs(&ctx->adev->dev, ctx->dma_size, ctx->dma_buffer, ctx->dma_addr, #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) ctx->attrs); #else NULL); #endif kfree(ctx); return 0; } EXPORT_SYMBOL_GPL(ipu_fw_com_release); int ipu_fw_com_ready(struct ipu_fw_com_context *ctx) { int state; state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr, ctx->buttress_boot_offset, SYSCOM_STATE_ID)); if (state != SYSCOM_STATE_READY) return -EBUSY; /* SPC is not ready to handle messages yet */ return 0; } EXPORT_SYMBOL_GPL(ipu_fw_com_ready); static bool is_index_valid(struct ipu_fw_sys_queue *q, unsigned int index) { if (index >= q->size) return false; return true; } void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr) { struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr]; void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; unsigned int wr, rd; unsigned int packets; unsigned int index; wr = readl(q_dmem + FW_COM_WR_REG); rd = readl(q_dmem + FW_COM_RD_REG); /* Catch indexes in dmem */ if (!is_index_valid(q, wr) || !is_index_valid(q, rd)) return 
NULL; packets = num_free(wr + 1, rd, q->size); if (!packets) return NULL; index = curr_index(q_dmem, DIR_SEND); return (void *)(unsigned long)q->host_address + (index * q->token_size); } EXPORT_SYMBOL_GPL(ipu_send_get_token); void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr) { struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr]; void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; int index = curr_index(q_dmem, DIR_SEND); /* Increment index */ index = inc_index(q_dmem, q, DIR_SEND); writel(index, q_dmem + FW_COM_WR_REG); } EXPORT_SYMBOL_GPL(ipu_send_put_token); void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr) { struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr]; void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; unsigned int wr, rd; unsigned int packets; void *addr; wr = readl(q_dmem + FW_COM_WR_REG); rd = readl(q_dmem + FW_COM_RD_REG); /* Catch indexes in dmem? */ if (!is_index_valid(q, wr) || !is_index_valid(q, rd)) return NULL; packets = num_messages(wr, rd, q->size); if (!packets) return NULL; addr = (void *)(unsigned long)q->host_address + (rd * q->token_size); return addr; } EXPORT_SYMBOL_GPL(ipu_recv_get_token); void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr) { struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr]; void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; unsigned int rd = inc_index(q_dmem, q, DIR_RECV); /* Release index */ writel(rd, q_dmem + FW_COM_RD_REG); } EXPORT_SYMBOL_GPL(ipu_recv_put_token); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu fw comm library"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-fw-com.h000066400000000000000000000027141471702545500254740ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_FW_COM_H #define IPU_FW_COM_H struct ipu_fw_com_context; struct ipu_bus_device; struct ipu_fw_syscom_queue_config { unsigned int queue_size; /* tokens per queue */ unsigned int token_size; /* bytes per token */ }; #define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0 #define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7 struct ipu_fw_com_cfg { unsigned int num_input_queues; unsigned int num_output_queues; struct ipu_fw_syscom_queue_config *input; struct ipu_fw_syscom_queue_config *output; unsigned int dmem_addr; /* firmware-specific configuration data */ void *specific_addr; unsigned int specific_size; int (*cell_ready)(struct ipu_bus_device *adev); void (*cell_start)(struct ipu_bus_device *adev); unsigned int buttress_boot_offset; }; void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg, struct ipu_bus_device *adev, void __iomem *base); int ipu_fw_com_open(struct ipu_fw_com_context *ctx); int ipu_fw_com_ready(struct ipu_fw_com_context *ctx); int ipu_fw_com_close(struct ipu_fw_com_context *ctx); int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force); void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr); void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr); void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr); void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-fw-isys.c000066400000000000000000000462621471702545500257060ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include "ipu.h" #include "ipu-trace.h" #include "ipu-platform-regs.h" #include 
"ipu-platform.h" #include "ipu-fw-isys.h" #include "ipu-fw-com.h" #include "ipu-isys.h" #define IPU_FW_UNSUPPORTED_DATA_TYPE 0 static const uint32_t extracted_bits_per_pixel_per_mipi_data_type[N_IPU_FW_ISYS_MIPI_DATA_TYPE] = { 64, /* [0x00] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE */ 64, /* [0x01] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE */ 64, /* [0x02] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE */ 64, /* [0x03] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x04] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x05] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x06] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x07] */ 64, /* [0x08] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 */ 64, /* [0x09] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 */ 64, /* [0x0A] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 */ 64, /* [0x0B] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 */ 64, /* [0x0C] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 */ 64, /* [0x0D] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 */ 64, /* [0x0E] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 */ 64, /* [0x0F] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x10] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x11] */ 8, /* [0x12] IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x13] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x14] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x15] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x16] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x17] */ 12, /* [0x18] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 */ 15, /* [0x19] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 */ 12, /* [0x1A] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x1B] */ 12, /* [0x1C] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT */ 15, /* [0x1D] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT */ 16, /* [0x1E] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 */ 20, /* [0x1F] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 */ 16, /* [0x20] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 */ 16, /* [0x21] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 */ 16, /* [0x22] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 */ 18, /* [0x23] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 */ 24, /* [0x24] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x25] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x26] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x27] */ 6, /* [0x28] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 */ 7, /* [0x29] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 */ 8, /* [0x2A] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 */ 10, /* [0x2B] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 */ 12, /* [0x2C] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 */ 14, /* [0x2D] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 */ 16, /* [0x2E] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 */ 8, /* [0x2F] IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 */ 8, /* [0x30] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 */ 8, /* [0x31] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 */ 8, /* [0x32] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 */ 8, /* [0x33] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 */ 8, /* [0x34] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 */ 8, /* [0x35] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 */ 8, /* [0x36] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 */ 8, /* [0x37] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x38] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x39] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3A] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3B] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3C] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3D] */ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3E] */ IPU_FW_UNSUPPORTED_DATA_TYPE /* [0x3F] */ }; static const char send_msg_types[N_IPU_FW_ISYS_SEND_TYPE][32] = { "STREAM_OPEN", 
"STREAM_START", "STREAM_START_AND_CAPTURE", "STREAM_CAPTURE", "STREAM_STOP", "STREAM_FLUSH", "STREAM_CLOSE" }; static int handle_proxy_response(struct ipu_isys *isys, unsigned int req_id) { struct ipu_fw_isys_proxy_resp_info_abi *resp; int rval = -EIO; resp = (struct ipu_fw_isys_proxy_resp_info_abi *) ipu_recv_get_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES); if (!resp) return 1; dev_dbg(&isys->adev->dev, "Proxy response: id 0x%x, error %d, details %d\n", resp->request_id, resp->error_info.error, resp->error_info.error_details); if (req_id == resp->request_id) rval = 0; ipu_recv_put_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES); return rval; } /* Simple blocking proxy send function */ int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys, unsigned int req_id, unsigned int index, unsigned int offset, u32 value) { struct ipu_fw_com_context *ctx = isys->fwcom; struct ipu_fw_proxy_send_queue_token *token; unsigned int timeout = 1000; int rval = -EBUSY; dev_dbg(&isys->adev->dev, "proxy send token: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n", req_id, index, offset, value); token = ipu_send_get_token(ctx, IPU_BASE_PROXY_SEND_QUEUES); if (!token) goto leave; token->request_id = req_id; token->region_index = index; token->offset = offset; token->value = value; ipu_send_put_token(ctx, IPU_BASE_PROXY_SEND_QUEUES); /* Currently proxy doesn't support irq based service. Poll */ do { usleep_range(100, 110); rval = handle_proxy_response(isys, req_id); if (!rval) break; if (rval == -EIO) { dev_err(&isys->adev->dev, "Proxy response received with unexpected id\n"); break; } timeout--; } while (rval && timeout); if (!timeout) dev_err(&isys->adev->dev, "Proxy response timed out\n"); leave: return rval; } int ipu_fw_isys_complex_cmd(struct ipu_isys *isys, const unsigned int stream_handle, void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, size_t size, enum ipu_fw_isys_send_type send_type) { struct ipu_fw_com_context *ctx = isys->fwcom; struct ipu_fw_send_queue_token *token; if (send_type >= N_IPU_FW_ISYS_SEND_TYPE) return -EINVAL; dev_dbg(&isys->adev->dev, "send_token: %s, stream_handle: %u\n", send_msg_types[send_type], stream_handle); /* * Time to flush cache in case we have some payload. Not all messages * have that */ if (cpu_mapped_buf) clflush_cache_range(cpu_mapped_buf, size); token = ipu_send_get_token(ctx, stream_handle + IPU_BASE_MSG_SEND_QUEUES); if (!token) return -EBUSY; token->payload = dma_mapped_buf; token->buf_handle = (unsigned long)cpu_mapped_buf; token->send_type = send_type; ipu_send_put_token(ctx, stream_handle + IPU_BASE_MSG_SEND_QUEUES); return 0; } int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, const unsigned int stream_handle, enum ipu_fw_isys_send_type send_type) { return ipu_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, send_type); } int ipu_fw_isys_close(struct ipu_isys *isys) { struct device *dev = &isys->adev->dev; int timeout = IPU_ISYS_TURNOFF_TIMEOUT; int rval; unsigned long flags; void *fwcom; /* * Stop the isys fw. Actual close takes * some time as the FW must stop its actions including code fetch * to SP icache. * spinlock to wait the interrupt handler to be finished */ spin_lock_irqsave(&isys->power_lock, flags); rval = ipu_fw_com_close(isys->fwcom); fwcom = isys->fwcom; isys->fwcom = NULL; spin_unlock_irqrestore(&isys->power_lock, flags); if (rval) dev_err(dev, "Device close failure: %d\n", rval); /* release probably fails if the close failed. 
Let's try still */ do { usleep_range(IPU_ISYS_TURNOFF_DELAY_US, 2 * IPU_ISYS_TURNOFF_DELAY_US); rval = ipu_fw_com_release(fwcom, 0); timeout--; } while (rval != 0 && timeout); if (rval) { dev_err(dev, "Device release time out %d\n", rval); spin_lock_irqsave(&isys->power_lock, flags); isys->fwcom = fwcom; spin_unlock_irqrestore(&isys->power_lock, flags); } return rval; } void ipu_fw_isys_cleanup(struct ipu_isys *isys) { int ret; ret = ipu_fw_com_release(isys->fwcom, 1); if (ret < 0) dev_err(&isys->adev->dev, "Device busy, fw_com release failed."); isys->fwcom = NULL; } static void start_sp(struct ipu_bus_device *adev) { struct ipu_isys *isys = ipu_bus_get_drvdata(adev); void __iomem *spc_regs_base = isys->pdata->base + isys->pdata->ipdata->hw_variant.spc_offset; u32 val = 0; val |= IPU_ISYS_SPC_STATUS_START | IPU_ISYS_SPC_STATUS_RUN | IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; val |= isys->icache_prefetch ? IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; writel(val, spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); } static int query_sp(struct ipu_bus_device *adev) { struct ipu_isys *isys = ipu_bus_get_drvdata(adev); void __iomem *spc_regs_base = isys->pdata->base + isys->pdata->ipdata->hw_variant.spc_offset; u32 val = readl(spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); /* return true when READY == 1, START == 0 */ val &= IPU_ISYS_SPC_STATUS_READY | IPU_ISYS_SPC_STATUS_START; return val == IPU_ISYS_SPC_STATUS_READY; } static int ipu6_isys_fwcom_cfg_init(struct ipu_isys *isys, struct ipu_fw_com_cfg *fwcom, unsigned int num_streams) { int i; unsigned int size; struct ipu_fw_syscom_queue_config *input_queue_cfg; struct ipu_fw_syscom_queue_config *output_queue_cfg; struct ipu6_fw_isys_fw_config *isys_fw_cfg; int num_out_message_queues = 1; int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY; int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV; int type_msg = IPU_FW_ISYS_QUEUE_TYPE_MSG; int base_dev_send = IPU_BASE_DEV_SEND_QUEUES; int base_msg_send = IPU_BASE_MSG_SEND_QUEUES; int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES; int num_in_message_queues; unsigned int max_streams; unsigned int max_send_queues, max_sram_blocks, max_devq_size; max_streams = IPU6_ISYS_NUM_STREAMS; max_send_queues = IPU6_N_MAX_SEND_QUEUES; max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX; max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE; if (ipu_ver == IPU_VER_6SE) { max_streams = IPU6SE_ISYS_NUM_STREAMS; max_send_queues = IPU6SE_N_MAX_SEND_QUEUES; max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX; max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE; } num_in_message_queues = clamp_t(unsigned int, num_streams, 1, max_streams); isys_fw_cfg = devm_kzalloc(&isys->adev->dev, sizeof(*isys_fw_cfg), GFP_KERNEL); if (!isys_fw_cfg) return -ENOMEM; isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] = IPU_N_MAX_PROXY_SEND_QUEUES; isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = IPU_N_MAX_DEV_SEND_QUEUES; isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] = num_in_message_queues; isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] = IPU_N_MAX_PROXY_RECV_QUEUES; /* Common msg/dev return queue */ isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = 0; isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] = num_out_message_queues; size = sizeof(*input_queue_cfg) * max_send_queues; input_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL); if (!input_queue_cfg) return -ENOMEM; size = sizeof(*output_queue_cfg) * IPU_N_MAX_RECV_QUEUES; output_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL); if (!output_queue_cfg) return -ENOMEM; 
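	/*
	 * Illustrative note (not taken from FW documentation): with the
	 * IPU_BASE_*_SEND_QUEUES defines from ipu-fw-isys.h the send queue
	 * array is laid out as
	 *   [0]                 proxy send queue
	 *   [1]                 device (high priority) send queue
	 *   [2 .. 2 + num_msg)  per-stream message send queues
	 * so, for example, a configuration with 8 message streams makes the
	 * sums below evaluate to 1 + 1 + 8 = 10 input queues.
	 */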
fwcom->input = input_queue_cfg; fwcom->output = output_queue_cfg; fwcom->num_input_queues = isys_fw_cfg->num_send_queues[type_proxy] + isys_fw_cfg->num_send_queues[type_dev] + isys_fw_cfg->num_send_queues[type_msg]; fwcom->num_output_queues = isys_fw_cfg->num_recv_queues[type_proxy] + isys_fw_cfg->num_recv_queues[type_dev] + isys_fw_cfg->num_recv_queues[type_msg]; /* SRAM partitioning. Equal partitioning is set. */ for (i = 0; i < max_sram_blocks; i++) { if (i < num_in_message_queues) isys_fw_cfg->buffer_partition.num_gda_pages[i] = (IPU_DEVICE_GDA_NR_PAGES * IPU_DEVICE_GDA_VIRT_FACTOR) / num_in_message_queues; else isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0; } /* FW assumes proxy interface at fwcom queue 0 */ for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) { input_queue_cfg[i].token_size = sizeof(struct ipu_fw_proxy_send_queue_token); input_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_SEND_QUEUE; } for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) { input_queue_cfg[base_dev_send + i].token_size = sizeof(struct ipu_fw_send_queue_token); input_queue_cfg[base_dev_send + i].queue_size = max_devq_size; } for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) { input_queue_cfg[base_msg_send + i].token_size = sizeof(struct ipu_fw_send_queue_token); input_queue_cfg[base_msg_send + i].queue_size = IPU_ISYS_SIZE_SEND_QUEUE; } for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) { output_queue_cfg[i].token_size = sizeof(struct ipu_fw_proxy_resp_queue_token); output_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_RECV_QUEUE; } /* There is no recv DEV queue */ for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) { output_queue_cfg[base_msg_recv + i].token_size = sizeof(struct ipu_fw_resp_queue_token); output_queue_cfg[base_msg_recv + i].queue_size = IPU_ISYS_SIZE_RECV_QUEUE; } fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset; fwcom->specific_addr = isys_fw_cfg; fwcom->specific_size = sizeof(*isys_fw_cfg); return 0; } int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams) { int retry = IPU_ISYS_OPEN_RETRY; struct ipu_fw_com_cfg fwcom = { .cell_start = start_sp, .cell_ready = query_sp, .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET, }; struct device *dev = &isys->adev->dev; int rval; ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams); isys->fwcom = ipu_fw_com_prepare(&fwcom, isys->adev, isys->pdata->base); if (!isys->fwcom) { dev_err(dev, "isys fw com prepare failed\n"); return -EIO; } rval = ipu_fw_com_open(isys->fwcom); if (rval) { dev_err(dev, "isys fw com open failed %d\n", rval); return rval; } do { usleep_range(IPU_ISYS_OPEN_TIMEOUT_US, IPU_ISYS_OPEN_TIMEOUT_US + 10); rval = ipu_fw_com_ready(isys->fwcom); if (!rval) break; retry--; } while (retry > 0); if (!retry && rval) { dev_err(dev, "isys port open ready failed %d\n", rval); ipu_fw_isys_close(isys); } return rval; } struct ipu_fw_isys_resp_info_abi * ipu_fw_isys_get_resp(void *context, unsigned int queue, struct ipu_fw_isys_resp_info_abi *response) { return (struct ipu_fw_isys_resp_info_abi *) ipu_recv_get_token(context, queue); } void ipu_fw_isys_put_resp(void *context, unsigned int queue) { ipu_recv_put_token(context, queue); } void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg) { unsigned int i; unsigned int idx; for (i = 0; i < stream_cfg->nof_input_pins; i++) { idx = stream_cfg->input_pins[i].dt; stream_cfg->input_pins[i].bits_per_pix = extracted_bits_per_pixel_per_mipi_data_type[idx]; 
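		/*
		 * Illustrative example of the lookup above: a RAW10 input pin
		 * has dt == IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 (0x2B), so
		 * bits_per_pix becomes 10; an unsupported code such as 0x04
		 * resolves to IPU_FW_UNSUPPORTED_DATA_TYPE (0).
		 */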
stream_cfg->input_pins[i].mapped_dt = N_IPU_FW_ISYS_MIPI_DATA_TYPE; stream_cfg->input_pins[i].mipi_decompression = IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION; /* * CSI BE can be used to crop and change bayer order. * NOTE: currently it only crops first and last lines in height. */ if (stream_cfg->crop.top_offset & 1) stream_cfg->input_pins[i].crop_first_and_last_lines = 1; stream_cfg->input_pins[i].capture_mode = IPU_FW_ISYS_CAPTURE_MODE_REGULAR; } } void ipu_fw_isys_dump_stream_cfg(struct device *dev, struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg) { unsigned int i; dev_dbg(dev, "---------------------------\n"); dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n"); dev_dbg(dev, "---------------------------\n"); dev_dbg(dev, "Source %d\n", stream_cfg->src); dev_dbg(dev, "VC %d\n", stream_cfg->vc); dev_dbg(dev, "Nof input pins %d\n", stream_cfg->nof_input_pins); dev_dbg(dev, "Nof output pins %d\n", stream_cfg->nof_output_pins); for (i = 0; i < stream_cfg->nof_input_pins; i++) { dev_dbg(dev, "Input pin %d\n", i); dev_dbg(dev, "Mipi data type 0x%0x\n", stream_cfg->input_pins[i].dt); dev_dbg(dev, "Mipi store mode %d\n", stream_cfg->input_pins[i].mipi_store_mode); dev_dbg(dev, "Bits per pixel %d\n", stream_cfg->input_pins[i].bits_per_pix); dev_dbg(dev, "Mapped data type 0x%0x\n", stream_cfg->input_pins[i].mapped_dt); dev_dbg(dev, "Input res width %d\n", stream_cfg->input_pins[i].input_res.width); dev_dbg(dev, "Input res height %d\n", stream_cfg->input_pins[i].input_res.height); dev_dbg(dev, "mipi decompression %d\n", stream_cfg->input_pins[i].mipi_decompression); dev_dbg(dev, "capture_mode %d\n", stream_cfg->input_pins[i].capture_mode); } dev_dbg(dev, "Crop info\n"); dev_dbg(dev, "Crop.top_offset %d\n", stream_cfg->crop.top_offset); dev_dbg(dev, "Crop.left_offset %d\n", stream_cfg->crop.left_offset); dev_dbg(dev, "Crop.bottom_offset %d\n", stream_cfg->crop.bottom_offset); dev_dbg(dev, "Crop.right_offset %d\n", stream_cfg->crop.right_offset); dev_dbg(dev, "----------------\n"); for (i = 0; i < stream_cfg->nof_output_pins; i++) { dev_dbg(dev, "Output pin %d\n", i); dev_dbg(dev, "Output input pin id %d\n", stream_cfg->output_pins[i].input_pin_id); dev_dbg(dev, "Output res width %d\n", stream_cfg->output_pins[i].output_res.width); dev_dbg(dev, "Output res height %d\n", stream_cfg->output_pins[i].output_res.height); dev_dbg(dev, "Stride %d\n", stream_cfg->output_pins[i].stride); dev_dbg(dev, "Pin type %d\n", stream_cfg->output_pins[i].pt); dev_dbg(dev, "Payload %d\n", stream_cfg->output_pins[i].payload_buf_size); dev_dbg(dev, "Ft %d\n", stream_cfg->output_pins[i].ft); dev_dbg(dev, "Watermar in lines %d\n", stream_cfg->output_pins[i].watermark_in_lines); dev_dbg(dev, "Send irq %d\n", stream_cfg->output_pins[i].send_irq); dev_dbg(dev, "Reserve compression %d\n", stream_cfg->output_pins[i].reserve_compression); dev_dbg(dev, "snoopable %d\n", stream_cfg->output_pins[i].snoopable); dev_dbg(dev, "error_handling_enable %d\n", stream_cfg->output_pins[i].error_handling_enable); dev_dbg(dev, "sensor type %d\n", stream_cfg->output_pins[i].sensor_type); dev_dbg(dev, "----------------\n"); } dev_dbg(dev, "Isl_use %d\n", stream_cfg->isl_use); dev_dbg(dev, "stream sensor_type %d\n", stream_cfg->sensor_type); } void ipu_fw_isys_dump_frame_buff_set(struct device *dev, struct ipu_fw_isys_frame_buff_set_abi *buf, unsigned int outputs) { unsigned int i; dev_dbg(dev, "--------------------------\n"); dev_dbg(dev, "IPU_FW_ISYS_FRAME_BUFF_SET\n"); dev_dbg(dev, "--------------------------\n"); for (i = 0; i < outputs; 
i++) { dev_dbg(dev, "Output pin %d\n", i); dev_dbg(dev, "out_buf_id %llu\n", buf->output_pins[i].out_buf_id); dev_dbg(dev, "addr 0x%x\n", buf->output_pins[i].addr); dev_dbg(dev, "compress %u\n", buf->output_pins[i].compress); dev_dbg(dev, "----------------\n"); } dev_dbg(dev, "send_irq_sof 0x%x\n", buf->send_irq_sof); dev_dbg(dev, "send_irq_eof 0x%x\n", buf->send_irq_eof); dev_dbg(dev, "send_resp_sof 0x%x\n", buf->send_resp_sof); dev_dbg(dev, "send_resp_eof 0x%x\n", buf->send_resp_eof); dev_dbg(dev, "send_irq_capture_ack 0x%x\n", buf->send_irq_capture_ack); dev_dbg(dev, "send_irq_capture_done 0x%x\n", buf->send_irq_capture_done); dev_dbg(dev, "send_resp_capture_ack 0x%x\n", buf->send_resp_capture_ack); dev_dbg(dev, "send_resp_capture_done 0x%x\n", buf->send_resp_capture_done); } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-fw-isys.h000066400000000000000000000674301471702545500257130ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_FW_ISYS_H #define IPU_FW_ISYS_H #include "ipu-fw-com.h" /* Max number of Input/Output Pins */ #define IPU_MAX_IPINS 4 #define IPU_MAX_OPINS ((IPU_MAX_IPINS) + 1) #define IPU6_STREAM_ID_MAX 16 #define IPU6_NONSECURE_STREAM_ID_MAX 16 #define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX) #define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX) #define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX) #define IPU6SE_STREAM_ID_MAX 8 #define IPU6SE_NONSECURE_STREAM_ID_MAX 4 #define IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX) #define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX) #define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX) /* Single return queue for all streams/commands type */ #define IPU_N_MAX_MSG_RECV_QUEUES 1 /* Single device queue for high priority commands (bypass in-order queue) */ #define IPU_N_MAX_DEV_SEND_QUEUES 1 /* Single dedicated send queue for proxy interface */ #define IPU_N_MAX_PROXY_SEND_QUEUES 1 /* Single dedicated recv queue for proxy interface */ #define IPU_N_MAX_PROXY_RECV_QUEUES 1 /* Send queues layout */ #define IPU_BASE_PROXY_SEND_QUEUES 0 #define IPU_BASE_DEV_SEND_QUEUES \ (IPU_BASE_PROXY_SEND_QUEUES + IPU_N_MAX_PROXY_SEND_QUEUES) #define IPU_BASE_MSG_SEND_QUEUES \ (IPU_BASE_DEV_SEND_QUEUES + IPU_N_MAX_DEV_SEND_QUEUES) /* Recv queues layout */ #define IPU_BASE_PROXY_RECV_QUEUES 0 #define IPU_BASE_MSG_RECV_QUEUES \ (IPU_BASE_PROXY_RECV_QUEUES + IPU_N_MAX_PROXY_RECV_QUEUES) #define IPU_N_MAX_RECV_QUEUES \ (IPU_BASE_MSG_RECV_QUEUES + IPU_N_MAX_MSG_RECV_QUEUES) #define IPU6_N_MAX_SEND_QUEUES \ (IPU_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES) #define IPU6SE_N_MAX_SEND_QUEUES \ (IPU_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES) /* Max number of supported input pins routed in ISL */ #define IPU_MAX_IPINS_IN_ISL 2 /* Max number of planes for frame formats supported by the FW */ #define IPU_PIN_PLANES_MAX 4 /** * enum ipu_fw_isys_resp_type */ enum ipu_fw_isys_resp_type { IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, IPU_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, 
IPU_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, IPU_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, IPU_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, N_IPU_FW_ISYS_RESP_TYPE }; /** * enum ipu_fw_isys_send_type */ enum ipu_fw_isys_send_type { IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, IPU_FW_ISYS_SEND_TYPE_STREAM_START, IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, IPU_FW_ISYS_SEND_TYPE_STREAM_STOP, IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH, IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE, N_IPU_FW_ISYS_SEND_TYPE }; /** * enum ipu_fw_isys_queue_type */ enum ipu_fw_isys_queue_type { IPU_FW_ISYS_QUEUE_TYPE_PROXY = 0, IPU_FW_ISYS_QUEUE_TYPE_DEV, IPU_FW_ISYS_QUEUE_TYPE_MSG, N_IPU_FW_ISYS_QUEUE_TYPE }; /** * enum ipu_fw_isys_stream_source: Specifies a source for a stream */ enum ipu_fw_isys_stream_source { IPU_FW_ISYS_STREAM_SRC_PORT_0 = 0, IPU_FW_ISYS_STREAM_SRC_PORT_1, IPU_FW_ISYS_STREAM_SRC_PORT_2, IPU_FW_ISYS_STREAM_SRC_PORT_3, IPU_FW_ISYS_STREAM_SRC_PORT_4, IPU_FW_ISYS_STREAM_SRC_PORT_5, IPU_FW_ISYS_STREAM_SRC_PORT_6, IPU_FW_ISYS_STREAM_SRC_PORT_7, IPU_FW_ISYS_STREAM_SRC_PORT_8, IPU_FW_ISYS_STREAM_SRC_PORT_9, IPU_FW_ISYS_STREAM_SRC_PORT_10, IPU_FW_ISYS_STREAM_SRC_PORT_11, IPU_FW_ISYS_STREAM_SRC_PORT_12, IPU_FW_ISYS_STREAM_SRC_PORT_13, IPU_FW_ISYS_STREAM_SRC_PORT_14, IPU_FW_ISYS_STREAM_SRC_PORT_15, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_2, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_3, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_4, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_5, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_6, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_7, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_8, IPU_FW_ISYS_STREAM_SRC_MIPIGEN_9, N_IPU_FW_ISYS_STREAM_SRC }; enum ipu_fw_isys_sensor_type { /* non-snoopable to PSYS */ IPU_FW_ISYS_VC1_SENSOR_DATA = 0, /* non-snoopable for PDAF */ IPU_FW_ISYS_VC1_SENSOR_PDAF, /* snoopable to CPU */ IPU_FW_ISYS_VC0_SENSOR_METADATA, /* snoopable to CPU */ IPU_FW_ISYS_VC0_SENSOR_DATA, N_IPU_FW_ISYS_SENSOR_TYPE }; enum ipu6se_fw_isys_sensor_info { /* VC1 */ IPU6SE_FW_ISYS_SENSOR_DATA_1 = 1, IPU6SE_FW_ISYS_SENSOR_DATA_2 = 2, IPU6SE_FW_ISYS_SENSOR_DATA_3 = 3, IPU6SE_FW_ISYS_SENSOR_PDAF_1 = 4, IPU6SE_FW_ISYS_SENSOR_PDAF_2 = 4, /* VC0 */ IPU6SE_FW_ISYS_SENSOR_METADATA = 5, IPU6SE_FW_ISYS_SENSOR_DATA_4 = 6, IPU6SE_FW_ISYS_SENSOR_DATA_5 = 7, IPU6SE_FW_ISYS_SENSOR_DATA_6 = 8, IPU6SE_FW_ISYS_SENSOR_DATA_7 = 9, IPU6SE_FW_ISYS_SENSOR_DATA_8 = 10, IPU6SE_FW_ISYS_SENSOR_DATA_9 = 11, N_IPU6SE_FW_ISYS_SENSOR_INFO, IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START = IPU6SE_FW_ISYS_SENSOR_DATA_1, IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END = IPU6SE_FW_ISYS_SENSOR_DATA_3, IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START = IPU6SE_FW_ISYS_SENSOR_DATA_4, IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END = IPU6SE_FW_ISYS_SENSOR_DATA_9, IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START = IPU6SE_FW_ISYS_SENSOR_PDAF_1, IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END = IPU6SE_FW_ISYS_SENSOR_PDAF_2, }; enum ipu6_fw_isys_sensor_info { /* VC1 */ IPU6_FW_ISYS_SENSOR_DATA_1 = 1, IPU6_FW_ISYS_SENSOR_DATA_2 = 2, IPU6_FW_ISYS_SENSOR_DATA_3 = 3, IPU6_FW_ISYS_SENSOR_DATA_4 = 4, IPU6_FW_ISYS_SENSOR_DATA_5 = 5, IPU6_FW_ISYS_SENSOR_DATA_6 = 6, IPU6_FW_ISYS_SENSOR_DATA_7 = 7, IPU6_FW_ISYS_SENSOR_DATA_8 = 8, IPU6_FW_ISYS_SENSOR_DATA_9 = 9, IPU6_FW_ISYS_SENSOR_DATA_10 = 10, IPU6_FW_ISYS_SENSOR_PDAF_1 = 11, IPU6_FW_ISYS_SENSOR_PDAF_2 = 12, /* VC0 */ IPU6_FW_ISYS_SENSOR_METADATA = 13, IPU6_FW_ISYS_SENSOR_DATA_11 = 14, IPU6_FW_ISYS_SENSOR_DATA_12 = 15, IPU6_FW_ISYS_SENSOR_DATA_13 = 16, 
IPU6_FW_ISYS_SENSOR_DATA_14 = 17, IPU6_FW_ISYS_SENSOR_DATA_15 = 18, IPU6_FW_ISYS_SENSOR_DATA_16 = 19, N_IPU6_FW_ISYS_SENSOR_INFO, IPU6_FW_ISYS_VC1_SENSOR_DATA_START = IPU6_FW_ISYS_SENSOR_DATA_1, IPU6_FW_ISYS_VC1_SENSOR_DATA_END = IPU6_FW_ISYS_SENSOR_DATA_10, IPU6_FW_ISYS_VC0_SENSOR_DATA_START = IPU6_FW_ISYS_SENSOR_DATA_11, IPU6_FW_ISYS_VC0_SENSOR_DATA_END = IPU6_FW_ISYS_SENSOR_DATA_16, IPU6_FW_ISYS_VC1_SENSOR_PDAF_START = IPU6_FW_ISYS_SENSOR_PDAF_1, IPU6_FW_ISYS_VC1_SENSOR_PDAF_END = IPU6_FW_ISYS_SENSOR_PDAF_2, }; #define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_0 #define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_1 #define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_2 #define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_3 #define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU_FW_ISYS_STREAM_SRC_PORT_4 #define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU_FW_ISYS_STREAM_SRC_PORT_5 #define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_6 #define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_7 #define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_8 #define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_9 #define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0 #define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1 /** * enum ipu_fw_isys_mipi_vc: MIPI csi2 spec * supports up to 4 virtual per physical channel */ enum ipu_fw_isys_mipi_vc { IPU_FW_ISYS_MIPI_VC_0 = 0, IPU_FW_ISYS_MIPI_VC_1, IPU_FW_ISYS_MIPI_VC_2, IPU_FW_ISYS_MIPI_VC_3, N_IPU_FW_ISYS_MIPI_VC }; /** * Supported Pixel Frame formats. Expandable if needed */ enum ipu_fw_isys_frame_format_type { IPU_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ IPU_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ IPU_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ IPU_FW_ISYS_FRAME_FORMAT_NV12_TILEY, /* 12 bit YUV 420, * Intel proprietary tiled format, * TileY */ IPU_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ IPU_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ IPU_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ IPU_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ IPU_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ IPU_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ IPU_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ IPU_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ IPU_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ IPU_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ IPU_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ IPU_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ IPU_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ IPU_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ IPU_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ IPU_FW_ISYS_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines * followed by a uvinterleaved line */ IPU_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ IPU_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ IPU_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ IPU_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ IPU_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ IPU_FW_ISYS_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. 
Each 3 sub * pixels are packed into one 16 bit * value, 5 bits for R, 6 bits * for G and 5 bits for B. */ IPU_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ IPU_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, * A=Alpha (alpha is unused) */ IPU_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ IPU_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ N_IPU_FW_ISYS_FRAME_FORMAT }; /* Temporary for driver compatibility */ #define IPU_FW_ISYS_FRAME_FORMAT_RAW (IPU_FW_ISYS_FRAME_FORMAT_RAW16) enum ipu_fw_isys_mipi_compression_type { IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION = 0, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_8_10_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_8_10_TYPE2, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_7_10_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_7_10_TYPE2, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_6_10_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_6_10_TYPE2, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_8_12_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_8_12_TYPE2, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_7_12_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_7_12_TYPE2, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_6_12_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_6_12_TYPE2, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_10_12_TYPE1, IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_10_12_TYPE2, N_IPU_FW_ISYS_MIPI_COMPRESSION_TYPE, }; /** * Supported MIPI data type. Keep in sync array in ipu_fw_isys_private.c */ enum ipu_fw_isys_mipi_data_type { /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ /** Reserved 0x04-0x07 */ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, /** GENERIC SHORT PACKET DATA TYPES */ /** They are used to keep the timing information for * the opening/closing of shutters, * triggering of flashes and etc. 
*/ /* Generic Short Packet Codes 1 - 8 */ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E, IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F, /** GENERIC LONG PACKET DATA TYPES */ IPU_FW_ISYS_MIPI_DATA_TYPE_NULL = 0x10, IPU_FW_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, /* Embedded 8-bit non Image Data */ IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, /** Reserved 0x13-0x17 */ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, /** YUV DATA TYPES */ /* 8 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, /* 10 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, /* 8 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A, /** Reserved 0x1B */ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B, /* YUV420 8-bit Chroma Shifted Pixel Sampling) */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C, /* YUV420 8-bit (Chroma Shifted Pixel Sampling) */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D, /* UYVY..UVYV, 8 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E, /* UYVY..UVYV, 10 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F, /** RGB DATA TYPES */ /* BGR..BGR, 4 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20, /* BGR..BGR, 5 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21, /* BGR..BGR, 5 bits B and R, 6 bits G */ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22, /* BGR..BGR, 6 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23, /* BGR..BGR, 8 bits per subpixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24, /** Reserved 0x25-0x27 */ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, /** RAW DATA TYPES */ /* RAW data, 6 - 14 bits per pixel */ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28, IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29, IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A, IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B, IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C, IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D, /** Reserved 0x2E-2F are used with assigned meaning */ /* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E, /* Binary byte stream, which is target at JPEG, * not specified in CSI-MIPI standard */ IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F, /** USER DEFINED 8-BIT DATA TYPES */ /** For example, the data transmitter (e.g. the SoC sensor) * can keep the JPEG data as * the User Defined Data Type 4 and the MPEG data as the * User Defined Data Type 7. 
*/ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, /** Reserved 0x38-0x3F */ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E, IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F, /* Keep always last and max value */ N_IPU_FW_ISYS_MIPI_DATA_TYPE = 0x40 }; /** enum ipu_fw_isys_pin_type: output pin buffer types. * Buffers can be queued and de-queued to hand them over between IA and ISYS */ enum ipu_fw_isys_pin_type { /* Captured as MIPI packets */ IPU_FW_ISYS_PIN_TYPE_MIPI = 0, /* Captured through the RAW path */ IPU_FW_ISYS_PIN_TYPE_RAW_NS = 1, /* Captured through the SoC path */ IPU_FW_ISYS_PIN_TYPE_RAW_SOC = 3, /* Reserved for future use, maybe short packets */ IPU_FW_ISYS_PIN_TYPE_METADATA_0 = 4, /* Reserved for future use */ IPU_FW_ISYS_PIN_TYPE_METADATA_1 = 5, /* Keep always last and max value */ N_IPU_FW_ISYS_PIN_TYPE }; /** * enum ipu_fw_isys_mipi_store_mode. Describes whether long MIPI packets reach * MIPI SRAM with the long packet header; * if not, the only option is to capture them with pin type MIPI. */ enum ipu_fw_isys_mipi_store_mode { IPU_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, N_IPU_FW_ISYS_MIPI_STORE_MODE }; /** * ISYS capture mode and sensor enums * Used for the Tobii sensor; if in doubt, use the default value 0 */ enum ipu_fw_isys_capture_mode { IPU_FW_ISYS_CAPTURE_MODE_REGULAR = 0, IPU_FW_ISYS_CAPTURE_MODE_BURST, N_IPU_FW_ISYS_CAPTURE_MODE, }; enum ipu_fw_isys_sensor_mode { IPU_FW_ISYS_SENSOR_MODE_NORMAL = 0, IPU_FW_ISYS_SENSOR_MODE_TOBII, N_IPU_FW_ISYS_SENSOR_MODE, }; /** * enum ipu_fw_isys_error. Describes the error type detected by the FW */ enum ipu_fw_isys_error { IPU_FW_ISYS_ERROR_NONE = 0, /* No details */ IPU_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */ IPU_FW_ISYS_ERROR_HW_CONSISTENCY, /* enum */ IPU_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */ IPU_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */ IPU_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */ IPU_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */ IPU_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */ IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */ IPU_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */ IPU_FW_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */ IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */ IPU_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */ N_IPU_FW_ISYS_ERROR }; /** * enum ipu_fw_proxy_error.
Describes the proxy error type detected by the FW */ enum ipu_fw_proxy_error { IPU_FW_PROXY_ERROR_NONE = 0, IPU_FW_PROXY_ERROR_INVALID_WRITE_REGION, IPU_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, N_IPU_FW_PROXY_ERROR }; struct ipu_isys; struct ipu6_fw_isys_buffer_partition_abi { u32 num_gda_pages[IPU6_STREAM_ID_MAX]; }; struct ipu6_fw_isys_fw_config { struct ipu6_fw_isys_buffer_partition_abi buffer_partition; u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE]; u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE]; }; /** * struct ipu_fw_isys_resolution_abi: Generic resolution structure. * @width: resolution width * @height: resolution height */ struct ipu_fw_isys_resolution_abi { u32 width; u32 height; }; /** * struct ipu_fw_isys_output_pin_payload_abi * @out_buf_id: Points to output pin buffer - buffer identifier * @addr: Points to output pin buffer - CSS Virtual Address * @compress: Request frame compression (1), or not (0) */ struct ipu_fw_isys_output_pin_payload_abi { u64 out_buf_id; u32 addr; u32 compress; }; /** * struct ipu_fw_isys_output_pin_info_abi * @output_res: output pin resolution * @stride: output stride in Bytes (not valid for statistics) * @watermark_in_lines: pin watermark level in lines * @payload_buf_size: minimum size in Bytes of all buffers that will be * supplied for capture on this pin * @send_irq: assert if pin event should trigger irq * @pt: pin type (enum ipu_fw_isys_pin_type) * @ft: frame format type (enum ipu_fw_isys_frame_format_type) * @input_pin_id: related input pin id * @reserve_compression: reserve compression resources for pin */ struct ipu_fw_isys_output_pin_info_abi { struct ipu_fw_isys_resolution_abi output_res; u32 stride; u32 watermark_in_lines; u32 payload_buf_size; u32 ts_offsets[IPU_PIN_PLANES_MAX]; u32 s2m_pixel_soc_pixel_remapping; u32 csi_be_soc_pixel_remapping; u8 send_irq; u8 input_pin_id; u8 pt; u8 ft; u8 reserved; u8 reserve_compression; u8 snoopable; u8 error_handling_enable; u32 sensor_type; }; /** * struct ipu_fw_isys_param_pin_abi * @param_buf_id: Points to param port buffer - buffer identifier * @addr: Points to param pin buffer - CSS Virtual Address */ struct ipu_fw_isys_param_pin_abi { u64 param_buf_id; u32 addr; }; /** * struct ipu_fw_isys_input_pin_info_abi * @input_res: input resolution * @dt: mipi data type (enum ipu_fw_isys_mipi_data_type) * @mipi_store_mode: defines if the legacy long packet header will be stored or * discarded; if discarded, the output pin type for this * input pin can only be MIPI * (enum ipu_fw_isys_mipi_store_mode) * @bits_per_pix: native bits per pixel * @mapped_dt: actual data type from sensor * @mipi_decompression: defines which compression will be in mipi backend * @crop_first_and_last_lines: Controls whether to crop the * first and last lines of the * input image. Cropping is done by the HW * device.
* @capture_mode: mode of capture, regular or burst, default value is regular */ struct ipu_fw_isys_input_pin_info_abi { struct ipu_fw_isys_resolution_abi input_res; u8 dt; u8 mipi_store_mode; u8 bits_per_pix; u8 mapped_dt; u8 mipi_decompression; u8 crop_first_and_last_lines; u8 capture_mode; }; /** * struct ipu_fw_isys_cropping_abi - cropping coordinates */ struct ipu_fw_isys_cropping_abi { s32 top_offset; s32 left_offset; s32 bottom_offset; s32 right_offset; }; /** * struct ipu_fw_isys_stream_cfg_data_abi * ISYS stream configuration data structure * @crop: defines cropping resolution for the * maximum number of input pins which can be cropped, * it is directly mapped to the HW devices * @input_pins: input pin descriptors * @output_pins: output pin descriptors * @compfmt: de-compression setting for User Defined Data * @nof_input_pins: number of input pins * @nof_output_pins: number of output pins * @send_irq_sof_discarded: send irq on discarded frame sof response * - if '1' it will override the send_resp_sof_discarded * and send the response * - if '0' the send_resp_sof_discarded will determine * whether to send the response * @send_irq_eof_discarded: send irq on discarded frame eof response * - if '1' it will override the send_resp_eof_discarded * and send the response * - if '0' the send_resp_eof_discarded will determine * whether to send the response * @send_resp_sof_discarded: send response for discarded frame sof detected, * used only when send_irq_sof_discarded is '0' * @send_resp_eof_discarded: send response for discarded frame eof detected, * used only when send_irq_eof_discarded is '0' * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1 * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) * @isl_use: indicates whether stream requires ISL and how * @sensor_type: type of connected sensor, tobii or others, default is 0 */ struct ipu_fw_isys_stream_cfg_data_abi { struct ipu_fw_isys_cropping_abi crop; struct ipu_fw_isys_input_pin_info_abi input_pins[IPU_MAX_IPINS]; struct ipu_fw_isys_output_pin_info_abi output_pins[IPU_MAX_OPINS]; u32 compfmt; u8 nof_input_pins; u8 nof_output_pins; u8 send_irq_sof_discarded; u8 send_irq_eof_discarded; u8 send_resp_sof_discarded; u8 send_resp_eof_discarded; u8 src; u8 vc; u8 isl_use; u8 sensor_type; }; /** * struct ipu_fw_isys_frame_buff_set - frame buffer set * @output_pins: output pin addresses * @send_irq_sof: send irq on frame sof response * - if '1' it will override the send_resp_sof and * send the response * - if '0' the send_resp_sof will determine whether to * send the response * @send_irq_eof: send irq on frame eof response * - if '1' it will override the send_resp_eof and * send the response * - if '0' the send_resp_eof will determine whether to * send the response * @send_resp_sof: send response for frame sof detected, * used only when send_irq_sof is '0' * @send_resp_eof: send response for frame eof detected, * used only when send_irq_eof is '0' * @send_resp_capture_ack: send response for capture ack event * @send_resp_capture_done: send response for capture done event */ struct ipu_fw_isys_frame_buff_set_abi { struct ipu_fw_isys_output_pin_payload_abi output_pins[IPU_MAX_OPINS]; u8 send_irq_sof; u8 send_irq_eof; u8 send_irq_capture_ack; u8 send_irq_capture_done; u8 send_resp_sof; u8 send_resp_eof; u8 send_resp_capture_ack; u8 send_resp_capture_done; u8 reserved; }; /** * struct ipu_fw_isys_error_info_abi * @error: error code if something went wrong * @error_details: depending on error code, it may contain additional 
error info */ struct ipu_fw_isys_error_info_abi { enum ipu_fw_isys_error error; u32 error_details; }; /** * struct ipu_fw_isys_resp_info_comm * @pin: this var is only valid for pin event related responses, * contains pin addresses * @error_info: error information from the FW * @timestamp: Time information for event if available * @stream_handle: stream id the response corresponds to * @type: response type (enum ipu_fw_isys_resp_type) * @pin_id: pin id that the pin payload corresponds to */ struct ipu_fw_isys_resp_info_abi { u64 buf_id; struct ipu_fw_isys_output_pin_payload_abi pin; struct ipu_fw_isys_error_info_abi error_info; u32 timestamp[2]; u8 stream_handle; u8 type; u8 pin_id; u16 reserved; }; /** * struct ipu_fw_isys_proxy_error_info_comm * @proxy_error: error code if something went wrong * @proxy_error_details: depending on error code, it may contain additional * error info */ struct ipu_fw_isys_proxy_error_info_abi { enum ipu_fw_proxy_error error; u32 error_details; }; struct ipu_fw_isys_proxy_resp_info_abi { u32 request_id; struct ipu_fw_isys_proxy_error_info_abi error_info; }; /** * struct ipu_fw_proxy_write_queue_token * @request_id: update id for the specific proxy write request * @region_index: Region id for the proxy write request * @offset: Offset of the write request according to the base address * of the region * @value: Value that is requested to be written with the proxy write request */ struct ipu_fw_proxy_write_queue_token { u32 request_id; u32 region_index; u32 offset; u32 value; }; /* From here on type defines not coming from the ISYSAPI interface */ /** * struct ipu_fw_resp_queue_token */ struct ipu_fw_resp_queue_token { struct ipu_fw_isys_resp_info_abi resp_info; }; /** * struct ipu_fw_send_queue_token */ struct ipu_fw_send_queue_token { u64 buf_handle; u32 payload; u16 send_type; u16 stream_id; }; /** * struct ipu_fw_proxy_resp_queue_token */ struct ipu_fw_proxy_resp_queue_token { struct ipu_fw_isys_proxy_resp_info_abi proxy_resp_info; }; /** * struct ipu_fw_proxy_send_queue_token */ struct ipu_fw_proxy_send_queue_token { u32 request_id; u32 region_index; u32 offset; u32 value; }; void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg); void ipu_fw_isys_dump_stream_cfg(struct device *dev, struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg); void ipu_fw_isys_dump_frame_buff_set(struct device *dev, struct ipu_fw_isys_frame_buff_set_abi *buf, unsigned int outputs); int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams); int ipu_fw_isys_close(struct ipu_isys *isys); int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, const unsigned int stream_handle, enum ipu_fw_isys_send_type send_type); int ipu_fw_isys_complex_cmd(struct ipu_isys *isys, const unsigned int stream_handle, void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, size_t size, enum ipu_fw_isys_send_type send_type); int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys, unsigned int req_id, unsigned int index, unsigned int offset, u32 value); void ipu_fw_isys_cleanup(struct ipu_isys *isys); struct ipu_fw_isys_resp_info_abi * ipu_fw_isys_get_resp(void *context, unsigned int queue, struct ipu_fw_isys_resp_info_abi *response); void ipu_fw_isys_put_resp(void *context, unsigned int queue); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c000066400000000000000000000257431471702545500273010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2014 - 2024 Intel Corporation #include #include #include #include 
#include #include "ipu.h" #include "ipu-bus.h" #include "ipu-isys.h" #include "ipu-isys-csi2-be.h" #include "ipu-isys-subdev.h" #include "ipu-isys-video.h" /* * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes. * Otherwise pixel order calculation below WILL BREAK! */ static const u32 csi2_be_soc_supported_codes_pad[] = { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_RGB565_1X16, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_VYUY8_1X16, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_Y8_1X8, 0, }; /* * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes. * Otherwise pixel order calculation below WILL BREAK! */ static const u32 csi2_be_soc_supported_raw_bayer_codes_pad[] = { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_Y8_1X8, 0, }; static int get_supported_code_index(u32 code) { int i; for (i = 0; csi2_be_soc_supported_raw_bayer_codes_pad[i]; i++) { if (csi2_be_soc_supported_raw_bayer_codes_pad[i] == code) return i; } return -EINVAL; } static const u32 *csi2_be_soc_supported_codes[NR_OF_CSI2_BE_SOC_PADS]; static struct v4l2_subdev_internal_ops csi2_be_soc_sd_internal_ops = { .open = ipu_isys_subdev_open, .close = ipu_isys_subdev_close, }; static const struct v4l2_subdev_core_ops csi2_be_soc_sd_core_ops = { }; static const struct v4l2_ctrl_config compression_ctrl_cfg = { .ops = NULL, .id = V4L2_CID_IPU_ISYS_COMPRESSION, .name = "ISYS BE-SOC compression", .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, .def = 0, }; static int set_stream(struct v4l2_subdev *sd, int enable) { return 0; } static const struct v4l2_subdev_video_ops csi2_be_soc_sd_video_ops = { .s_stream = set_stream, }; static int __subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct media_pipeline *mp = media_entity_pipeline(&sd->entity); struct ipu_isys_pipeline *ip = container_of(mp, struct ipu_isys_pipeline, pipe); ip->csi2_be_soc = to_ipu_isys_csi2_be_soc(sd); return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); } static int ipu_isys_csi2_be_soc_set_sel(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_selection *sel) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; if (sel->target == V4L2_SEL_TGT_CROP && pad->flags & MEDIA_PAD_FL_SOURCE && asd->valid_tgts[sel->pad].crop) { enum isys_subdev_prop_tgt tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, cfg, sel->pad, sel->which); #else struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, state, sel->pad, sel->which); #endif if (get_supported_code_index(ffmt->code) < 0) { /* Non-bayer formats can't 
be odd lines cropped */ sel->r.left &= ~1; sel->r.top &= ~1; } sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, sel->which) = sel->r; ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, tgt, sel->pad, sel->which); #else *__ipu_isys_get_selection(sd, state, sel->target, sel->pad, sel->which) = sel->r; ipu_isys_subdev_fmt_propagate(sd, state, NULL, &sel->r, tgt, sel->pad, sel->which); #endif return 0; } return -EINVAL; } static const struct v4l2_subdev_pad_ops csi2_be_soc_sd_pad_ops = { .link_validate = __subdev_link_validate, .get_fmt = ipu_isys_subdev_get_ffmt, .set_fmt = ipu_isys_subdev_set_ffmt, .get_selection = ipu_isys_subdev_get_sel, .set_selection = ipu_isys_csi2_be_soc_set_sel, .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, }; static struct v4l2_subdev_ops csi2_be_soc_sd_ops = { .core = &csi2_be_soc_sd_core_ops, .video = &csi2_be_soc_sd_video_ops, .pad = &csi2_be_soc_sd_pad_ops, }; static struct media_entity_operations csi2_be_soc_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static void csi2_be_soc_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which); #else struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, state, fmt->pad, fmt->which); #endif if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SINK) { if (fmt->format.field != V4L2_FIELD_ALTERNATE) fmt->format.field = V4L2_FIELD_NONE; *ffmt = fmt->format; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL, IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); #else ipu_isys_subdev_fmt_propagate(sd, state, &fmt->format, NULL, IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); #endif } else if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) { struct v4l2_mbus_framefmt *sink_ffmt; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_rect *r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, fmt->pad, fmt->which); #else struct v4l2_rect *r = __ipu_isys_get_selection(sd, state, V4L2_SEL_TGT_CROP, fmt->pad, fmt->which); #endif struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); u32 code; int idx; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) sink_ffmt = __ipu_isys_get_ffmt(sd, cfg, 0, fmt->which); #else sink_ffmt = __ipu_isys_get_ffmt(sd, state, 0, fmt->which); #endif code = sink_ffmt->code; idx = get_supported_code_index(code); if (asd->valid_tgts[fmt->pad].crop && idx >= 0) { int crop_info = 0; /* Only croping odd line at top side. 
*/ if (r->top & 1) crop_info |= CSI2_BE_CROP_VER; code = csi2_be_soc_supported_raw_bayer_codes_pad [((idx & CSI2_BE_CROP_MASK) ^ crop_info) + (idx & ~CSI2_BE_CROP_MASK)]; } ffmt->code = code; ffmt->width = r->width; ffmt->height = r->height; ffmt->field = sink_ffmt->field; } } void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be_soc) { v4l2_device_unregister_subdev(&csi2_be_soc->asd.sd); ipu_isys_subdev_cleanup(&csi2_be_soc->asd); v4l2_ctrl_handler_free(&csi2_be_soc->av.ctrl_handler); ipu_isys_video_cleanup(&csi2_be_soc->av); } int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc, struct ipu_isys *isys, int index) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = CSI2_BE_SOC_PAD_SINK, .format = { .width = 4096, .height = 3072, }, }; int rval, i; csi2_be_soc->asd.sd.entity.ops = &csi2_be_soc_entity_ops; csi2_be_soc->asd.isys = isys; rval = ipu_isys_subdev_init(&csi2_be_soc->asd, &csi2_be_soc_sd_ops, 0, NR_OF_CSI2_BE_SOC_PADS, NR_OF_CSI2_BE_SOC_SOURCE_PADS, NR_OF_CSI2_BE_SOC_SINK_PADS, 0, CSI2_BE_SOC_PAD_SOURCE, CSI2_BE_SOC_PAD_SINK); if (rval) goto fail; csi2_be_soc->asd.valid_tgts[CSI2_BE_SOC_PAD_SOURCE].crop = true; for (i = 0; i < NR_OF_CSI2_BE_SOC_PADS; i++) csi2_be_soc_supported_codes[i] = csi2_be_soc_supported_codes_pad; csi2_be_soc->asd.supported_codes = csi2_be_soc_supported_codes; csi2_be_soc->asd.be_mode = IPU_BE_SOC; csi2_be_soc->asd.isl_mode = IPU_ISL_OFF; csi2_be_soc->asd.set_ffmt = csi2_be_soc_set_ffmt; fmt.pad = CSI2_BE_SOC_PAD_SINK; ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt); fmt.pad = CSI2_BE_SOC_PAD_SOURCE; ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt); csi2_be_soc->asd.sd.internal_ops = &csi2_be_soc_sd_internal_ops; snprintf(csi2_be_soc->asd.sd.name, sizeof(csi2_be_soc->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC %d", index); v4l2_set_subdevdata(&csi2_be_soc->asd.sd, &csi2_be_soc->asd); rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2_be_soc->asd.sd); if (rval) { dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); goto fail; } snprintf(csi2_be_soc->av.vdev.name, sizeof(csi2_be_soc->av.vdev.name), IPU_ISYS_ENTITY_PREFIX " BE SOC capture %d", index); /* * Pin type could be overwritten for YUV422 to I420 case, at * set_format phase */ csi2_be_soc->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_SOC; csi2_be_soc->av.isys = isys; csi2_be_soc->av.pfmts = ipu_isys_pfmts_be_soc; csi2_be_soc->av.try_fmt_vid_mplane = ipu_isys_video_try_fmt_vid_mplane_default; csi2_be_soc->av.prepare_fw_stream = ipu_isys_prepare_fw_cfg_default; csi2_be_soc->av.aq.buf_prepare = ipu_isys_buf_prepare; csi2_be_soc->av.aq.fill_frame_buff_set_pin = ipu_isys_buffer_to_fw_frame_buff_pin; csi2_be_soc->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate; csi2_be_soc->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); /* create v4l2 ctrl for be-soc video node */ rval = v4l2_ctrl_handler_init(&csi2_be_soc->av.ctrl_handler, 0); if (rval) { dev_err(&isys->adev->dev, "failed to init v4l2 ctrl handler for be_soc\n"); goto fail; } csi2_be_soc->av.compression_ctrl = v4l2_ctrl_new_custom(&csi2_be_soc->av.ctrl_handler, &compression_ctrl_cfg, NULL); if (!csi2_be_soc->av.compression_ctrl) { dev_err(&isys->adev->dev, "failed to create BE-SOC cmprs ctrl\n"); goto fail; } csi2_be_soc->av.compression = 0; csi2_be_soc->av.vdev.ctrl_handler = &csi2_be_soc->av.ctrl_handler; rval = ipu_isys_video_init(&csi2_be_soc->av, &csi2_be_soc->asd.sd.entity, CSI2_BE_SOC_PAD_SOURCE, MEDIA_PAD_FL_SINK, 
MEDIA_LNK_FL_DYNAMIC); if (rval) { dev_info(&isys->adev->dev, "can't init video node\n"); goto fail; } return 0; fail: ipu_isys_csi2_be_soc_cleanup(csi2_be_soc); return rval; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-csi2-be.c000066400000000000000000000255361471702545500265170ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2014 - 2024 Intel Corporation #include #include #include #include #include #include "ipu.h" #include "ipu-bus.h" #include "ipu-isys.h" #include "ipu-isys-csi2-be.h" #include "ipu-isys-subdev.h" #include "ipu-isys-video.h" /* * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes. * Otherwise pixel order calculation below WILL BREAK! */ static const u32 csi2_be_supported_codes_pad[] = { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, 0, }; static const u32 *csi2_be_supported_codes[] = { csi2_be_supported_codes_pad, csi2_be_supported_codes_pad, }; static struct v4l2_subdev_internal_ops csi2_be_sd_internal_ops = { .open = ipu_isys_subdev_open, .close = ipu_isys_subdev_close, }; static const struct v4l2_subdev_core_ops csi2_be_sd_core_ops = { }; static const struct v4l2_ctrl_config compression_ctrl_cfg = { .ops = NULL, .id = V4L2_CID_IPU_ISYS_COMPRESSION, .name = "ISYS CSI-BE compression", .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, .def = 0, }; static int set_stream(struct v4l2_subdev *sd, int enable) { return 0; } static const struct v4l2_subdev_video_ops csi2_be_sd_video_ops = { .s_stream = set_stream, }; static int __subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct media_pipeline *mp = media_entity_pipeline(&sd->entity); struct ipu_isys_pipeline *ip = container_of(mp, struct ipu_isys_pipeline, pipe); ip->csi2_be = to_ipu_isys_csi2_be(sd); return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); } static int get_supported_code_index(u32 code) { int i; for (i = 0; csi2_be_supported_codes_pad[i]; i++) { if (csi2_be_supported_codes_pad[i] == code) return i; } return -EINVAL; } static int ipu_isys_csi2_be_set_sel(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_selection *sel) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; if (sel->target == V4L2_SEL_TGT_CROP && pad->flags & MEDIA_PAD_FL_SOURCE && asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, cfg, sel->pad, sel->which); struct v4l2_rect *r = __ipu_isys_get_selection (sd, cfg, sel->target, CSI2_BE_PAD_SINK, sel->which); #else struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, state, sel->pad, sel->which); struct v4l2_rect *r = __ipu_isys_get_selection (sd, state, sel->target, CSI2_BE_PAD_SINK, sel->which); #endif if (get_supported_code_index(ffmt->code) < 0) { /* Non-bayer formats can't be single line cropped */ sel->r.left &= ~1; sel->r.top &= ~1; /* Non-bayer formats can't pe padded at all */ sel->r.width = clamp(sel->r.width, 
IPU_ISYS_MIN_WIDTH, r->width); } else { sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); } /* * Vertical padding is not supported, height is * restricted by sink pad resolution. */ sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, r->height); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, sel->which) = sel->r; ipu_isys_subdev_fmt_propagate (sd, cfg, NULL, &sel->r, IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, sel->pad, sel->which); #else *__ipu_isys_get_selection(sd, state, sel->target, sel->pad, sel->which) = sel->r; ipu_isys_subdev_fmt_propagate (sd, state, NULL, &sel->r, IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, sel->pad, sel->which); #endif return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) return ipu_isys_subdev_set_sel(sd, cfg, sel); #else return ipu_isys_subdev_set_sel(sd, state, sel); #endif } static const struct v4l2_subdev_pad_ops csi2_be_sd_pad_ops = { .link_validate = __subdev_link_validate, .get_fmt = ipu_isys_subdev_get_ffmt, .set_fmt = ipu_isys_subdev_set_ffmt, .get_selection = ipu_isys_subdev_get_sel, .set_selection = ipu_isys_csi2_be_set_sel, .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, }; static struct v4l2_subdev_ops csi2_be_sd_ops = { .core = &csi2_be_sd_core_ops, .video = &csi2_be_sd_video_ops, .pad = &csi2_be_sd_pad_ops, }; static struct media_entity_operations csi2_be_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) static void csi2_be_set_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *cfg, struct v4l2_subdev_format *fmt) #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) static void csi2_be_set_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) #else static void csi2_be_set_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt) #endif { struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which); #else struct v4l2_mbus_framefmt *ffmt = __ipu_isys_get_ffmt(sd, state, fmt->pad, fmt->which); #endif switch (fmt->pad) { case CSI2_BE_PAD_SINK: if (fmt->format.field != V4L2_FIELD_ALTERNATE) fmt->format.field = V4L2_FIELD_NONE; *ffmt = fmt->format; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ipu_isys_subdev_fmt_propagate (sd, cfg, &fmt->format, NULL, IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); #else ipu_isys_subdev_fmt_propagate (sd, state, &fmt->format, NULL, IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); #endif return; case CSI2_BE_PAD_SOURCE: { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_mbus_framefmt *sink_ffmt = __ipu_isys_get_ffmt(sd, cfg, CSI2_BE_PAD_SINK, fmt->which); struct v4l2_rect *r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, CSI2_BE_PAD_SOURCE, fmt->which); #else struct v4l2_mbus_framefmt *sink_ffmt = __ipu_isys_get_ffmt(sd, state, CSI2_BE_PAD_SINK, fmt->which); struct v4l2_rect *r = __ipu_isys_get_selection(sd, state, V4L2_SEL_TGT_CROP, CSI2_BE_PAD_SOURCE, fmt->which); #endif struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); u32 code = sink_ffmt->code; int idx = get_supported_code_index(code); if (asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop && idx >= 0) { int crop_info = 0; if (r->top & 1) crop_info |= CSI2_BE_CROP_VER; if (r->left & 1) crop_info |= CSI2_BE_CROP_HOR; code = csi2_be_supported_codes_pad [((idx & 
CSI2_BE_CROP_MASK) ^ crop_info) + (idx & ~CSI2_BE_CROP_MASK)]; } ffmt->width = r->width; ffmt->height = r->height; ffmt->code = code; ffmt->field = sink_ffmt->field; return; } default: dev_err(&csi2->isys->adev->dev, "Unknown pad type\n"); WARN_ON(1); } } void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be) { v4l2_ctrl_handler_free(&csi2_be->av.ctrl_handler); v4l2_device_unregister_subdev(&csi2_be->asd.sd); ipu_isys_subdev_cleanup(&csi2_be->asd); ipu_isys_video_cleanup(&csi2_be->av); } int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be, struct ipu_isys *isys) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = CSI2_BE_PAD_SINK, .format = { .width = 4096, .height = 3072, }, }; struct v4l2_subdev_selection sel = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = CSI2_BE_PAD_SOURCE, .target = V4L2_SEL_TGT_CROP, .r = { .width = fmt.format.width, .height = fmt.format.height, }, }; int rval; csi2_be->asd.sd.entity.ops = &csi2_be_entity_ops; csi2_be->asd.isys = isys; rval = ipu_isys_subdev_init(&csi2_be->asd, &csi2_be_sd_ops, 0, NR_OF_CSI2_BE_PADS, NR_OF_CSI2_BE_SOURCE_PADS, NR_OF_CSI2_BE_SINK_PADS, 0, CSI2_BE_PAD_SOURCE, CSI2_BE_PAD_SINK); if (rval) goto fail; csi2_be->asd.pad[CSI2_BE_PAD_SINK].flags |= MEDIA_PAD_FL_MUST_CONNECT; csi2_be->asd.valid_tgts[CSI2_BE_PAD_SOURCE].crop = true; csi2_be->asd.set_ffmt = csi2_be_set_ffmt; BUILD_BUG_ON(ARRAY_SIZE(csi2_be_supported_codes) != NR_OF_CSI2_BE_PADS); csi2_be->asd.supported_codes = csi2_be_supported_codes; csi2_be->asd.be_mode = IPU_BE_RAW; csi2_be->asd.isl_mode = IPU_ISL_CSI2_BE; ipu_isys_subdev_set_ffmt(&csi2_be->asd.sd, NULL, &fmt); ipu_isys_csi2_be_set_sel(&csi2_be->asd.sd, NULL, &sel); csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops; snprintf(csi2_be->asd.sd.name, sizeof(csi2_be->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " CSI2 BE"); snprintf(csi2_be->av.vdev.name, sizeof(csi2_be->av.vdev.name), IPU_ISYS_ENTITY_PREFIX " CSI2 BE capture"); csi2_be->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS; v4l2_set_subdevdata(&csi2_be->asd.sd, &csi2_be->asd); rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2_be->asd.sd); if (rval) { dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); goto fail; } csi2_be->av.isys = isys; csi2_be->av.pfmts = ipu_isys_pfmts; csi2_be->av.try_fmt_vid_mplane = ipu_isys_video_try_fmt_vid_mplane_default; csi2_be->av.prepare_fw_stream = ipu_isys_prepare_fw_cfg_default; csi2_be->av.aq.buf_prepare = ipu_isys_buf_prepare; csi2_be->av.aq.fill_frame_buff_set_pin = ipu_isys_buffer_to_fw_frame_buff_pin; csi2_be->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate; csi2_be->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); /* create v4l2 ctrl for csi-be video node */ rval = v4l2_ctrl_handler_init(&csi2_be->av.ctrl_handler, 0); if (rval) { dev_err(&isys->adev->dev, "failed to init v4l2 ctrl handler for csi2_be\n"); goto fail; } csi2_be->av.compression_ctrl = v4l2_ctrl_new_custom(&csi2_be->av.ctrl_handler, &compression_ctrl_cfg, NULL); if (!csi2_be->av.compression_ctrl) { dev_err(&isys->adev->dev, "failed to create CSI-BE cmprs ctrl\n"); goto fail; } csi2_be->av.compression = 0; csi2_be->av.vdev.ctrl_handler = &csi2_be->av.ctrl_handler; rval = ipu_isys_video_init(&csi2_be->av, &csi2_be->asd.sd.entity, CSI2_BE_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); if (rval) { dev_info(&isys->adev->dev, "can't init video node\n"); goto fail; } return 0; fail: ipu_isys_csi2_be_cleanup(csi2_be); return rval; } 
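The CSI-2 BE initialization above registers a boolean "ISYS CSI-BE compression" control (V4L2_CID_IPU_ISYS_COMPRESSION) on its capture video node. As a rough userspace sketch only (the control ID macro comes from the driver's private uapi header, and the /dev/video node number is system specific, so both are assumptions here), that control could be flipped through the standard VIDIOC_S_CTRL ioctl along these lines:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	/* Driver-private control ID; the uapi header providing it is assumed. */
	struct v4l2_control ctrl = {
		.id = V4L2_CID_IPU_ISYS_COMPRESSION,
		.value = 1,	/* 1 = request compressed capture, 0 = plain */
	};
	/* Assumed device node; use the CSI2 BE / BE SOC capture node on the target system. */
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");	/* fails if this node does not expose the control */

	close(fd);
	return 0;
}

The same toggle should also be possible with v4l2-ctl from v4l-utils, once the control name reported by v4l2-ctl --list-ctrls on that node is known.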
ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-csi2-be.h000066400000000000000000000031421471702545500265110ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2014 - 2024 Intel Corporation */ #ifndef IPU_ISYS_CSI2_BE_H #define IPU_ISYS_CSI2_BE_H #include #include #include "ipu-isys-queue.h" #include "ipu-isys-subdev.h" #include "ipu-isys-video.h" #include "ipu-platform-isys.h" struct ipu_isys_csi2_be_pdata; struct ipu_isys; #define CSI2_BE_PAD_SINK 0 #define CSI2_BE_PAD_SOURCE 1 #define NR_OF_CSI2_BE_PADS 2 #define NR_OF_CSI2_BE_SOURCE_PADS 1 #define NR_OF_CSI2_BE_SINK_PADS 1 #define NR_OF_CSI2_BE_SOC_SOURCE_PADS 1 #define NR_OF_CSI2_BE_SOC_SINK_PADS 1 #define CSI2_BE_SOC_PAD_SINK 0 #define CSI2_BE_SOC_PAD_SOURCE 1 #define NR_OF_CSI2_BE_SOC_PADS \ (NR_OF_CSI2_BE_SOC_SOURCE_PADS + NR_OF_CSI2_BE_SOC_SINK_PADS) #define CSI2_BE_CROP_HOR BIT(0) #define CSI2_BE_CROP_VER BIT(1) #define CSI2_BE_CROP_MASK (CSI2_BE_CROP_VER | CSI2_BE_CROP_HOR) /* * struct ipu_isys_csi2_be */ struct ipu_isys_csi2_be { struct ipu_isys_csi2_be_pdata *pdata; struct ipu_isys_subdev asd; struct ipu_isys_video av; }; struct ipu_isys_csi2_be_soc { struct ipu_isys_csi2_be_pdata *pdata; struct ipu_isys_subdev asd; struct ipu_isys_video av; }; #define to_ipu_isys_csi2_be(sd) \ container_of(to_ipu_isys_subdev(sd), \ struct ipu_isys_csi2_be, asd) #define to_ipu_isys_csi2_be_soc(sd) \ container_of(to_ipu_isys_subdev(sd), \ struct ipu_isys_csi2_be_soc, asd) int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc, struct ipu_isys *isys, int index); void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be); #endif /* IPU_ISYS_CSI2_BE_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-csi2.c000066400000000000000000000450371471702545500261310ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-bus.h" #include "ipu-buttress.h" #include "ipu-isys.h" #include "ipu-isys-subdev.h" #include "ipu-isys-video.h" #include "ipu-platform-regs.h" static const u32 csi2_supported_codes_pad_sink[] = { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_RGB565_1X16, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_VYUY8_1X16, #ifdef V4L2_PIX_FMT_Y210 MEDIA_BUS_FMT_YUYV10_1X20, #endif MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, 0, }; static const u32 csi2_supported_codes_pad_source[] = { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_RGB565_1X16, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_VYUY8_1X16, #ifdef V4L2_PIX_FMT_Y210 MEDIA_BUS_FMT_YUYV10_1X20, #endif MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, 
MEDIA_BUS_FMT_SRGGB8_1X8, 0, }; static const u32 *csi2_supported_codes[NR_OF_CSI2_PADS]; static struct v4l2_subdev_internal_ops csi2_sd_internal_ops = { .open = ipu_isys_subdev_open, .close = ipu_isys_subdev_close, }; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 255) int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, s64 *link_freq) { struct media_pipeline *mp = media_entity_pipeline(&csi2->asd.sd.entity); struct ipu_isys_pipeline *pipe = container_of(mp, struct ipu_isys_pipeline, pipe); struct v4l2_subdev *ext_sd = media_entity_to_v4l2_subdev(pipe->external->entity); struct device *dev = &csi2->isys->adev->dev; unsigned int bpp, lanes; s64 ret; if (!ext_sd) { WARN_ON(1); return -ENODEV; } bpp = ipu_isys_mbus_code_to_bpp(csi2->asd.ffmt->code); lanes = csi2->nlanes; ret = v4l2_get_link_freq(ext_sd->ctrl_handler, bpp, lanes * 2); if (ret < 0) { dev_err(dev, "can't get link frequency (%lld)\n", ret); return ret; } dev_dbg(dev, "link freq of %s is %lld\n", ext_sd->name, ret); *link_freq = ret; return 0; } #else int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq) { struct media_pipeline *mp = media_entity_pipeline(&csi2->asd.sd.entity); struct ipu_isys_pipeline *pipe = container_of(mp, struct ipu_isys_pipeline, pipe); struct v4l2_subdev *ext_sd = media_entity_to_v4l2_subdev(pipe->external->entity); struct v4l2_ext_control c = {.id = V4L2_CID_LINK_FREQ, }; struct v4l2_ext_controls cs = {.count = 1, .controls = &c, }; struct v4l2_querymenu qm = {.id = c.id, }; int rval; if (!ext_sd) { WARN_ON(1); return -ENODEV; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler, ext_sd->devnode, ext_sd->v4l2_dev->mdev, &cs); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler, ext_sd->v4l2_dev->mdev, &cs); #else rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler, &cs); #endif if (rval) { dev_info(&csi2->isys->adev->dev, "can't get link frequency\n"); return rval; } qm.index = c.value; rval = v4l2_querymenu(ext_sd->ctrl_handler, &qm); if (rval) { dev_info(&csi2->isys->adev->dev, "can't get menu item\n"); return rval; } dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__, qm.value); if (!qm.value) return -EINVAL; *link_freq = qm.value; return 0; } #endif static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); dev_dbg(&csi2->isys->adev->dev, "subscribe event(type %u id %u)\n", sub->type, sub->id); switch (sub->type) { case V4L2_EVENT_FRAME_SYNC: return v4l2_event_subscribe(fh, sub, 10, NULL); case V4L2_EVENT_CTRL: return v4l2_ctrl_subscribe_event(fh, sub); default: return -EINVAL; } } static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { .subscribe_event = subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; /* * The input system CSI2+ receiver has several * parameters affecting the receiver timings. These depend * on the MIPI bus frequency F in Hz (sensor transmitter rate) * as follows: * register value = (A/1e9 + B * UI) / COUNT_ACC * where * UI = 1 / (2 * F) in seconds * COUNT_ACC = counter accuracy in seconds * For legacy IPU, COUNT_ACC = 0.125 ns * * A and B are coefficients from the table below, * depending whether the register minimum or maximum value is * calculated. 
* Minimum Maximum * Clock lane A B A B * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 * Data lanes * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 * * We use the minimum values of both A and B. */ #define DIV_SHIFT 8 static uint32_t calc_timing(s32 a, int32_t b, int64_t link_freq, int32_t accinv) { return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) / (int32_t)(link_freq >> DIV_SHIFT)); } static int ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2, struct ipu_isys_csi2_timing *timing, uint32_t accinv) { __s64 link_freq; int rval; rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); if (rval) return rval; timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, link_freq, accinv); timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, link_freq, accinv); dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen); dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle); timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, link_freq, accinv); timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, link_freq, accinv); dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen); dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle); return 0; } #define CSI2_ACCINV 8 static int set_stream(struct v4l2_subdev *sd, int enable) { struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); struct media_pipeline *mp = media_entity_pipeline(&sd->entity); struct ipu_isys_pipeline *ip = container_of(mp, struct ipu_isys_pipeline, pipe); struct ipu_isys_csi2_config *cfg; struct v4l2_subdev *ext_sd; struct ipu_isys_csi2_timing timing = {0}; unsigned int nlanes; int rval; dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); if (!ip->external->entity) { WARN_ON(1); return -ENODEV; } ext_sd = media_entity_to_v4l2_subdev(ip->external->entity); cfg = v4l2_get_subdev_hostdata(ext_sd); if (!enable) { ipu_isys_csi2_set_stream(sd, timing, 0, enable); return 0; } ip->has_sof = true; nlanes = cfg->nlanes; dev_dbg(&csi2->isys->adev->dev, "lane nr %d.\n", nlanes); rval = ipu_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); if (rval) return rval; rval = ipu_isys_csi2_set_stream(sd, timing, nlanes, enable); return rval; } static void csi2_capture_done(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *info) { if (ip->interlaced && ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { struct ipu_isys_buffer *ib; unsigned long flags; spin_lock_irqsave(&ip->short_packet_queue_lock, flags); if (!list_empty(&ip->short_packet_active)) { ib = list_last_entry(&ip->short_packet_active, struct ipu_isys_buffer, head); list_move(&ib->head, &ip->short_packet_incoming); } spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); } } static int csi2_link_validate(struct media_link *link) { struct ipu_isys_csi2 *csi2; struct ipu_isys_pipeline *ip; struct media_pipeline *mp; int rval; mp = media_entity_pipeline(link->sink->entity); if (!link->sink->entity || !link->source->entity || !mp) return -EINVAL; csi2 = 
to_ipu_isys_csi2(media_entity_to_v4l2_subdev(link->sink->entity)); ip = to_ipu_isys_pipeline(mp); csi2->receiver_errors = 0; ip->csi2 = csi2; ipu_isys_video_add_capture_done(ip, csi2_capture_done); rval = v4l2_subdev_link_validate(link); if (rval) return rval; if (!v4l2_ctrl_g_ctrl(csi2->store_csi2_header)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) struct media_pad *remote_pad = media_entity_remote_pad(&csi2->asd.pad[CSI2_PAD_SOURCE]); #else struct media_pad *remote_pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SOURCE]); #endif if (remote_pad && is_media_entity_v4l2_subdev(remote_pad->entity)) { dev_err(&csi2->isys->adev->dev, "CSI2 BE requires CSI2 headers.\n"); return -EINVAL; } } return 0; } static const struct v4l2_subdev_video_ops csi2_sd_video_ops = { .s_stream = set_stream, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { return ipu_isys_subdev_get_ffmt(sd, cfg, fmt); } #else static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt) { return ipu_isys_subdev_get_ffmt(sd, state, fmt); } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { return ipu_isys_subdev_set_ffmt(sd, cfg, fmt); } #else static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt) { return ipu_isys_subdev_set_ffmt(sd, state, fmt); } #endif static int __subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct media_pipeline *mp = media_entity_pipeline(&sd->entity); struct ipu_isys_pipeline *ip = container_of(mp, struct ipu_isys_pipeline, pipe); if (source_fmt->format.field == V4L2_FIELD_ALTERNATE) ip->interlaced = true; return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); } static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { .link_validate = __subdev_link_validate, .get_fmt = ipu_isys_csi2_get_fmt, .set_fmt = ipu_isys_csi2_set_fmt, .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, }; static struct v4l2_subdev_ops csi2_sd_ops = { .core = &csi2_sd_core_ops, .video = &csi2_sd_video_ops, .pad = &csi2_sd_pad_ops, }; static struct media_entity_operations csi2_entity_ops = { .link_validate = csi2_link_validate, }; static void csi2_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { enum isys_subdev_prop_tgt tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT; struct v4l2_mbus_framefmt *ffmt = #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) __ipu_isys_get_ffmt(sd, cfg, fmt->pad, #else __ipu_isys_get_ffmt(sd, state, fmt->pad, #endif fmt->which); if (fmt->format.field != V4L2_FIELD_ALTERNATE) fmt->format.field = V4L2_FIELD_NONE; if (fmt->pad == CSI2_PAD_SINK) { *ffmt = fmt->format; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL, tgt, fmt->pad, fmt->which); #else ipu_isys_subdev_fmt_propagate(sd, state, &fmt->format, NULL, tgt, fmt->pad, fmt->which); #endif return; } if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) { ffmt->width = fmt->format.width; ffmt->height = fmt->format.height; ffmt->field = 
fmt->format.field; ffmt->code = ipu_isys_subdev_code_to_uncompressed(fmt->format.code); return; } WARN_ON(1); } void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2) { if (!csi2->isys) return; v4l2_device_unregister_subdev(&csi2->asd.sd); ipu_isys_subdev_cleanup(&csi2->asd); csi2->isys = NULL; } static void csi_ctrl_init(struct v4l2_subdev *sd) { struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); static const struct v4l2_ctrl_config cfg = { .id = V4L2_CID_IPU_STORE_CSI2_HEADER, .name = "Store CSI-2 Headers", .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, .def = 1, }; csi2->store_csi2_header = v4l2_ctrl_new_custom(&csi2->asd.ctrl_handler, &cfg, NULL); } int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2, struct ipu_isys *isys, void __iomem *base, unsigned int index) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = CSI2_PAD_SINK, .format = { .width = 4096, .height = 3072, }, }; int i, rval, src; dev_dbg(&isys->adev->dev, "csi-%d base = 0x%lx\n", index, (unsigned long)base); csi2->isys = isys; csi2->base = base; csi2->index = index; csi2->asd.sd.entity.ops = &csi2_entity_ops; csi2->asd.ctrl_init = csi_ctrl_init; csi2->asd.isys = isys; init_completion(&csi2->eof_completion); rval = ipu_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, NR_OF_CSI2_PADS, NR_OF_CSI2_SOURCE_PADS, NR_OF_CSI2_SINK_PADS, 0, CSI2_PAD_SOURCE, CSI2_PAD_SINK); if (rval) goto fail; csi2->asd.pad[CSI2_PAD_SINK].flags |= MEDIA_PAD_FL_MUST_CONNECT; src = index; csi2->asd.source = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 + src; csi2_supported_codes[CSI2_PAD_SINK] = csi2_supported_codes_pad_sink; for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) csi2_supported_codes[i + 1] = csi2_supported_codes_pad_source; csi2->asd.supported_codes = csi2_supported_codes; csi2->asd.set_ffmt = csi2_set_ffmt; csi2->asd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; csi2->asd.sd.internal_ops = &csi2_sd_internal_ops; snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " CSI-2 %u", index); v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); if (rval) { dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); goto fail; } mutex_lock(&csi2->asd.mutex); __ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt); mutex_unlock(&csi2->asd.mutex); return 0; fail: ipu_isys_csi2_cleanup(csi2); return rval; } void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2) { struct ipu_isys_pipeline *ip = NULL; struct v4l2_event ev = { .type = V4L2_EVENT_FRAME_SYNC, }; struct video_device *vdev = csi2->asd.sd.devnode; unsigned long flags; unsigned int i; spin_lock_irqsave(&csi2->isys->lock, flags); csi2->in_frame = true; for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (csi2->isys->pipes[i] && csi2->isys->pipes[i]->csi2 == csi2) { ip = csi2->isys->pipes[i]; break; } } /* Pipe already vanished */ if (!ip) { spin_unlock_irqrestore(&csi2->isys->lock, flags); return; } ev.u.frame_sync.frame_sequence = atomic_inc_return(&ip->sequence) - 1; spin_unlock_irqrestore(&csi2->isys->lock, flags); v4l2_event_queue(vdev, &ev); dev_dbg(&csi2->isys->adev->dev, "sof_event::csi2-%i sequence: %i\n", csi2->index, ev.u.frame_sync.frame_sequence); } void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2) { struct ipu_isys_pipeline *ip = NULL; unsigned long flags; unsigned int i; u32 frame_sequence; spin_lock_irqsave(&csi2->isys->lock, flags); csi2->in_frame = false; if (csi2->wait_for_sync) complete(&csi2->eof_completion); for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if 
(csi2->isys->pipes[i] && csi2->isys->pipes[i]->csi2 == csi2) { ip = csi2->isys->pipes[i]; break; } } if (ip) { frame_sequence = atomic_read(&ip->sequence); spin_unlock_irqrestore(&csi2->isys->lock, flags); dev_dbg(&csi2->isys->adev->dev, "eof_event::csi2-%i sequence: %i\n", csi2->index, frame_sequence); return; } spin_unlock_irqrestore(&csi2->isys->lock, flags); } /* Call this function only _after_ the sensor has been stopped */ void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2) { unsigned long flags, tout; spin_lock_irqsave(&csi2->isys->lock, flags); if (!csi2->in_frame) { spin_unlock_irqrestore(&csi2->isys->lock, flags); return; } reinit_completion(&csi2->eof_completion); csi2->wait_for_sync = true; spin_unlock_irqrestore(&csi2->isys->lock, flags); tout = wait_for_completion_timeout(&csi2->eof_completion, IPU_EOF_TIMEOUT_JIFFIES); if (!tout) dev_err(&csi2->isys->adev->dev, "csi2-%d: timeout at sync to eof\n", csi2->index); csi2->wait_for_sync = false; } struct ipu_isys_buffer * ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip, struct ipu_isys_buffer_list *bl) { struct ipu_isys_buffer *ib; struct ipu_isys_private_buffer *pb; struct ipu_isys_mipi_packet_header *ph; unsigned long flags; spin_lock_irqsave(&ip->short_packet_queue_lock, flags); if (list_empty(&ip->short_packet_incoming)) { spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); return NULL; } ib = list_last_entry(&ip->short_packet_incoming, struct ipu_isys_buffer, head); pb = ipu_isys_buffer_to_private_buffer(ib); ph = (struct ipu_isys_mipi_packet_header *)pb->buffer; /* Fill the packet header with magic number. */ ph->word_count = 0xffff; ph->dtype = 0xff; dma_sync_single_for_cpu(&ip->isys->adev->dev, pb->dma_addr, sizeof(*ph), DMA_BIDIRECTIONAL); spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); list_move(&ib->head, &bl->head); return ib; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-csi2.h000066400000000000000000000115631471702545500261330ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_ISYS_CSI2_H #define IPU_ISYS_CSI2_H #include #include #include "ipu-isys-queue.h" #include "ipu-isys-subdev.h" #include "ipu-isys-video.h" #include "ipu-platform-isys.h" struct ipu_isys_csi2_timing; struct ipu_isys_csi2_pdata; struct ipu_isys; #define NR_OF_CSI2_SINK_PADS 1 #define CSI2_PAD_SINK 0 #define NR_OF_CSI2_SOURCE_PADS 1 #define CSI2_PAD_SOURCE 1 #define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SOURCE_PADS) #define IPU_ISYS_SHORT_PACKET_BUFFER_NUM VIDEO_MAX_FRAME #define IPU_ISYS_SHORT_PACKET_WIDTH 32 #define IPU_ISYS_SHORT_PACKET_FRAME_PACKETS 2 #define IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS 64 #define IPU_ISYS_SHORT_PACKET_UNITSIZE 8 #define IPU_ISYS_SHORT_PACKET_GENERAL_DT 0 #define IPU_ISYS_SHORT_PACKET_PT 0 #define IPU_ISYS_SHORT_PACKET_FT 0 #define IPU_ISYS_SHORT_PACKET_STRIDE \ (IPU_ISYS_SHORT_PACKET_WIDTH * \ IPU_ISYS_SHORT_PACKET_UNITSIZE) #define IPU_ISYS_SHORT_PACKET_NUM(num_lines) \ ((num_lines) * 2 + IPU_ISYS_SHORT_PACKET_FRAME_PACKETS + \ IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS) #define IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \ DIV_ROUND_UP(IPU_ISYS_SHORT_PACKET_NUM(num_lines) * \ IPU_ISYS_SHORT_PACKET_UNITSIZE, \ IPU_ISYS_SHORT_PACKET_STRIDE) #define IPU_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \ (IPU_ISYS_SHORT_PACKET_WIDTH * \ IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \ IPU_ISYS_SHORT_PACKET_UNITSIZE) #define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER 256 #define 
IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE 16 #define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \ (IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \ IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE) #define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER 0 #define IPU_ISYS_SHORT_PACKET_FROM_TUNIT 1 #define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 #define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK 0x2082 #define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2) #define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 #define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 #define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 #define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 #define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 #define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 #define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 #define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 #define IPU_EOF_TIMEOUT 300 #define IPU_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(IPU_EOF_TIMEOUT) /* * struct ipu_isys_csi2 * * @nlanes: number of lanes in the receiver */ struct ipu_isys_csi2 { struct ipu_isys_csi2_pdata *pdata; struct ipu_isys *isys; struct ipu_isys_subdev asd; struct ipu_isys_video av; struct completion eof_completion; void __iomem *base; u32 receiver_errors; unsigned int nlanes; unsigned int index; atomic_t sof_sequence; bool in_frame; bool wait_for_sync; struct v4l2_ctrl *store_csi2_header; }; struct ipu_isys_csi2_timing { u32 ctermen; u32 csettle; u32 dtermen; u32 dsettle; }; /* * This structure defines the MIPI packet header output * from IPU MIPI receiver. Due to hardware conversion, * this structure is not the same as defined in CSI-2 spec. */ struct ipu_isys_mipi_packet_header { u32 word_count:16, dtype:13, sync:2, stype:1; u32 sid:4, port_id:4, reserved:23, odd_even:1; } __packed; /* * This structure defines the trace message content * for CSI2 receiver monitor messages. 
*/ struct ipu_isys_csi2_monitor_message { u64 fe:1, fs:1, pe:1, ps:1, le:1, ls:1, reserved1:2, sequence:2, reserved2:2, flash_shutter:4, error_cause:12, fifo_overrun:1, crc_error:2, reserved3:1, timestamp_l:16, port:4, vc:2, reserved4:2, frame_sync:4, reserved5:4; u64 reserved6:3, cmd:2, reserved7:1, monitor_id:7, reserved8:1, timestamp_h:50; } __packed; #define to_ipu_isys_csi2(sd) container_of(to_ipu_isys_subdev(sd), \ struct ipu_isys_csi2, asd) int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq); int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2, struct ipu_isys *isys, void __iomem *base, unsigned int index); void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2); struct ipu_isys_buffer * ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip, struct ipu_isys_buffer_list *bl); void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2); void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2); void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2); /* interface for platform specific */ int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, struct ipu_isys_csi2_timing timing, unsigned int nlanes, int enable); unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, unsigned int *timestamp); void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2); void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2); #endif /* IPU_ISYS_CSI2_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-media.h000066400000000000000000000072701471702545500263520ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2016 - 2024 Intel Corporation */ #ifndef IPU_ISYS_MEDIA_H #define IPU_ISYS_MEDIA_H #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) #define is_media_entity_v4l2_subdev(e) \ (media_entity_type(e) == MEDIA_ENT_T_V4L2_SUBDEV) #define is_media_entity_v4l2_io(e) \ (media_entity_type(e) == MEDIA_ENT_T_DEVNODE) #define media_create_pad_link(a, b, c, d, e) \ media_entity_create_link(a, b, c, d, e) #define media_entity_pads_init(a, b, c) \ media_entity_init(a, b, c, 0) #define media_entity_id(ent) ((ent)->id) #define media_entity_graph_walk_init(a, b) 0 #define media_entity_graph_walk_cleanup(a) do { } while (0) #define IPU_COMPAT_MAX_ENTITIES MEDIA_ENTITY_ENUM_MAX_ID struct media_entity_enum { unsigned long *bmap; int idx_max; }; static inline int media_entity_enum_init(struct media_entity_enum *ent_enum, struct media_device *mdev) { int idx_max = IPU_COMPAT_MAX_ENTITIES; ent_enum->bmap = kcalloc(DIV_ROUND_UP(idx_max, BITS_PER_LONG), sizeof(long), GFP_KERNEL); if (!ent_enum->bmap) return -ENOMEM; bitmap_zero(ent_enum->bmap, idx_max); ent_enum->idx_max = idx_max; return 0; } static inline void media_entity_enum_cleanup(struct media_entity_enum *ent_enum) { kfree(ent_enum->bmap); } static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (media_entity_id(entity) >= ent_enum->idx_max) { WARN_ON(1); return; } __set_bit(media_entity_id(entity), ent_enum->bmap); } static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) { bitmap_zero(ent_enum->bmap, ent_enum->idx_max); } static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (media_entity_id(entity) >= ent_enum->idx_max) { WARN_ON(1); return false; } return test_bit(media_entity_id(entity), ent_enum->bmap); } #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) #define media_pipeline_start(e, p) 
media_entity_pipeline_start(e, p) #define media_pipeline_stop(e) media_entity_pipeline_stop(e) #define media_graph_walk_init(g, d) media_entity_graph_walk_init(g, d) #define media_graph_walk_start(g, p) media_entity_graph_walk_start(g, p) #define media_graph_walk_next(g) media_entity_graph_walk_next(g) #define media_graph_walk_cleanup(g) media_entity_graph_walk_cleanup(g) #endif struct __packed media_request_cmd { __u32 cmd; __u32 request; __u32 flags; }; struct __packed media_event_request_complete { __u32 id; }; #define MEDIA_EVENT_TYPE_REQUEST_COMPLETE 1 struct __packed media_event { __u32 type; __u32 sequence; __u32 reserved[4]; union { struct media_event_request_complete req_complete; }; }; enum media_device_request_state { MEDIA_DEVICE_REQUEST_STATE_IDLE, MEDIA_DEVICE_REQUEST_STATE_QUEUED, MEDIA_DEVICE_REQUEST_STATE_DELETED, MEDIA_DEVICE_REQUEST_STATE_COMPLETE, }; struct media_kevent { struct list_head list; struct media_event ev; }; struct media_device_request { u32 id; struct media_device *mdev; struct file *filp; struct media_kevent *kev; struct kref kref; struct list_head list; struct list_head fh_list; enum media_device_request_state state; struct list_head data; u32 flags; }; static inline struct media_device_request * media_device_request_find(struct media_device *mdev, u16 reqid) { return NULL; } static inline void media_device_request_get(struct media_device_request *req) { } static inline void media_device_request_put(struct media_device_request *req) { } static inline void media_device_request_complete(struct media_device *mdev, struct media_device_request *req) { } #endif /* IPU_ISYS_MEDIA_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-queue.c000066400000000000000000000750071471702545500264150ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-bus.h" #include "ipu-buttress.h" #include "ipu-isys.h" #include "ipu-isys-csi2.h" #include "ipu-isys-video.h" static bool wall_clock_ts_on; module_param(wall_clock_ts_on, bool, 0660); MODULE_PARM_DESC(wall_clock_ts_on, "Timestamp based on REALTIME clock"); static int queue_setup(struct vb2_queue *q, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) const struct v4l2_format *__fmt, #endif unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) void *alloc_ctxs[]) #else struct device *alloc_devs[]) #endif { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) const struct v4l2_format *fmt = __fmt; const struct ipu_isys_pixelformat *pfmt; struct v4l2_pix_format_mplane mpix; #else bool use_fmt = false; #endif unsigned int i; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) if (fmt) mpix = fmt->fmt.pix_mp; else mpix = av->mpix; pfmt = av->try_fmt_vid_mplane(av, &mpix); *num_planes = mpix.num_planes; #else /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ if (!*num_planes) { use_fmt = true; *num_planes = av->mpix.num_planes; } #endif for (i = 0; i < *num_planes; i++) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) sizes[i] = mpix.plane_fmt[i].sizeimage; #else if (use_fmt) sizes[i] = av->mpix.plane_fmt[i].sizeimage; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) alloc_ctxs[i] = aq->ctx; #else alloc_devs[i] = aq->dev; #endif dev_dbg(&av->isys->adev->dev, 
"%s: queue setup: plane %d size %u\n", av->vdev.name, i, sizes[i]); } return 0; } static void ipu_isys_queue_lock(struct vb2_queue *q) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); dev_dbg(&av->isys->adev->dev, "%s: queue lock\n", av->vdev.name); mutex_lock(&av->mutex); } static void ipu_isys_queue_unlock(struct vb2_queue *q) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); dev_dbg(&av->isys->adev->dev, "%s: queue unlock\n", av->vdev.name); mutex_unlock(&av->mutex); } static int buf_init(struct vb2_buffer *vb) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, __func__); if (aq->buf_init) return aq->buf_init(vb); return 0; } int ipu_isys_buf_prepare(struct vb2_buffer *vb) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); dev_dbg(&av->isys->adev->dev, "buffer: %s: configured size %u, buffer size %lu\n", av->vdev.name, av->mpix.plane_fmt[0].sizeimage, vb2_plane_size(vb, 0)); if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0)) return -EINVAL; vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline * av->mpix.height); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_planes[0].data_offset = av->line_header_length / BITS_PER_BYTE; #else vb->planes[0].data_offset = av->line_header_length / BITS_PER_BYTE; #endif return 0; } static int buf_prepare(struct vb2_buffer *vb) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); int rval; if (av->isys->adev->isp->flr_done) return -EIO; rval = aq->buf_prepare(vb); return rval; } static void buf_finish(struct vb2_buffer *vb) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, __func__); } static void buf_cleanup(struct vb2_buffer *vb) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, __func__); if (aq->buf_cleanup) aq->buf_cleanup(vb); } /* * Queue a buffer list back to incoming or active queues. The buffers * are removed from the buffer list. 
*/ void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl, unsigned long op_flags, enum vb2_buffer_state state) { struct ipu_isys_buffer *ib, *ib_safe; unsigned long flags; bool first = true; if (!bl) return; WARN_ON(!bl->nbufs); WARN_ON(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE && op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING); list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { struct ipu_isys_video *av; if (ib->type == IPU_ISYS_VIDEO_BUFFER) { struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); av = ipu_isys_queue_to_video(aq); spin_lock_irqsave(&aq->lock, flags); list_del(&ib->head); if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE) list_add(&ib->head, &aq->active); else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING) list_add_tail(&ib->head, &aq->incoming); spin_unlock_irqrestore(&aq->lock, flags); if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE) vb2_buffer_done(vb, state); } else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) { struct ipu_isys_private_buffer *pb = ipu_isys_buffer_to_private_buffer(ib); struct ipu_isys_pipeline *ip = pb->ip; av = container_of(ip, struct ipu_isys_video, ip); spin_lock_irqsave(&ip->short_packet_queue_lock, flags); list_del(&ib->head); if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE) list_add(&ib->head, &ip->short_packet_active); else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING) list_add(&ib->head, &ip->short_packet_incoming); spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); } else { WARN_ON(1); return; } if (first) { dev_dbg(&av->isys->adev->dev, "queue buf list %p flags %lx, s %d, %d bufs\n", bl, op_flags, state, bl->nbufs); first = false; } bl->nbufs--; } WARN_ON(bl->nbufs); } /* * flush_firmware_streamon_fail() - Flush in cases where requests may * have been queued to firmware and the *firmware streamon fails for a * reason or another. */ static void flush_firmware_streamon_fail(struct ipu_isys_pipeline *ip) { struct ipu_isys_video *pipe_av = container_of(ip, struct ipu_isys_video, ip); struct ipu_isys_queue *aq; unsigned long flags; lockdep_assert_held(&pipe_av->mutex); list_for_each_entry(aq, &ip->queues, node) { struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); struct ipu_isys_buffer *ib, *ib_safe; spin_lock_irqsave(&aq->lock, flags); list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); if (av->streaming) { dev_dbg(&av->isys->adev->dev, "%s: queue buffer %u back to incoming\n", av->vdev.name, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.index); #else vb->index); #endif /* Queue already streaming, return to driver. */ list_add(&ib->head, &aq->incoming); continue; } /* Queue not yet streaming, return to user. */ dev_dbg(&av->isys->adev->dev, "%s: return %u back to videobuf2\n", av->vdev.name, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.index); #else vb->index); #endif vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib), VB2_BUF_STATE_QUEUED); } spin_unlock_irqrestore(&aq->lock, flags); } } /* * Attempt obtaining a buffer list from the incoming queues, a list of * buffers that contains one entry from each video buffer queue. If * all queues have no buffers, the buffers that were already dequeued * are returned to their queues. 
*/ static int buffer_list_get(struct ipu_isys_pipeline *ip, struct ipu_isys_buffer_list *bl) { struct ipu_isys_queue *aq; struct ipu_isys_buffer *ib; unsigned long flags; int ret = 0; bl->nbufs = 0; INIT_LIST_HEAD(&bl->head); list_for_each_entry(aq, &ip->queues, node) { struct ipu_isys_buffer *ib; spin_lock_irqsave(&aq->lock, flags); if (list_empty(&aq->incoming)) { spin_unlock_irqrestore(&aq->lock, flags); ret = -ENODATA; goto error; } ib = list_last_entry(&aq->incoming, struct ipu_isys_buffer, head); if (ib->req) { spin_unlock_irqrestore(&aq->lock, flags); ret = -ENODATA; goto error; } dev_dbg(&ip->isys->adev->dev, "buffer: %s: buffer %u\n", ipu_isys_queue_to_video(aq)->vdev.name, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) ipu_isys_buffer_to_vb2_buffer(ib)->v4l2_buf.index #else ipu_isys_buffer_to_vb2_buffer(ib)->index #endif ); list_del(&ib->head); list_add(&ib->head, &bl->head); spin_unlock_irqrestore(&aq->lock, flags); bl->nbufs++; } list_for_each_entry(ib, &bl->head, head) { struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); if (aq->prepare_frame_buff_set) aq->prepare_frame_buff_set(vb); } /* Get short packet buffer. */ if (ip->interlaced && ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { ib = ipu_isys_csi2_get_short_packet_buffer(ip, bl); if (!ib) { ret = -ENODATA; dev_err(&ip->isys->adev->dev, "No more short packet buffers. Driver bug?"); WARN_ON(1); goto error; } bl->nbufs++; } dev_dbg(&ip->isys->adev->dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); return ret; error: if (!list_empty(&bl->head)) ipu_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_INCOMING, 0); return ret; } void ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb, struct ipu_fw_isys_frame_buff_set_abi *set) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = container_of(aq, struct ipu_isys_video, aq); if (av->compression) set->output_pins[aq->fw_output].compress = 1; set->output_pins[aq->fw_output].addr = vb2_dma_contig_plane_dma_addr(vb, 0); set->output_pins[aq->fw_output].out_buf_id = #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.index + 1; #else vb->index + 1; #endif } /* * Convert a buffer list to a isys fw ABI framebuffer set. The * buffer list is not modified. 
*/ #define IPU_ISYS_FRAME_NUM_THRESHOLD (30) void ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set, struct ipu_isys_pipeline *ip, struct ipu_isys_buffer_list *bl) { struct ipu_isys_buffer *ib; WARN_ON(!bl->nbufs); set->send_irq_sof = 1; set->send_resp_sof = 1; set->send_irq_eof = 0; set->send_resp_eof = 0; if (ip->streaming) set->send_irq_capture_ack = 0; else set->send_irq_capture_ack = 1; set->send_irq_capture_done = 0; set->send_resp_capture_ack = 1; set->send_resp_capture_done = 1; if (!ip->interlaced && atomic_read(&ip->sequence) >= IPU_ISYS_FRAME_NUM_THRESHOLD) { set->send_resp_capture_ack = 0; set->send_resp_capture_done = 0; } list_for_each_entry(ib, &bl->head, head) { if (ib->type == IPU_ISYS_VIDEO_BUFFER) { struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); if (aq->fill_frame_buff_set_pin) aq->fill_frame_buff_set_pin(vb, set); } else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) { struct ipu_isys_private_buffer *pb = ipu_isys_buffer_to_private_buffer(ib); struct ipu_fw_isys_output_pin_payload_abi *output_pin = &set->output_pins[ip->short_packet_output_pin]; output_pin->addr = pb->dma_addr; output_pin->out_buf_id = pb->index + 1; } else { WARN_ON(1); } } } /* Start streaming for real. The buffer list must be available. */ static int ipu_isys_stream_start(struct ipu_isys_pipeline *ip, struct ipu_isys_buffer_list *bl, bool error) { struct ipu_isys_video *pipe_av = container_of(ip, struct ipu_isys_video, ip); struct ipu_isys_buffer_list __bl; int rval; mutex_lock(&pipe_av->isys->stream_mutex); rval = ipu_isys_video_set_streaming(pipe_av, 1, bl); if (rval) { mutex_unlock(&pipe_av->isys->stream_mutex); goto out_requeue; } ip->streaming = 1; mutex_unlock(&pipe_av->isys->stream_mutex); bl = &__bl; do { struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; struct isys_fw_msgs *msg; enum ipu_fw_isys_send_type send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE; rval = buffer_list_get(ip, bl); if (rval == -EINVAL) goto out_requeue; else if (rval < 0) break; msg = ipu_get_fw_msg_buf(ip); if (!msg) return -ENOMEM; buf = to_frame_msg_buf(msg); ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl); ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf, ip->nr_output_pins); ipu_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); rval = ipu_fw_isys_complex_cmd(pipe_av->isys, ip->stream_handle, buf, to_dma_addr(msg), sizeof(*buf), send_type); } while (!WARN_ON(rval)); return 0; out_requeue: if (bl && bl->nbufs) ipu_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_INCOMING | (error ? IPU_ISYS_BUFFER_LIST_FL_SET_STATE : 0), error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); flush_firmware_streamon_fail(ip); return rval; } static void buf_queue(struct vb2_buffer *vb) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); struct ipu_isys_buffer_list bl; struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; struct isys_fw_msgs *msg; struct ipu_isys_video *pipe_av = container_of(ip, struct ipu_isys_video, ip); unsigned long flags; unsigned int i; int rval; dev_dbg(&av->isys->adev->dev, "buffer: %s: buf_queue %u\n", av->vdev.name, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.index #else vb->index #endif ); for (i = 0; i < vb->num_planes; i++) dev_dbg(&av->isys->adev->dev, "iova: plane %u iova 0x%x\n", i, (u32)vb2_dma_contig_plane_dma_addr(vb, i)); spin_lock_irqsave(&aq->lock, flags); list_add(&ib->head, &aq->incoming); spin_unlock_irqrestore(&aq->lock, flags); if (ib->req) return; if (!pipe_av || !mp || !vb->vb2_queue->start_streaming_called) { dev_dbg(&av->isys->adev->dev, "no pipe or streaming, adding to incoming\n"); return; } mutex_unlock(&av->mutex); mutex_lock(&pipe_av->mutex); if (ip->nr_streaming != ip->nr_queues) { dev_dbg(&av->isys->adev->dev, "not streaming yet, adding to incoming\n"); goto out; } /* * We just put one buffer to the incoming list of this queue * (above). Let's see whether all queues in the pipeline would * have a buffer. */ rval = buffer_list_get(ip, &bl); if (rval < 0) { if (rval == -EINVAL) { dev_err(&av->isys->adev->dev, "error: buffer list get failed\n"); WARN_ON(1); } else { dev_dbg(&av->isys->adev->dev, "not enough buffers available\n"); } goto out; } msg = ipu_get_fw_msg_buf(ip); if (!msg) { rval = -ENOMEM; goto out; } buf = to_frame_msg_buf(msg); ipu_isys_buffer_to_fw_frame_buff(buf, ip, &bl); ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf, ip->nr_output_pins); if (!ip->streaming) { dev_dbg(&av->isys->adev->dev, "got a buffer to start streaming!\n"); rval = ipu_isys_stream_start(ip, &bl, true); if (rval) dev_err(&av->isys->adev->dev, "stream start failed.\n"); goto out; } /* * We must queue the buffers in the buffer list to the * appropriate video buffer queues BEFORE passing them to the * firmware since we could get a buffer event back before we * have queued them ourselves to the active queue. 
*/ ipu_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); rval = ipu_fw_isys_complex_cmd(pipe_av->isys, ip->stream_handle, buf, to_dma_addr(msg), sizeof(*buf), IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); if (!WARN_ON(rval < 0)) dev_dbg(&av->isys->adev->dev, "queued buffer\n"); out: mutex_unlock(&pipe_av->mutex); mutex_lock(&av->mutex); } int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq) { struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); struct v4l2_subdev_format fmt = { 0 }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads); #else struct media_pad *pad = media_pad_remote_pad_first(av->vdev.entity.pads); #endif struct v4l2_subdev *sd; int rval; if (!pad) { dev_dbg(&av->isys->adev->dev, "video node %s pad not connected\n", av->vdev.name); return -ENOTCONN; } sd = media_entity_to_v4l2_subdev(pad->entity); fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; fmt.pad = pad->index; rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); if (rval) return rval; if (fmt.format.width != av->mpix.width || fmt.format.height != av->mpix.height) { dev_dbg(&av->isys->adev->dev, "wrong width or height %ux%u (%ux%u expected)\n", av->mpix.width, av->mpix.height, fmt.format.width, fmt.format.height); return -EINVAL; } if (fmt.format.field != av->mpix.field) { dev_dbg(&av->isys->adev->dev, "wrong field value 0x%8.8x (0x%8.8x expected)\n", av->mpix.field, fmt.format.field); return -EINVAL; } if (fmt.format.code != av->pfmt->code) { dev_dbg(&av->isys->adev->dev, "wrong media bus code 0x%8.8x (0x%8.8x expected)\n", av->pfmt->code, fmt.format.code); return -EINVAL; } return 0; } /* Return buffers back to videobuf2. */ static void return_buffers(struct ipu_isys_queue *aq, enum vb2_buffer_state state) { struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); int reset_needed = 0; unsigned long flags; spin_lock_irqsave(&aq->lock, flags); while (!list_empty(&aq->incoming)) { struct ipu_isys_buffer *ib = list_first_entry(&aq->incoming, struct ipu_isys_buffer, head); struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); vb2_buffer_done(vb, state); dev_dbg(&av->isys->adev->dev, "%s: stop_streaming incoming %u\n", ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue (vb->vb2_queue))->vdev.name, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.index); #else vb->index); #endif spin_lock_irqsave(&aq->lock, flags); } /* * Something went wrong (FW crash / HW hang / not all buffers * returned from isys) if there are still buffers queued in active * queue. We have to clean up places a bit. 
*/ while (!list_empty(&aq->active)) { struct ipu_isys_buffer *ib = list_first_entry(&aq->active, struct ipu_isys_buffer, head); struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); vb2_buffer_done(vb, state); dev_warn(&av->isys->adev->dev, "%s: cleaning active queue %u\n", ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue (vb->vb2_queue))->vdev.name, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.index); #else vb->index); #endif spin_lock_irqsave(&aq->lock, flags); reset_needed = 1; } spin_unlock_irqrestore(&aq->lock, flags); if (reset_needed) { mutex_lock(&av->isys->mutex); av->isys->reset_needed = true; mutex_unlock(&av->isys->mutex); } } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); struct ipu_isys_video *pipe_av; struct ipu_isys_pipeline *ip; struct ipu_isys_buffer_list __bl, *bl = NULL; bool first; int rval; dev_dbg(&av->isys->adev->dev, "stream: %s: width %u, height %u, css pixelformat %u\n", av->vdev.name, av->mpix.width, av->mpix.height, av->pfmt->css_pixelformat); mutex_lock(&av->isys->stream_mutex); first = !media_entity_pipeline(&av->vdev.entity); if (first) { rval = ipu_isys_video_prepare_streaming(av, 1); if (rval) goto out_return_buffers; } mutex_unlock(&av->isys->stream_mutex); rval = aq->link_fmt_validate(aq); if (rval) { dev_dbg(&av->isys->adev->dev, "%s: link format validation failed (%d)\n", av->vdev.name, rval); goto out_unprepare_streaming; } ip = to_ipu_isys_pipeline(media_entity_pipeline(&av->vdev.entity)); pipe_av = container_of(ip, struct ipu_isys_video, ip); mutex_unlock(&av->mutex); mutex_lock(&pipe_av->mutex); ip->nr_streaming++; dev_dbg(&av->isys->adev->dev, "queue %u of %u\n", ip->nr_streaming, ip->nr_queues); list_add(&aq->node, &ip->queues); if (ip->nr_streaming != ip->nr_queues) goto out; if (list_empty(&av->isys->requests)) { bl = &__bl; rval = buffer_list_get(ip, bl); if (rval == -EINVAL) { goto out_stream_start; } else if (rval < 0) { dev_dbg(&av->isys->adev->dev, "no request available, postponing streamon\n"); goto out; } } rval = ipu_isys_stream_start(ip, bl, false); if (rval) goto out_stream_start; out: mutex_unlock(&pipe_av->mutex); mutex_lock(&av->mutex); return 0; out_stream_start: list_del(&aq->node); ip->nr_streaming--; mutex_unlock(&pipe_av->mutex); mutex_lock(&av->mutex); out_unprepare_streaming: mutex_lock(&av->isys->stream_mutex); if (first) ipu_isys_video_prepare_streaming(av, 0); out_return_buffers: mutex_unlock(&av->isys->stream_mutex); return_buffers(aq, VB2_BUF_STATE_QUEUED); return rval; } static void stop_streaming(struct vb2_queue *q) { struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); struct ipu_isys_video *pipe_av = container_of(ip, struct ipu_isys_video, ip); if (pipe_av != av) { mutex_unlock(&av->mutex); mutex_lock(&pipe_av->mutex); } mutex_lock(&av->isys->stream_mutex); if (ip->nr_streaming == ip->nr_queues && ip->streaming) ipu_isys_video_set_streaming(av, 0, NULL); if (ip->nr_streaming == 1) ipu_isys_video_prepare_streaming(av, 0); mutex_unlock(&av->isys->stream_mutex); ip->nr_streaming--; list_del(&aq->node); ip->streaming = 0; if (pipe_av != av) { mutex_unlock(&pipe_av->mutex); mutex_lock(&av->mutex); } 
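/*
 * Descriptive note (added): streaming is fully torn down at this point, so any
 * buffers the driver still holds on this queue are handed back to videobuf2 in
 * the ERROR state; buffers that were still on the active list also flag the
 * ISYS for reset inside return_buffers().
 */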
return_buffers(aq, VB2_BUF_STATE_ERROR); } static unsigned int get_sof_sequence_by_timestamp(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *info) { struct ipu_isys *isys = container_of(ip, struct ipu_isys_video, ip)->isys; u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0]; unsigned int i; /* * The timestamp is invalid as no TSC in some FPGA platform, * so get the sequence from pipeline directly in this case. */ if (time == 0) return atomic_read(&ip->sequence) - 1; for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) if (time == ip->seq[i].timestamp) { dev_dbg(&isys->adev->dev, "sof: using seq nr %u for ts 0x%16.16llx\n", ip->seq[i].sequence, time); return ip->seq[i].sequence; } dev_dbg(&isys->adev->dev, "SOF: looking for 0x%16.16llx\n", time); for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) dev_dbg(&isys->adev->dev, "SOF: sequence %u, timestamp value 0x%16.16llx\n", ip->seq[i].sequence, ip->seq[i].timestamp); dev_dbg(&isys->adev->dev, "SOF sequence number not found\n"); return 0; } static u64 get_sof_ns_delta(struct ipu_isys_video *av, struct ipu_fw_isys_resp_info_abi *info) { struct ipu_bus_device *adev = to_ipu_bus_device(&av->isys->adev->dev); struct ipu_device *isp = adev->isp; u64 delta, tsc_now; if (!ipu_buttress_tsc_read(isp, &tsc_now)) delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]); else delta = 0; return ipu_buttress_tsc_ticks_to_ns(delta, isp); } void ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib, struct ipu_fw_isys_resp_info_abi *info) { struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) struct timespec ts_now; #endif struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->dev; struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); u64 ns; u32 sequence; if (ip->has_sof) { ns = (wall_clock_ts_on) ? ktime_get_real_ns() : ktime_get_ns(); ns -= get_sof_ns_delta(av, info); sequence = get_sof_sequence_by_timestamp(ip, info); } else { ns = ((wall_clock_ts_on) ? 
ktime_get_real_ns() : ktime_get_ns()); sequence = (atomic_inc_return(&ip->sequence) - 1) / ip->nr_queues; } #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.sequence = sequence; ts_now = ns_to_timespec(ns); vb->v4l2_buf.timestamp.tv_sec = ts_now.tv_sec; vb->v4l2_buf.timestamp.tv_usec = ts_now.tv_nsec / NSEC_PER_USEC; dev_dbg(dev, "buffer: %s: buffer done %u\n", av->vdev.name, vb->v4l2_buf.index); #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) vbuf->sequence = sequence; ts_now = ns_to_timespec(ns); vbuf->timestamp.tv_sec = ts_now.tv_sec; vbuf->timestamp.tv_usec = ts_now.tv_nsec / NSEC_PER_USEC; dev_dbg(dev, "%s: buffer done %u\n", av->vdev.name, vb->index); #else vbuf->vb2_buf.timestamp = ns; vbuf->sequence = sequence; dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", av->vdev.name, ktime_get_ns(), sequence); dev_dbg(dev, "index:%d, vbuf timestamp:%lld, endl\n", vb->index, vbuf->vb2_buf.timestamp); #endif } void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib) { struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); if (atomic_read(&ib->str2mmio_flag)) { vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); /* * Operation on buffer is ended with error and will be reported * to the userspace when it is de-queued */ atomic_set(&ib->str2mmio_flag, 0); } else { vb2_buffer_done(vb, VB2_BUF_STATE_DONE); } } void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *info) { struct ipu_isys *isys = container_of(ip, struct ipu_isys_video, ip)->isys; struct ipu_isys_queue *aq = ip->output_pins[info->pin_id].aq; struct ipu_isys_buffer *ib; struct vb2_buffer *vb; unsigned long flags; bool first = true; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) struct v4l2_buffer *buf; #else struct vb2_v4l2_buffer *buf; #endif dev_dbg(&isys->adev->dev, "buffer: %s: received buffer %8.8x\n", ipu_isys_queue_to_video(aq)->vdev.name, info->pin.addr); spin_lock_irqsave(&aq->lock, flags); if (list_empty(&aq->active)) { spin_unlock_irqrestore(&aq->lock, flags); dev_err(&isys->adev->dev, "active queue empty\n"); return; } list_for_each_entry_reverse(ib, &aq->active, head) { dma_addr_t addr; vb = ipu_isys_buffer_to_vb2_buffer(ib); addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (info->pin.addr != addr) { if (first) dev_err(&isys->adev->dev, "WARN: buffer address %pad expected!\n", &addr); first = false; continue; } if (info->error_info.error == IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) { /* * Check for error message: * 'IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' */ atomic_set(&ib->str2mmio_flag, 1); } dev_dbg(&isys->adev->dev, "buffer: found buffer %pad\n", &addr); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) buf = &vb->v4l2_buf; #else buf = to_vb2_v4l2_buffer(vb); #endif buf->field = V4L2_FIELD_NONE; list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); ipu_isys_buf_calc_sequence_time(ib, info); /* * For interlaced buffers, the notification to user space * is postponed to capture_done event since the field * information is available only at that time. 
*/ if (ip->interlaced) { spin_lock_irqsave(&ip->short_packet_queue_lock, flags); list_add(&ib->head, &ip->pending_interlaced_bufs); spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); } else { ipu_isys_queue_buf_done(ib); } return; } dev_err(&isys->adev->dev, "WARNING: cannot find a matching video buffer!\n"); spin_unlock_irqrestore(&aq->lock, flags); } void ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *info) { struct ipu_isys *isys = container_of(ip, struct ipu_isys_video, ip)->isys; unsigned long flags; dev_dbg(&isys->adev->dev, "receive short packet buffer %8.8x\n", info->pin.addr); spin_lock_irqsave(&ip->short_packet_queue_lock, flags); ip->cur_field = ipu_isys_csi2_get_current_field(ip, info->timestamp); spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); } struct vb2_ops ipu_isys_queue_ops = { .queue_setup = queue_setup, .wait_prepare = ipu_isys_queue_unlock, .wait_finish = ipu_isys_queue_lock, .buf_init = buf_init, .buf_prepare = buf_prepare, .buf_finish = buf_finish, .buf_cleanup = buf_cleanup, .start_streaming = start_streaming, .stop_streaming = stop_streaming, .buf_queue = buf_queue, }; int ipu_isys_queue_init(struct ipu_isys_queue *aq) { struct ipu_isys *isys = ipu_isys_queue_to_video(aq)->isys; int rval; if (!aq->vbq.io_modes) aq->vbq.io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF; aq->vbq.drv_priv = aq; aq->vbq.ops = &ipu_isys_queue_ops; aq->vbq.mem_ops = &vb2_dma_contig_memops; aq->vbq.timestamp_flags = (wall_clock_ts_on) ? V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN : V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; rval = vb2_queue_init(&aq->vbq); if (rval) return rval; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) aq->ctx = vb2_dma_contig_init_ctx(&isys->adev->dev); if (IS_ERR(aq->ctx)) { vb2_queue_release(&aq->vbq); return PTR_ERR(aq->ctx); } #else aq->dev = &isys->adev->dev; aq->vbq.dev = &isys->adev->dev; #endif spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->active); INIT_LIST_HEAD(&aq->incoming); return 0; } void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) if (IS_ERR_OR_NULL(aq->ctx)) return; vb2_dma_contig_cleanup_ctx(aq->ctx); aq->ctx = NULL; #endif vb2_queue_release(&aq->vbq); } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-queue.h000066400000000000000000000106351471702545500264160ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_ISYS_QUEUE_H #define IPU_ISYS_QUEUE_H #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) #include #else #include #endif #include "ipu-isys-media.h" struct ipu_isys_video; struct ipu_isys_pipeline; struct ipu_fw_isys_resp_info_abi; struct ipu_fw_isys_frame_buff_set_abi; enum ipu_isys_buffer_type { IPU_ISYS_VIDEO_BUFFER, IPU_ISYS_SHORT_PACKET_BUFFER, }; struct ipu_isys_queue { struct list_head node; /* struct ipu_isys_pipeline.queues */ struct vb2_queue vbq; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct vb2_alloc_ctx *ctx; #else struct device *dev; #endif /* * @lock: serialise access to queued and pre_streamon_queued */ spinlock_t lock; struct list_head active; struct list_head incoming; u32 css_pin_type; unsigned int fw_output; int (*buf_init)(struct vb2_buffer *vb); void (*buf_cleanup)(struct vb2_buffer *vb); int (*buf_prepare)(struct vb2_buffer *vb); void (*prepare_frame_buff_set)(struct vb2_buffer *vb); void (*fill_frame_buff_set_pin)(struct vb2_buffer *vb, struct ipu_fw_isys_frame_buff_set_abi * set); 
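/*
 * Descriptive note (added): called from start_streaming() to check the video
 * node format against the connected source sub-device pad; see
 * ipu_isys_link_fmt_validate() for the checks performed.
 */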
int (*link_fmt_validate)(struct ipu_isys_queue *aq); }; struct ipu_isys_buffer { struct list_head head; enum ipu_isys_buffer_type type; struct list_head req_head; struct media_device_request *req; atomic_t str2mmio_flag; }; struct ipu_isys_video_buffer { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) struct vb2_buffer vb; #else struct vb2_v4l2_buffer vb_v4l2; #endif struct ipu_isys_buffer ib; }; struct ipu_isys_private_buffer { struct ipu_isys_buffer ib; struct ipu_isys_pipeline *ip; unsigned int index; unsigned int bytesused; dma_addr_t dma_addr; void *buffer; }; #define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) #define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) #define IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) struct ipu_isys_buffer_list { struct list_head head; unsigned int nbufs; }; #define vb2_queue_to_ipu_isys_queue(__vb2) \ container_of(__vb2, struct ipu_isys_queue, vbq) #define ipu_isys_to_isys_video_buffer(__ib) \ container_of(__ib, struct ipu_isys_video_buffer, ib) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) #define vb2_buffer_to_ipu_isys_video_buffer(__vb) \ container_of(__vb, struct ipu_isys_video_buffer, vb) #define ipu_isys_buffer_to_vb2_buffer(__ib) \ (&ipu_isys_to_isys_video_buffer(__ib)->vb) #else #define vb2_buffer_to_ipu_isys_video_buffer(__vb) \ container_of(to_vb2_v4l2_buffer(__vb), \ struct ipu_isys_video_buffer, vb_v4l2) #define ipu_isys_buffer_to_vb2_buffer(__ib) \ (&ipu_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) #endif #define vb2_buffer_to_ipu_isys_buffer(__vb) \ (&vb2_buffer_to_ipu_isys_video_buffer(__vb)->ib) #define ipu_isys_buffer_to_private_buffer(__ib) \ container_of(__ib, struct ipu_isys_private_buffer, ib) struct ipu_isys_request { struct media_device_request req; /* serialise access to buffers */ spinlock_t lock; struct list_head buffers; /* struct ipu_isys_buffer.head */ bool dispatched; /* * struct ipu_isys.requests; * struct ipu_isys_pipeline.struct.* */ struct list_head head; }; #define to_ipu_isys_request(__req) \ container_of(__req, struct ipu_isys_request, req) int ipu_isys_buf_prepare(struct vb2_buffer *vb); void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl, unsigned long op_flags, enum vb2_buffer_state state); struct ipu_isys_request * ipu_isys_next_queued_request(struct ipu_isys_pipeline *ip); void ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb, struct ipu_fw_isys_frame_buff_set_abi * set); void ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set, struct ipu_isys_pipeline *ip, struct ipu_isys_buffer_list *bl); int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq); void ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib, struct ipu_fw_isys_resp_info_abi *info); void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib); void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *info); void ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *inf); int ipu_isys_queue_init(struct ipu_isys_queue *aq); void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq); #endif /* IPU_ISYS_QUEUE_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-subdev.c000066400000000000000000000605001471702545500265510ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include "ipu-isys.h" #include "ipu-isys-video.h" #include "ipu-isys-subdev.h" unsigned int ipu_isys_mbus_code_to_bpp(u32 code) { switch (code) { 
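/*
 * Descriptive note (added): bits per pixel on the CSI-2 bus for each supported
 * media bus code; unknown codes hit the WARN_ON() in the default case and
 * return -EINVAL.
 */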
case MEDIA_BUS_FMT_RGB888_1X24: return 24; #ifdef V4L2_PIX_FMT_Y210 case MEDIA_BUS_FMT_YUYV10_1X20: return 20; #endif case MEDIA_BUS_FMT_Y10_1X10: case MEDIA_BUS_FMT_RGB565_1X16: case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_YUYV8_1X16: case MEDIA_BUS_FMT_VYUY8_1X16: return 16; case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12: return 12; case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10: return 10; case MEDIA_BUS_FMT_Y8_1X8: case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SRGGB8_1X8: case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: return 8; default: WARN_ON(1); return -EINVAL; } } unsigned int ipu_isys_mbus_code_to_mipi(u32 code) { switch (code) { case MEDIA_BUS_FMT_RGB565_1X16: return IPU_ISYS_MIPI_CSI2_TYPE_RGB565; case MEDIA_BUS_FMT_RGB888_1X24: return IPU_ISYS_MIPI_CSI2_TYPE_RGB888; #ifdef V4L2_PIX_FMT_Y210 case MEDIA_BUS_FMT_YUYV10_1X20: return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10; #endif case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_YUYV8_1X16: case MEDIA_BUS_FMT_VYUY8_1X16: return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8; case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12: return IPU_ISYS_MIPI_CSI2_TYPE_RAW12; case MEDIA_BUS_FMT_Y10_1X10: case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10: return IPU_ISYS_MIPI_CSI2_TYPE_RAW10; case MEDIA_BUS_FMT_Y8_1X8: case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SRGGB8_1X8: return IPU_ISYS_MIPI_CSI2_TYPE_RAW8; case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: return IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1); default: WARN_ON(1); return -EINVAL; } } enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code) { switch (code) { case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: return IPU_ISYS_SUBDEV_PIXELORDER_BGGR; case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: return IPU_ISYS_SUBDEV_PIXELORDER_GBRG; case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: return IPU_ISYS_SUBDEV_PIXELORDER_GRBG; case MEDIA_BUS_FMT_SRGGB12_1X12: case MEDIA_BUS_FMT_SRGGB10_1X10: case MEDIA_BUS_FMT_SRGGB8_1X8: case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: return IPU_ISYS_SUBDEV_PIXELORDER_RGGB; default: WARN_ON(1); return -EINVAL; } } u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code) { switch (sink_code) { case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: return MEDIA_BUS_FMT_SBGGR10_1X10; case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: return MEDIA_BUS_FMT_SGBRG10_1X10; case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: return MEDIA_BUS_FMT_SGRBG10_1X10; case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: return MEDIA_BUS_FMT_SRGGB10_1X10; default: return sink_code; } } struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif 
LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif unsigned int pad, unsigned int which) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); if (which == V4L2_SUBDEV_FORMAT_ACTIVE) return &asd->ffmt[pad]; else #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) return v4l2_subdev_get_try_format(cfg, pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) return v4l2_subdev_get_try_format(sd, cfg, pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) return v4l2_subdev_get_try_format(sd, state, pad); #else return v4l2_subdev_state_get_format(state, pad); #endif } struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif unsigned int target, unsigned int pad, unsigned int which) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { switch (target) { case V4L2_SEL_TGT_CROP: return &asd->crop[pad]; case V4L2_SEL_TGT_COMPOSE: return &asd->compose[pad]; } } else { switch (target) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) case V4L2_SEL_TGT_CROP: return v4l2_subdev_get_try_crop(cfg, pad); case V4L2_SEL_TGT_COMPOSE: return v4l2_subdev_get_try_compose(cfg, pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) case V4L2_SEL_TGT_CROP: return v4l2_subdev_get_try_crop(sd, cfg, pad); case V4L2_SEL_TGT_COMPOSE: return v4l2_subdev_get_try_compose(sd, cfg, pad); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) case V4L2_SEL_TGT_CROP: return v4l2_subdev_get_try_crop(sd, state, pad); case V4L2_SEL_TGT_COMPOSE: return v4l2_subdev_get_try_compose(sd, state, pad); #else case V4L2_SEL_TGT_CROP: return v4l2_subdev_state_get_crop(state, pad); case V4L2_SEL_TGT_COMPOSE: return v4l2_subdev_state_get_compose(state, pad); #endif } } WARN_ON(1); return NULL; } static int target_valid(struct v4l2_subdev *sd, unsigned int target, unsigned int pad) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); switch (target) { case V4L2_SEL_TGT_CROP: return asd->valid_tgts[pad].crop; case V4L2_SEL_TGT_COMPOSE: return asd->valid_tgts[pad].compose; default: return 0; } } int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_mbus_framefmt *ffmt, struct v4l2_rect *r, enum isys_subdev_prop_tgt tgt, unsigned int pad, unsigned int which) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); struct v4l2_mbus_framefmt **ffmts = NULL; struct v4l2_rect **crops = NULL; struct v4l2_rect **compose = NULL; unsigned int i; int rval = 0; if (tgt == IPU_ISYS_SUBDEV_PROP_TGT_NR_OF) return 0; if (WARN_ON(pad >= sd->entity.num_pads)) return -EINVAL; ffmts = kcalloc(sd->entity.num_pads, sizeof(*ffmts), GFP_KERNEL); if (!ffmts) { rval = -ENOMEM; goto out_subdev_fmt_propagate; } crops = kcalloc(sd->entity.num_pads, sizeof(*crops), GFP_KERNEL); if (!crops) { rval = -ENOMEM; goto out_subdev_fmt_propagate; } compose = kcalloc(sd->entity.num_pads, sizeof(*compose), GFP_KERNEL); if (!compose) { rval = -ENOMEM; goto out_subdev_fmt_propagate; } for (i = 0; i < sd->entity.num_pads; i++) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) ffmts[i] = __ipu_isys_get_ffmt(sd, cfg, i, 
which); crops[i] = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, i, which); compose[i] = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_COMPOSE, i, which); #else ffmts[i] = __ipu_isys_get_ffmt(sd, state, i, which); crops[i] = __ipu_isys_get_selection(sd, state, V4L2_SEL_TGT_CROP, i, which); compose[i] = __ipu_isys_get_selection(sd, state, V4L2_SEL_TGT_COMPOSE, i, which); #endif } switch (tgt) { case IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT: crops[pad]->left = 0; crops[pad]->top = 0; crops[pad]->width = ffmt->width; crops[pad]->height = ffmt->height; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, crops[pad], tgt + 1, pad, which); #else rval = ipu_isys_subdev_fmt_propagate(sd, state, ffmt, crops[pad], tgt + 1, pad, which); #endif goto out_subdev_fmt_propagate; case IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP: if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) goto out_subdev_fmt_propagate; compose[pad]->left = 0; compose[pad]->top = 0; compose[pad]->width = r->width; compose[pad]->height = r->height; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, compose[pad], tgt + 1, pad, which); #else rval = ipu_isys_subdev_fmt_propagate(sd, state, ffmt, compose[pad], tgt + 1, pad, which); #endif goto out_subdev_fmt_propagate; case IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE: if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) { rval = -EINVAL; goto out_subdev_fmt_propagate; } for (i = 1; i < sd->entity.num_pads; i++) { if (!(sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)) continue; compose[i]->left = 0; compose[i]->top = 0; compose[i]->width = r->width; compose[i]->height = r->height; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, compose[i], tgt + 1, i, which); #else rval = ipu_isys_subdev_fmt_propagate(sd, state, ffmt, compose[i], tgt + 1, i, which); #endif if (rval) goto out_subdev_fmt_propagate; } goto out_subdev_fmt_propagate; case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE: if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)) { rval = -EINVAL; goto out_subdev_fmt_propagate; } crops[pad]->left = 0; crops[pad]->top = 0; crops[pad]->width = r->width; crops[pad]->height = r->height; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) rval = ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, crops[pad], tgt + 1, pad, which); #else rval = ipu_isys_subdev_fmt_propagate(sd, state, ffmt, crops[pad], tgt + 1, pad, which); #endif goto out_subdev_fmt_propagate; case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP:{ struct v4l2_subdev_format fmt = { .which = which, .pad = pad, .format = { .width = r->width, .height = r->height, /* * Either use the code from sink pad * or the current one. */ .code = ffmt ? ffmt->code : ffmts[pad]->code, .field = ffmt ? 
ffmt->field : ffmts[pad]->field, }, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) asd->set_ffmt(sd, cfg, &fmt); #else asd->set_ffmt(sd, state, &fmt); #endif goto out_subdev_fmt_propagate; } } out_subdev_fmt_propagate: kfree(ffmts); kfree(crops); kfree(compose); return rval; } int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { struct v4l2_mbus_framefmt *ffmt = #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which); #else __ipu_isys_get_ffmt(sd, state, fmt->pad, fmt->which); #endif /* No propagation for non-zero pads. */ if (fmt->pad) { struct v4l2_mbus_framefmt *sink_ffmt = #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) __ipu_isys_get_ffmt(sd, cfg, 0, fmt->which); #else __ipu_isys_get_ffmt(sd, state, 0, fmt->which); #endif ffmt->width = sink_ffmt->width; ffmt->height = sink_ffmt->height; ffmt->code = sink_ffmt->code; ffmt->field = sink_ffmt->field; return 0; } ffmt->width = fmt->format.width; ffmt->height = fmt->format.height; ffmt->code = fmt->format.code; ffmt->field = fmt->format.field; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) return ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL, IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); #else return ipu_isys_subdev_fmt_propagate(sd, state, &fmt->format, NULL, IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); #endif } int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); struct v4l2_mbus_framefmt *ffmt = #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which); #else __ipu_isys_get_ffmt(sd, state, fmt->pad, fmt->which); #endif u32 code = asd->supported_codes[fmt->pad][0]; unsigned int i; WARN_ON(!mutex_is_locked(&asd->mutex)); fmt->format.width = clamp(fmt->format.width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); fmt->format.height = clamp(fmt->format.height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); for (i = 0; asd->supported_codes[fmt->pad][i]; i++) { if (asd->supported_codes[fmt->pad][i] == fmt->format.code) { code = asd->supported_codes[fmt->pad][i]; break; } } fmt->format.code = code; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) asd->set_ffmt(sd, cfg, fmt); #else asd->set_ffmt(sd, state, fmt); #endif fmt->format = *ffmt; return 0; } int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); int rval; mutex_lock(&asd->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt); #else rval = __ipu_isys_subdev_set_ffmt(sd, state, fmt); #endif mutex_unlock(&asd->mutex); return rval; } int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif 
LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); mutex_lock(&asd->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) fmt->format = *__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which); #else fmt->format = *__ipu_isys_get_ffmt(sd, state, fmt->pad, fmt->which); #endif mutex_unlock(&asd->mutex); return 0; } int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_selection *sel) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; struct v4l2_rect *r, __r = { 0 }; unsigned int tgt; if (!target_valid(sd, sel->target, sel->pad)) return -EINVAL; switch (sel->target) { case V4L2_SEL_TGT_CROP: if (pad->flags & MEDIA_PAD_FL_SINK) { struct v4l2_mbus_framefmt *ffmt = #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) __ipu_isys_get_ffmt(sd, cfg, sel->pad, sel->which); #else __ipu_isys_get_ffmt(sd, state, sel->pad, sel->which); #endif __r.width = ffmt->width; __r.height = ffmt->height; r = &__r; tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP; } else { /* 0 is the sink pad. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) r = __ipu_isys_get_selection(sd, cfg, sel->target, 0, sel->which); #else r = __ipu_isys_get_selection(sd, state, sel->target, 0, sel->which); #endif tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP; } break; case V4L2_SEL_TGT_COMPOSE: if (pad->flags & MEDIA_PAD_FL_SINK) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, sel->pad, sel->which); #else r = __ipu_isys_get_selection(sd, state, V4L2_SEL_TGT_CROP, sel->pad, sel->which); #endif tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE; } else { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_COMPOSE, 0, sel->which); #else r = __ipu_isys_get_selection(sd, state, V4L2_SEL_TGT_COMPOSE, 0, sel->which); #endif tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE; } break; default: return -EINVAL; } sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, r->width); sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, r->height); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, sel->which) = sel->r; return ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, tgt, sel->pad, sel->which); #else *__ipu_isys_get_selection(sd, state, sel->target, sel->pad, sel->which) = sel->r; return ipu_isys_subdev_fmt_propagate(sd, state, NULL, &sel->r, tgt, sel->pad, sel->which); #endif } int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_selection *sel) { if (!target_valid(sd, sel->target, sel->pad)) return -EINVAL; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) sel->r = *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, sel->which); #else sel->r = *__ipu_isys_get_selection(sd, state, sel->target, sel->pad, sel->which); #endif return 0; } int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < 
KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_mbus_code_enum *code) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); const u32 *supported_codes = asd->supported_codes[code->pad]; u32 index; for (index = 0; supported_codes[index]; index++) { if (index == code->index) { code->code = supported_codes[index]; return 0; } } return -EINVAL; } /* * Besides validating the link, figure out the external pad and the * ISYS FW ABI source. */ int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct v4l2_subdev *source_sd = media_entity_to_v4l2_subdev(link->source->entity); struct media_pipeline *mp = media_entity_pipeline(&sd->entity); struct ipu_isys_pipeline *ip = container_of(mp, struct ipu_isys_pipeline, pipe); struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); if (!source_sd) return -ENODEV; if (strncmp(source_sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) { /* * source_sd isn't ours --- sd must be the external * sub-device. */ ip->external = link->source; ip->source = to_ipu_isys_subdev(sd)->source; dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n", sd->entity.name, ip->source); } else if (source_sd->entity.num_pads == 1) { /* All internal sources have a single pad. */ ip->external = link->source; ip->source = to_ipu_isys_subdev(source_sd)->source; dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n", sd->entity.name, ip->source); } if (asd->isl_mode != IPU_ISL_OFF) ip->isl_mode = asd->isl_mode; return v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt); } int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); unsigned int i; mutex_lock(&asd->mutex); for (i = 0; i < asd->sd.entity.num_pads; i++) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(fh, i); struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(fh, i); struct v4l2_rect *try_compose = v4l2_subdev_get_try_compose(fh, i); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd, fh->pad, i); struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, i); struct v4l2_rect *try_compose = v4l2_subdev_get_try_compose(sd, fh->pad, i); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 8, 0) struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd, fh->state, i); struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, fh->state, i); struct v4l2_rect *try_compose = v4l2_subdev_get_try_compose(sd, fh->state, i); #else struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_state_get_format(fh->state, i); struct v4l2_rect *try_crop = v4l2_subdev_state_get_crop(fh->state, i); struct v4l2_rect *try_compose = v4l2_subdev_state_get_compose(fh->state, i); #endif *try_fmt = asd->ffmt[i]; *try_crop = asd->crop[i]; *try_compose = asd->compose[i]; } mutex_unlock(&asd->mutex); return 0; } int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return 0; } int ipu_isys_subdev_init(struct ipu_isys_subdev *asd, struct v4l2_subdev_ops *ops, unsigned int nr_ctrls, unsigned int num_pads, unsigned int num_source, unsigned int num_sink, unsigned int sd_flags, int src_pad_idx, int sink_pad_idx) { unsigned int i; int 
rval = -EINVAL; mutex_init(&asd->mutex); v4l2_subdev_init(&asd->sd, ops); asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | sd_flags; asd->sd.owner = THIS_MODULE; asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; asd->nsources = num_source; asd->nsinks = num_sink; asd->pad = devm_kcalloc(&asd->isys->adev->dev, num_pads, sizeof(*asd->pad), GFP_KERNEL); /* * Out of range IDX means that this particular type of pad * does not exist. */ if (src_pad_idx != ISYS_SUBDEV_NO_PAD) { for (i = 0; i < num_source; i++) asd->pad[src_pad_idx + i].flags = MEDIA_PAD_FL_SOURCE; } if (sink_pad_idx != ISYS_SUBDEV_NO_PAD) { for (i = 0; i < num_sink; i++) asd->pad[sink_pad_idx + i].flags = MEDIA_PAD_FL_SINK; } asd->ffmt = devm_kcalloc(&asd->isys->adev->dev, num_pads, sizeof(*asd->ffmt), GFP_KERNEL); asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads, sizeof(*asd->crop), GFP_KERNEL); asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads, sizeof(*asd->compose), GFP_KERNEL); asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads, sizeof(*asd->valid_tgts), GFP_KERNEL); if (!asd->pad || !asd->ffmt || !asd->crop || !asd->compose || !asd->valid_tgts) return -ENOMEM; rval = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); if (rval) goto out_mutex_destroy; if (asd->ctrl_init) { rval = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); if (rval) goto out_media_entity_cleanup; asd->ctrl_init(&asd->sd); if (asd->ctrl_handler.error) { rval = asd->ctrl_handler.error; goto out_v4l2_ctrl_handler_free; } asd->sd.ctrl_handler = &asd->ctrl_handler; } asd->source = -1; return 0; out_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(&asd->ctrl_handler); out_media_entity_cleanup: media_entity_cleanup(&asd->sd.entity); out_mutex_destroy: mutex_destroy(&asd->mutex); return rval; } void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd) { media_entity_cleanup(&asd->sd.entity); v4l2_ctrl_handler_free(&asd->ctrl_handler); mutex_destroy(&asd->mutex); } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-subdev.h000066400000000000000000000154511471702545500265630ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_ISYS_SUBDEV_H #define IPU_ISYS_SUBDEV_H #include #include #include #include #include "ipu-isys-queue.h" #define IPU_ISYS_MIPI_CSI2_TYPE_NULL 0x10 #define IPU_ISYS_MIPI_CSI2_TYPE_BLANKING 0x11 #define IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8 0x12 #define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8 0x1e #define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10 0x1f #define IPU_ISYS_MIPI_CSI2_TYPE_RGB565 0x22 #define IPU_ISYS_MIPI_CSI2_TYPE_RGB888 0x24 #define IPU_ISYS_MIPI_CSI2_TYPE_RAW6 0x28 #define IPU_ISYS_MIPI_CSI2_TYPE_RAW7 0x29 #define IPU_ISYS_MIPI_CSI2_TYPE_RAW8 0x2a #define IPU_ISYS_MIPI_CSI2_TYPE_RAW10 0x2b #define IPU_ISYS_MIPI_CSI2_TYPE_RAW12 0x2c #define IPU_ISYS_MIPI_CSI2_TYPE_RAW14 0x2d /* 1-8 */ #define IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(i) (0x30 + (i) - 1) #define FMT_ENTRY (struct ipu_isys_fmt_entry []) enum isys_subdev_prop_tgt { IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP, IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE, IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE, IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, }; #define IPU_ISYS_SUBDEV_PROP_TGT_NR_OF \ (IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1) enum ipu_isl_mode { IPU_ISL_OFF = 0, /* SOC BE */ IPU_ISL_CSI2_BE, /* RAW BE */ }; enum ipu_be_mode { IPU_BE_RAW = 0, IPU_BE_SOC }; enum ipu_isys_subdev_pixelorder { IPU_ISYS_SUBDEV_PIXELORDER_BGGR = 0, 
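/* Descriptive note (added): each value names the colour order of the top-left 2x2 Bayer quad. */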
IPU_ISYS_SUBDEV_PIXELORDER_GBRG, IPU_ISYS_SUBDEV_PIXELORDER_GRBG, IPU_ISYS_SUBDEV_PIXELORDER_RGGB, }; struct ipu_isys; struct ipu_isys_subdev { /* Serialise access to any other field in the struct */ struct mutex mutex; struct v4l2_subdev sd; struct ipu_isys *isys; u32 const *const *supported_codes; struct media_pad *pad; struct v4l2_mbus_framefmt *ffmt; struct v4l2_rect *crop; struct v4l2_rect *compose; unsigned int nsinks; unsigned int nsources; struct v4l2_ctrl_handler ctrl_handler; void (*ctrl_init)(struct v4l2_subdev *sd); void (*set_ffmt)(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt); struct { bool crop; bool compose; } *valid_tgts; enum ipu_isl_mode isl_mode; enum ipu_be_mode be_mode; int source; /* SSI stream source; -1 if unset */ }; #define to_ipu_isys_subdev(__sd) \ container_of(__sd, struct ipu_isys_subdev, sd) struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif unsigned int pad, unsigned int which); unsigned int ipu_isys_mbus_code_to_bpp(u32 code); unsigned int ipu_isys_mbus_code_to_mipi(u32 code); u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code); enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code); int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_mbus_framefmt *ffmt, struct v4l2_rect *r, enum isys_subdev_prop_tgt tgt, unsigned int pad, unsigned int which); int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt); int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt); struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif unsigned int target, unsigned int pad, unsigned int which); int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt); int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt); int 
ipu_isys_subdev_get_sel(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_selection *sel); int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_selection *sel); int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_mbus_code_enum *code); int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt); int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); #define ISYS_SUBDEV_NO_PAD (-1) int ipu_isys_subdev_init(struct ipu_isys_subdev *asd, struct v4l2_subdev_ops *ops, unsigned int nr_ctrls, unsigned int num_pads, unsigned int num_source, unsigned int num_sink, unsigned int sd_flags, int src_pad_idx, int sink_pad_idx); void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd); #endif /* IPU_ISYS_SUBDEV_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-tpg.c000066400000000000000000000220551471702545500260560ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include "ipu.h" #include "ipu-bus.h" #include "ipu-isys.h" #include "ipu-isys-subdev.h" #include "ipu-isys-tpg.h" #include "ipu-isys-video.h" #include "ipu-platform-isys-csi2-reg.h" static const u32 tpg_supported_codes_pad[] = { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, 0, }; static const u32 *tpg_supported_codes[] = { tpg_supported_codes_pad, }; static struct v4l2_subdev_internal_ops tpg_sd_internal_ops = { .open = ipu_isys_subdev_open, .close = ipu_isys_subdev_close, }; static const struct v4l2_subdev_video_ops tpg_sd_video_ops = { .s_stream = tpg_set_stream, }; static int ipu_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl) { struct ipu_isys_tpg *tpg = container_of(container_of(ctrl->handler, struct ipu_isys_subdev, ctrl_handler), struct ipu_isys_tpg, asd); switch (ctrl->id) { case V4L2_CID_HBLANK: writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC); break; case V4L2_CID_VBLANK: writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC); break; case V4L2_CID_TEST_PATTERN: writel(ctrl->val, tpg->base + MIPI_GEN_REG_TPG_MODE); break; } return 0; } static const struct v4l2_ctrl_ops ipu_isys_tpg_ctrl_ops = { .s_ctrl = ipu_isys_tpg_s_ctrl, }; static s64 ipu_isys_tpg_rate(struct ipu_isys_tpg *tpg, unsigned int bpp) { return MIPI_GEN_PPC * IPU_ISYS_FREQ / bpp; } static const char *const tpg_mode_items[] = { "Ramp", "Checkerboard", /* Does not work, disabled. 
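   Entry 1 is masked out via tpg_mode.menu_skip_mask = 0x2 below, so
   VIDIOC_QUERYMENU skips it while the menu index numbering stays intact.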
*/ "Frame Based Colour", }; static struct v4l2_ctrl_config tpg_mode = { .ops = &ipu_isys_tpg_ctrl_ops, .id = V4L2_CID_TEST_PATTERN, .name = "Test Pattern", .type = V4L2_CTRL_TYPE_MENU, .min = 0, .max = ARRAY_SIZE(tpg_mode_items) - 1, .def = 0, .menu_skip_mask = 0x2, .qmenu = tpg_mode_items, }; static const struct v4l2_ctrl_config csi2_header_cfg = { .id = V4L2_CID_IPU_STORE_CSI2_HEADER, .name = "Store CSI-2 Headers", .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .step = 1, .def = 1, }; static void ipu_isys_tpg_init_controls(struct v4l2_subdev *sd) { struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); int hblank; u64 default_pixel_rate; hblank = 1024; tpg->hblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu_isys_tpg_ctrl_ops, V4L2_CID_HBLANK, 8, 65535, 1, hblank); tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu_isys_tpg_ctrl_ops, V4L2_CID_VBLANK, 8, 65535, 1, 1024); default_pixel_rate = ipu_isys_tpg_rate(tpg, 8); tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu_isys_tpg_ctrl_ops, V4L2_CID_PIXEL_RATE, default_pixel_rate, default_pixel_rate, 1, default_pixel_rate); if (tpg->pixel_rate) { tpg->pixel_rate->cur.val = default_pixel_rate; tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY; } v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL); tpg->store_csi2_header = v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &csi2_header_cfg, NULL); } static void tpg_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { fmt->format.field = V4L2_FIELD_NONE; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) *__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->which) = fmt->format; #else *__ipu_isys_get_ffmt(sd, state, fmt->pad, fmt->which) = fmt->format; #endif } static int ipu_isys_tpg_set_ffmt(struct v4l2_subdev *sd, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) struct v4l2_subdev_fh *cfg, #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) struct v4l2_subdev_pad_config *cfg, #else struct v4l2_subdev_state *state, #endif struct v4l2_subdev_format *fmt) { struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE].code; unsigned int bpp = ipu_isys_mbus_code_to_bpp(code); s64 tpg_rate = ipu_isys_tpg_rate(tpg, bpp); int rval; mutex_lock(&tpg->asd.mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt); #else rval = __ipu_isys_subdev_set_ffmt(sd, state, fmt); #endif mutex_unlock(&tpg->asd.mutex); if (rval || fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE) return rval; v4l2_ctrl_s_ctrl_int64(tpg->pixel_rate, tpg_rate); return 0; } static const struct ipu_isys_pixelformat * ipu_isys_tpg_try_fmt(struct ipu_isys_video *av, struct v4l2_pix_format_mplane *mpix) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) struct media_entity entity = av->vdev.entity; struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity.links[0].source->entity); #else struct media_link *link = list_first_entry(&av->vdev.entity.links, struct media_link, list); struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(link->source->entity); #endif struct ipu_isys_tpg *tpg; if (!sd) return NULL; tpg = to_ipu_isys_tpg(sd); return ipu_isys_video_try_fmt_vid_mplane(av, mpix, v4l2_ctrl_g_ctrl(tpg->store_csi2_header)); } static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = { .get_fmt = ipu_isys_subdev_get_ffmt, 
.set_fmt = ipu_isys_tpg_set_ffmt, .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, }; static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { switch (sub->type) { #ifdef IPU_TPG_FRAME_SYNC case V4L2_EVENT_FRAME_SYNC: return v4l2_event_subscribe(fh, sub, 10, NULL); #endif case V4L2_EVENT_CTRL: return v4l2_ctrl_subscribe_event(fh, sub); default: return -EINVAL; } }; /* V4L2 subdev core operations */ static const struct v4l2_subdev_core_ops tpg_sd_core_ops = { .subscribe_event = subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static struct v4l2_subdev_ops tpg_sd_ops = { .core = &tpg_sd_core_ops, .video = &tpg_sd_video_ops, .pad = &tpg_sd_pad_ops, }; static struct media_entity_operations tpg_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg) { v4l2_device_unregister_subdev(&tpg->asd.sd); ipu_isys_subdev_cleanup(&tpg->asd); ipu_isys_video_cleanup(&tpg->av); } int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg, struct ipu_isys *isys, void __iomem *base, void __iomem *sel, unsigned int index) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = TPG_PAD_SOURCE, .format = { .width = 4096, .height = 3072, }, }; int rval; tpg->isys = isys; tpg->base = base; tpg->sel = sel; tpg->index = index; tpg->asd.sd.entity.ops = &tpg_entity_ops; tpg->asd.ctrl_init = ipu_isys_tpg_init_controls; tpg->asd.isys = isys; /* Note: there is no FL_SINK pad here */ rval = ipu_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5, NR_OF_TPG_PADS, NR_OF_TPG_SOURCE_PADS, NR_OF_TPG_SINK_PADS, V4L2_SUBDEV_FL_HAS_EVENTS, TPG_PAD_SOURCE, ISYS_SUBDEV_NO_PAD); if (rval) return rval; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) tpg->asd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; #else tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; #endif tpg->asd.source = IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index; tpg->asd.supported_codes = tpg_supported_codes; tpg->asd.set_ffmt = tpg_set_ffmt; ipu_isys_subdev_set_ffmt(&tpg->asd.sd, NULL, &fmt); tpg->asd.sd.internal_ops = &tpg_sd_internal_ops; snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " TPG %u", index); v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd); rval = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd); if (rval) { dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); goto fail; } snprintf(tpg->av.vdev.name, sizeof(tpg->av.vdev.name), IPU_ISYS_ENTITY_PREFIX " TPG %u capture", index); tpg->av.isys = isys; tpg->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; tpg->av.pfmts = ipu_isys_pfmts_packed; tpg->av.try_fmt_vid_mplane = ipu_isys_tpg_try_fmt; tpg->av.prepare_fw_stream = ipu_isys_prepare_fw_cfg_default; tpg->av.packed = true; tpg->av.line_header_length = IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; tpg->av.line_footer_length = IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; tpg->av.aq.buf_prepare = ipu_isys_buf_prepare; tpg->av.aq.fill_frame_buff_set_pin = ipu_isys_buffer_to_fw_frame_buff_pin; tpg->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate; tpg->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); rval = ipu_isys_video_init(&tpg->av, &tpg->asd.sd.entity, TPG_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); if (rval) { dev_info(&isys->adev->dev, "can't init video node\n"); goto fail; } return 0; fail: ipu_isys_tpg_cleanup(tpg); return rval; } 
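/*
 * Worked example of the pixel rate advertised above through
 * V4L2_CID_PIXEL_RATE (a sketch with an assumed IS clock, not a value
 * taken from hardware documentation): with MIPI_GEN_PPC = 4 pixels per
 * clock and an IPU_ISYS_FREQ of 400 MHz (assumption), a RAW10 stream
 * (bpp = 10) gives
 *
 *	rate = ipu_isys_tpg_rate(tpg, 10)
 *	     = 4 * 400000000 / 10
 *	     = 160000000 pixels per second
 *
 * which is the value ipu_isys_tpg_set_ffmt() programs into the
 * read-only pixel rate control for userspace to read back.
 */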
ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-tpg.h000066400000000000000000000054641471702545500260700ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_ISYS_TPG_H #define IPU_ISYS_TPG_H #include #include #include #include "ipu-isys-subdev.h" #include "ipu-isys-video.h" #include "ipu-isys-queue.h" struct ipu_isys_tpg_pdata; struct ipu_isys; #define TPG_PAD_SOURCE 0 #define NR_OF_TPG_PADS 1 #define NR_OF_TPG_SOURCE_PADS 1 #define NR_OF_TPG_SINK_PADS 0 #define NR_OF_TPG_STREAMS 1 /* * PPC is 4 pixels for clock for RAW8, RAW10 and RAW12. * Source: FW validation test code. */ #define MIPI_GEN_PPC 4 #define MIPI_GEN_REG_COM_ENABLE 0x0 #define MIPI_GEN_REG_COM_DTYPE 0x4 /* RAW8, RAW10 or RAW12 */ #define MIPI_GEN_COM_DTYPE_RAW(n) (((n) - 8) / 2) #define MIPI_GEN_REG_COM_VTYPE 0x8 #define MIPI_GEN_REG_COM_VCHAN 0xc #define MIPI_GEN_REG_COM_WCOUNT 0x10 #define MIPI_GEN_REG_PRBS_RSTVAL0 0x14 #define MIPI_GEN_REG_PRBS_RSTVAL1 0x18 #define MIPI_GEN_REG_SYNG_FREE_RUN 0x1c #define MIPI_GEN_REG_SYNG_PAUSE 0x20 #define MIPI_GEN_REG_SYNG_NOF_FRAMES 0x24 #define MIPI_GEN_REG_SYNG_NOF_PIXELS 0x28 #define MIPI_GEN_REG_SYNG_NOF_LINES 0x2c #define MIPI_GEN_REG_SYNG_HBLANK_CYC 0x30 #define MIPI_GEN_REG_SYNG_VBLANK_CYC 0x34 #define MIPI_GEN_REG_SYNG_STAT_HCNT 0x38 #define MIPI_GEN_REG_SYNG_STAT_VCNT 0x3c #define MIPI_GEN_REG_SYNG_STAT_FCNT 0x40 #define MIPI_GEN_REG_SYNG_STAT_DONE 0x44 #define MIPI_GEN_REG_TPG_MODE 0x48 #define MIPI_GEN_REG_TPG_HCNT_MASK 0x4c #define MIPI_GEN_REG_TPG_VCNT_MASK 0x50 #define MIPI_GEN_REG_TPG_XYCNT_MASK 0x54 #define MIPI_GEN_REG_TPG_HCNT_DELTA 0x58 #define MIPI_GEN_REG_TPG_VCNT_DELTA 0x5c #define MIPI_GEN_REG_TPG_R1 0x60 #define MIPI_GEN_REG_TPG_G1 0x64 #define MIPI_GEN_REG_TPG_B1 0x68 #define MIPI_GEN_REG_TPG_R2 0x6c #define MIPI_GEN_REG_TPG_G2 0x70 #define MIPI_GEN_REG_TPG_B2 0x74 /* * struct ipu_isys_tpg * * @nlanes: number of lanes in the receiver */ struct ipu_isys_tpg { struct ipu_isys_tpg_pdata *pdata; struct ipu_isys *isys; struct ipu_isys_subdev asd; struct ipu_isys_video av; void __iomem *base; void __iomem *sel; unsigned int index; struct v4l2_ctrl *hblank; struct v4l2_ctrl *vblank; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *store_csi2_header; }; #define to_ipu_isys_tpg(sd) \ container_of(to_ipu_isys_subdev(sd), \ struct ipu_isys_tpg, asd) #ifdef IPU_TPG_FRAME_SYNC void ipu_isys_tpg_sof_event(struct ipu_isys_tpg *tpg); void ipu_isys_tpg_eof_event(struct ipu_isys_tpg *tpg); #endif int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg, struct ipu_isys *isys, void __iomem *base, void __iomem *sel, unsigned int index); void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg); int tpg_set_stream(struct v4l2_subdev *sd, int enable); #endif /* IPU_ISYS_TPG_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-video.c000066400000000000000000001546511471702545500264020ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) #include #else #include #endif #include #include #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) #include #endif #include "ipu.h" #include "ipu-bus.h" #include "ipu-cpd.h" #include "ipu-isys.h" #include "ipu-isys-video.h" #include "ipu-platform.h" #include "ipu-platform-regs.h" #include "ipu-platform-buttress-regs.h" #include "ipu-trace.h" #include 
"ipu-fw-isys.h" #include "ipu-fw-com.h" #define IPU_IS_DEFAULT_FREQ \ (IPU_IS_FREQ_RATIO_BASE * IPU_IS_FREQ_CTL_DEFAULT_RATIO) #define IPU6SE_IS_DEFAULT_FREQ \ (IPU_IS_FREQ_RATIO_BASE * IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO) /* use max resolution pixel rate by default */ #define DEFAULT_PIXEL_RATE (360000000ULL * 2 * 4 / 10) #define MAX_VIDEO_DEVICES 8 static int video_nr[MAX_VIDEO_DEVICES] = { [0 ...(MAX_VIDEO_DEVICES - 1)] = -1 }; module_param_array(video_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "video device numbers (-1=auto, 0=/dev/video0, etc.)"); const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[] = { {V4L2_PIX_FMT_Y10, 16, 10, 0, MEDIA_BUS_FMT_Y10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_Y8I, 16, 16, 0, MEDIA_BUS_FMT_VYUY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_Z16, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16, IPU_FW_ISYS_FRAME_FORMAT_YUYV}, {V4L2_PIX_FMT_NV16, 16, 16, 8, MEDIA_BUS_FMT_UYVY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_NV16}, {V4L2_PIX_FMT_XRGB32, 32, 32, 0, MEDIA_BUS_FMT_RGB565_1X16, IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, {V4L2_PIX_FMT_Y12I, 24, 24, 0, MEDIA_BUS_FMT_RGB888_1X24, IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, {V4L2_PIX_FMT_XBGR32, 32, 32, 0, MEDIA_BUS_FMT_RGB888_1X24, IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, /* Raw bayer formats. */ {V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_GREY, 8, 8, 0, MEDIA_BUS_FMT_Y8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, #ifdef V4L2_PIX_FMT_Y210 {V4L2_PIX_FMT_Y210, 20, 20, 0, MEDIA_BUS_FMT_YUYV10_1X20, IPU_FW_ISYS_FRAME_FORMAT_YUYV}, #endif {} }; const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[] = { {V4L2_PIX_FMT_Y10, 10, 10, 0, MEDIA_BUS_FMT_Y10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW10}, #ifdef V4L2_PIX_FMT_Y210 {V4L2_PIX_FMT_Y210, 20, 20, 0, MEDIA_BUS_FMT_YUYV10_1X20, IPU_FW_ISYS_FRAME_FORMAT_YUYV}, #endif {V4L2_PIX_FMT_Y8I, 16, 16, 0, MEDIA_BUS_FMT_VYUY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_Z16, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, IPU_FW_ISYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16, IPU_FW_ISYS_FRAME_FORMAT_YUYV}, {V4L2_PIX_FMT_RGB565, 16, 16, 0, MEDIA_BUS_FMT_RGB565_1X16, IPU_FW_ISYS_FRAME_FORMAT_RGB565}, {V4L2_PIX_FMT_BGR24, 24, 24, 0, 
MEDIA_BUS_FMT_RGB888_1X24, IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, #ifndef V4L2_PIX_FMT_SBGGR12P {V4L2_PIX_FMT_SBGGR12, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGBRG12, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGRBG12, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SRGGB12, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, #else /* V4L2_PIX_FMT_SBGGR12P */ {V4L2_PIX_FMT_SBGGR12P, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGBRG12P, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGRBG12P, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SRGGB12P, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW12}, #endif /* V4L2_PIX_FMT_SBGGR12P */ {V4L2_PIX_FMT_SBGGR10P, 10, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SGBRG10P, 10, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SGRBG10P, 10, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SRGGB10P, 10, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_GREY, 8, 8, 0, MEDIA_BUS_FMT_Y8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {} }; static int video_open(struct file *file) { struct ipu_isys_video *av = video_drvdata(file); struct ipu_isys *isys = av->isys; struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev); struct ipu_device *isp = adev->isp; int rval; const struct ipu_isys_internal_pdata *ipdata; mutex_lock(&isys->mutex); if (isys->reset_needed || isp->flr_done) { mutex_unlock(&isys->mutex); dev_warn(&isys->adev->dev, "isys power cycle required\n"); return -EIO; } mutex_unlock(&isys->mutex); rval = pm_runtime_get_sync(&isys->adev->dev); if (rval < 0) { pm_runtime_put_noidle(&isys->adev->dev); return rval; } rval = v4l2_fh_open(file); if (rval) goto out_power_down; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) rval = ipu_pipeline_pm_use(&av->vdev.entity, 1); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) rval = v4l2_pipeline_pm_use(&av->vdev.entity, 1); #else rval = v4l2_pipeline_pm_get(&av->vdev.entity); #endif if (rval) goto out_v4l2_fh_release; mutex_lock(&isys->mutex); if (isys->video_opened++) { /* Already open */ mutex_unlock(&isys->mutex); return 0; } ipdata = isys->pdata->ipdata; ipu_configure_spc(adev->isp, &ipdata->hw_variant, IPU_CPD_PKG_DIR_ISYS_SERVER_IDX, isys->pdata->base, isys->pkg_dir, isys->pkg_dir_dma_addr); /* * Buffers could have been left to wrong queue at last closure. * Move them now back to empty buffer queue. */ ipu_cleanup_fw_msg_bufs(isys); if (isys->fwcom) { /* * Something went wrong in previous shutdown. As we are now * restarting isys we can safely delete old context. 
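 * We only reach this point on the very first open (the video_opened
 * check above returns early otherwise), so no other video node can
 * still be using the stale context.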
*/ dev_err(&isys->adev->dev, "Clearing old context\n"); ipu_fw_isys_cleanup(isys); } rval = ipu_fw_isys_init(av->isys, ipdata->num_parallel_streams); if (rval < 0) goto out_lib_init; mutex_unlock(&isys->mutex); return 0; out_lib_init: isys->video_opened--; mutex_unlock(&isys->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) ipu_pipeline_pm_use(&av->vdev.entity, 0); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) v4l2_pipeline_pm_use(&av->vdev.entity, 0); #else v4l2_pipeline_pm_put(&av->vdev.entity); #endif out_v4l2_fh_release: v4l2_fh_release(file); out_power_down: pm_runtime_put(&isys->adev->dev); return rval; } static int video_release(struct file *file) { struct ipu_isys_video *av = video_drvdata(file); int ret = 0; vb2_fop_release(file); mutex_lock(&av->isys->mutex); if (!--av->isys->video_opened) { ipu_fw_isys_close(av->isys); if (av->isys->fwcom) { av->isys->reset_needed = true; ret = -EIO; } } mutex_unlock(&av->isys->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) ipu_pipeline_pm_use(&av->vdev.entity, 0); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) v4l2_pipeline_pm_use(&av->vdev.entity, 0); #else v4l2_pipeline_pm_put(&av->vdev.entity); #endif if (av->isys->reset_needed) pm_runtime_put_sync(&av->isys->adev->dev); else pm_runtime_put(&av->isys->adev->dev); return ret; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) static struct media_pad *other_pad(struct media_pad *pad) { struct media_link *link; list_for_each_entry(link, &pad->entity->links, list) { if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) continue; return link->source == pad ? link->sink : link->source; } WARN_ON(1); return NULL; } #endif const struct ipu_isys_pixelformat * ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) struct media_pad *pad = av->vdev.entity.pads[0].flags & MEDIA_PAD_FL_SOURCE ? av->vdev.entity.links[0].sink : av->vdev.entity.links[0].source; #else struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]); #endif struct v4l2_subdev *sd; const u32 *supported_codes; const struct ipu_isys_pixelformat *pfmt; if (!pad || !pad->entity) { WARN_ON(1); return NULL; } sd = media_entity_to_v4l2_subdev(pad->entity); supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index]; for (pfmt = av->pfmts; pfmt->bpp; pfmt++) { unsigned int i; if (pfmt->pixelformat != pixelformat) continue; for (i = 0; supported_codes[i]; i++) { if (pfmt->code == supported_codes[i]) return pfmt; } } /* Not found. Get the default, i.e. the first defined one. */ for (pfmt = av->pfmts; pfmt->bpp; pfmt++) { if (pfmt->code == *supported_codes) return pfmt; } WARN_ON(1); return NULL; } int ipu_isys_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct ipu_isys_video *av = video_drvdata(file); strscpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver)); strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", av->isys->media_dev.bus_info); return 0; } int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) { struct ipu_isys_video *av = video_drvdata(file); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) struct media_pad *pad = av->vdev.entity.pads[0].flags & MEDIA_PAD_FL_SOURCE ? 
av->vdev.entity.links[0].sink : av->vdev.entity.links[0].source; #else struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]); #endif struct v4l2_subdev *sd; const u32 *supported_codes; const struct ipu_isys_pixelformat *pfmt; u32 index; if (!pad || !pad->entity) return -EINVAL; sd = media_entity_to_v4l2_subdev(pad->entity); supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index]; /* Walk the 0-terminated array for the f->index-th code. */ for (index = f->index; *supported_codes && index; index--, supported_codes++) { }; if (!*supported_codes) return -EINVAL; f->flags = 0; /* Code found */ for (pfmt = av->pfmts; pfmt->bpp; pfmt++) if (pfmt->code == *supported_codes) break; if (!pfmt->bpp) { dev_warn(&av->isys->adev->dev, "Format not found in mapping table."); return -EINVAL; } f->pixelformat = pfmt->pixelformat; return 0; } static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *fmt) { struct ipu_isys_video *av = video_drvdata(file); fmt->fmt.pix_mp = av->mpix; return 0; } const struct ipu_isys_pixelformat * ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av, struct v4l2_pix_format_mplane *mpix) { return ipu_isys_video_try_fmt_vid_mplane(av, mpix, 0); } const struct ipu_isys_pixelformat * ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av, struct v4l2_pix_format_mplane *mpix, int store_csi2_header) { const struct ipu_isys_pixelformat *pfmt = ipu_isys_get_pixelformat(av, mpix->pixelformat); if (!pfmt) return NULL; mpix->pixelformat = pfmt->pixelformat; mpix->num_planes = 1; mpix->width = clamp(mpix->width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); mpix->height = clamp(mpix->height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); if (!av->packed) mpix->plane_fmt[0].bytesperline = mpix->width * DIV_ROUND_UP(pfmt->bpp_planar ? pfmt->bpp_planar : pfmt->bpp, BITS_PER_BYTE); else if (store_csi2_header) mpix->plane_fmt[0].bytesperline = DIV_ROUND_UP(av->line_header_length + av->line_footer_length + (unsigned int)mpix->width * pfmt->bpp, BITS_PER_BYTE); else mpix->plane_fmt[0].bytesperline = DIV_ROUND_UP((unsigned int)mpix->width * pfmt->bpp, BITS_PER_BYTE); mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline, av->isys->line_align); if (pfmt->bpp_planar) mpix->plane_fmt[0].bytesperline = mpix->plane_fmt[0].bytesperline * pfmt->bpp / pfmt->bpp_planar; /* * (height + 1) * bytesperline due to a hardware issue: the DMA unit * is a power of two, and a line should be transferred as few units * as possible. The result is that up to line length more data than * the image size may be transferred to memory after the image. * Another limition is the GDA allocation unit size. For low * resolution it gives a bigger number. Use larger one to avoid * memory corruption. 
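 *
 * Worked example with illustrative numbers only: a 1920x1080 RAW10
 * packed stream with bytesperline = 2400 and an assumed
 * isys_dma_overshoot of 16384 bytes ends up with
 * sizeimage = 2400 * 1080 + max(2400, 16384) = 2608384 bytes in the
 * computation just below, before any compression alignment.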
*/ mpix->plane_fmt[0].sizeimage = max(max(mpix->plane_fmt[0].sizeimage, mpix->plane_fmt[0].bytesperline * mpix->height + max(mpix->plane_fmt[0].bytesperline, av->isys->pdata->ipdata->isys_dma_overshoot)), 1U); if (av->compression_ctrl) av->compression = v4l2_ctrl_g_ctrl(av->compression_ctrl); /* overwrite bpl/height with compression alignment */ if (av->compression) { u32 planar_tile_status_size, tile_status_size; u64 planar_bytes; mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline, IPU_ISYS_COMPRESSION_LINE_ALIGN); mpix->height = ALIGN(mpix->height, IPU_ISYS_COMPRESSION_HEIGHT_ALIGN); mpix->plane_fmt[0].sizeimage = ALIGN(mpix->plane_fmt[0].bytesperline * mpix->height, IPU_ISYS_COMPRESSION_PAGE_ALIGN); /* ISYS compression only for RAW and single plannar */ planar_bytes = mul_u32_u32(mpix->plane_fmt[0].bytesperline, mpix->height); planar_tile_status_size = DIV_ROUND_UP_ULL((planar_bytes / IPU_ISYS_COMPRESSION_TILE_SIZE_BYTES) * IPU_ISYS_COMPRESSION_TILE_STATUS_BITS, BITS_PER_BYTE); tile_status_size = ALIGN(planar_tile_status_size, IPU_ISYS_COMPRESSION_PAGE_ALIGN); /* tile status buffer offsets relative to buffer base address */ av->ts_offsets[0] = mpix->plane_fmt[0].sizeimage; mpix->plane_fmt[0].sizeimage += tile_status_size; dev_dbg(&av->isys->adev->dev, "cmprs: bpl:%d, height:%d img size:%d, ts_sz:%d\n", mpix->plane_fmt[0].bytesperline, mpix->height, av->ts_offsets[0], tile_status_size); } memset(mpix->plane_fmt[0].reserved, 0, sizeof(mpix->plane_fmt[0].reserved)); if (mpix->field == V4L2_FIELD_ANY) mpix->field = V4L2_FIELD_NONE; /* Use defaults */ mpix->colorspace = V4L2_COLORSPACE_RAW; mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; mpix->quantization = V4L2_QUANTIZATION_DEFAULT; mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT; return pfmt; } static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct ipu_isys_video *av = video_drvdata(file); if (av->aq.vbq.streaming) return -EBUSY; av->pfmt = av->try_fmt_vid_mplane(av, &f->fmt.pix_mp); av->mpix = f->fmt.pix_mp; return 0; } static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct ipu_isys_video *av = video_drvdata(file); av->try_fmt_vid_mplane(av, &f->fmt.pix_mp); return 0; } static long ipu_isys_vidioc_private(struct file *file, void *fh, bool valid_prio, unsigned int cmd, void *arg) { struct ipu_isys_video *av = video_drvdata(file); int ret = 0; switch (cmd) { case VIDIOC_IPU_GET_DRIVER_VERSION: *(u32 *)arg = IPU_DRIVER_VERSION; break; default: dev_dbg(&av->isys->adev->dev, "unsupported private ioctl %x\n", cmd); } return ret; } static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *input) { if (input->index > 0) return -EINVAL; strscpy(input->name, "camera", sizeof(input->name)); input->type = V4L2_INPUT_TYPE_CAMERA; return 0; } static int vidioc_g_input(struct file *file, void *fh, unsigned int *input) { *input = 0; return 0; } static int vidioc_s_input(struct file *file, void *fh, unsigned int input) { return input == 0 ? 0 : -EINVAL; } /* * Return true if an entity directly connected to an Iunit entity is * an image source for the ISP. This can be any external directly * connected entity or any of the test pattern generators in the * Iunit. */ static bool is_external(struct ipu_isys_video *av, struct media_entity *entity) { struct v4l2_subdev *sd; /* All video nodes are ours. 
*/ if (!is_media_entity_v4l2_subdev(entity)) return false; sd = media_entity_to_v4l2_subdev(entity); if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) return true; return false; } static int link_validate(struct media_link *link) { struct ipu_isys_video *av = container_of(link->sink, struct ipu_isys_video, pad); struct ipu_isys_pipeline *ip = &av->ip; struct v4l2_subdev *sd; if (!link->source->entity) return -EINVAL; sd = media_entity_to_v4l2_subdev(link->source->entity); if (is_external(av, link->source->entity)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) ip->external = media_entity_remote_pad(av->vdev.entity.pads); #else ip->external = media_pad_remote_pad_first(av->vdev.entity.pads); #endif ip->source = to_ipu_isys_subdev(sd)->source; } ip->nr_queues++; return 0; } static void set_buttress_isys_freq(struct ipu_isys_video *av, bool max) { struct ipu_device *isp = av->isys->adev->isp; if (max) { ipu_buttress_isys_freq_set(isp, BUTTRESS_MAX_FORCE_IS_FREQ); } else { if (ipu_ver == IPU_VER_6SE) ipu_buttress_isys_freq_set(isp, IPU6SE_IS_DEFAULT_FREQ); else ipu_buttress_isys_freq_set(isp, IPU_IS_DEFAULT_FREQ); } } static void get_stream_opened(struct ipu_isys_video *av) { unsigned long flags; spin_lock_irqsave(&av->isys->lock, flags); av->isys->stream_opened++; spin_unlock_irqrestore(&av->isys->lock, flags); if (av->isys->stream_opened > 1) set_buttress_isys_freq(av, true); } static void put_stream_opened(struct ipu_isys_video *av) { unsigned long flags; spin_lock_irqsave(&av->isys->lock, flags); av->isys->stream_opened--; spin_unlock_irqrestore(&av->isys->lock, flags); if (av->isys->stream_opened <= 1) set_buttress_isys_freq(av, false); } static int get_stream_handle(struct ipu_isys_video *av) { struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); unsigned int stream_handle; unsigned long flags; spin_lock_irqsave(&av->isys->lock, flags); for (stream_handle = 0; stream_handle < IPU_ISYS_MAX_STREAMS; stream_handle++) if (!av->isys->pipes[stream_handle]) break; if (stream_handle == IPU_ISYS_MAX_STREAMS) { spin_unlock_irqrestore(&av->isys->lock, flags); return -EBUSY; } av->isys->pipes[stream_handle] = ip; ip->stream_handle = stream_handle; spin_unlock_irqrestore(&av->isys->lock, flags); return 0; } static void put_stream_handle(struct ipu_isys_video *av) { struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); unsigned long flags; spin_lock_irqsave(&av->isys->lock, flags); av->isys->pipes[ip->stream_handle] = NULL; ip->stream_handle = -1; spin_unlock_irqrestore(&av->isys->lock, flags); } static int get_external_facing_format(struct ipu_isys_pipeline *ip, struct v4l2_subdev_format *format) { struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); struct v4l2_subdev *sd; struct media_pad *external_facing; if (!ip->external->entity) { WARN_ON(1); return -ENODEV; } sd = media_entity_to_v4l2_subdev(ip->external->entity); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ? ip->external : media_entity_remote_pad(ip->external); #else external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ? 
ip->external : media_pad_remote_pad_first(ip->external); #endif if (WARN_ON(!external_facing)) { dev_warn(&av->isys->adev->dev, "no external facing pad --- driver bug?\n"); return -EINVAL; } format->which = V4L2_SUBDEV_FORMAT_ACTIVE; format->pad = 0; sd = media_entity_to_v4l2_subdev(external_facing->entity); return v4l2_subdev_call(sd, pad, get_fmt, NULL, format); } static void short_packet_queue_destroy(struct ipu_isys_pipeline *ip) { struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs attrs; #else unsigned long attrs; #endif #endif unsigned int i; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) init_dma_attrs(&attrs); dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); #else attrs = DMA_ATTR_NON_CONSISTENT; #endif #endif if (!ip->short_packet_bufs) return; for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) { if (ip->short_packet_bufs[i].buffer) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) dma_free_coherent(&av->isys->adev->dev, ip->short_packet_buffer_size, ip->short_packet_bufs[i].buffer, ip->short_packet_bufs[i].dma_addr); #else dma_free_attrs(&av->isys->adev->dev, ip->short_packet_buffer_size, ip->short_packet_bufs[i].buffer, ip->short_packet_bufs[i].dma_addr, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) &attrs); #else attrs); #endif #endif } kfree(ip->short_packet_bufs); ip->short_packet_bufs = NULL; } static int short_packet_queue_setup(struct ipu_isys_pipeline *ip) { struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); struct v4l2_subdev_format source_fmt = { 0 }; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs attrs; #else unsigned long attrs; #endif #endif unsigned int i; int rval; size_t buf_size; INIT_LIST_HEAD(&ip->pending_interlaced_bufs); ip->cur_field = V4L2_FIELD_TOP; if (ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { ip->short_packet_trace_index = 0; return 0; } rval = get_external_facing_format(ip, &source_fmt); if (rval) return rval; buf_size = IPU_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt.format.height); ip->short_packet_buffer_size = buf_size; ip->num_short_packet_lines = IPU_ISYS_SHORT_PACKET_PKT_LINES(source_fmt.format.height); /* Initialize short packet queue. 
*/ INIT_LIST_HEAD(&ip->short_packet_incoming); INIT_LIST_HEAD(&ip->short_packet_active); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) init_dma_attrs(&attrs); dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); #else attrs = DMA_ATTR_NON_CONSISTENT; #endif #endif ip->short_packet_bufs = kzalloc(sizeof(struct ipu_isys_private_buffer) * IPU_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL); if (!ip->short_packet_bufs) return -ENOMEM; for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) { struct ipu_isys_private_buffer *buf = &ip->short_packet_bufs[i]; buf->index = (unsigned int)i; buf->ip = ip; buf->ib.type = IPU_ISYS_SHORT_PACKET_BUFFER; buf->bytesused = buf_size; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) buf->buffer = dma_alloc_coherent(&av->isys->adev->dev, buf_size, &buf->dma_addr, GFP_KERNEL); #else buf->buffer = dma_alloc_attrs(&av->isys->adev->dev, buf_size, &buf->dma_addr, GFP_KERNEL, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) &attrs); #else attrs); #endif #endif if (!buf->buffer) { short_packet_queue_destroy(ip); return -ENOMEM; } list_add(&buf->ib.head, &ip->short_packet_incoming); } return 0; } static void csi_short_packet_prepare_fw_cfg(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_stream_cfg_data_abi *cfg) { int input_pin = cfg->nof_input_pins++; int output_pin = cfg->nof_output_pins++; struct ipu_fw_isys_input_pin_info_abi *input_info = &cfg->input_pins[input_pin]; struct ipu_fw_isys_output_pin_info_abi *output_info = &cfg->output_pins[output_pin]; struct ipu_isys *isys = ip->isys; /* * Setting dt as IPU_ISYS_SHORT_PACKET_GENERAL_DT will cause * MIPI receiver to receive all MIPI short packets. */ input_info->dt = IPU_ISYS_SHORT_PACKET_GENERAL_DT; input_info->input_res.width = IPU_ISYS_SHORT_PACKET_WIDTH; input_info->input_res.height = ip->num_short_packet_lines; ip->output_pins[output_pin].pin_ready = ipu_isys_queue_short_packet_ready; ip->output_pins[output_pin].aq = NULL; ip->short_packet_output_pin = output_pin; output_info->input_pin_id = input_pin; output_info->output_res.width = IPU_ISYS_SHORT_PACKET_WIDTH; output_info->output_res.height = ip->num_short_packet_lines; output_info->stride = IPU_ISYS_SHORT_PACKET_WIDTH * IPU_ISYS_SHORT_PACKET_UNITSIZE; output_info->pt = IPU_ISYS_SHORT_PACKET_PT; output_info->ft = IPU_ISYS_SHORT_PACKET_FT; output_info->send_irq = 1; memset(output_info->ts_offsets, 0, sizeof(output_info->ts_offsets)); output_info->s2m_pixel_soc_pixel_remapping = S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; output_info->csi_be_soc_pixel_remapping = CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; output_info->sensor_type = isys->sensor_info.sensor_metadata; output_info->snoopable = true; output_info->error_handling_enable = false; } void ipu_isys_prepare_fw_cfg_default(struct ipu_isys_video *av, struct ipu_fw_isys_stream_cfg_data_abi *cfg) { struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); struct ipu_isys_queue *aq = &av->aq; struct ipu_fw_isys_output_pin_info_abi *pin_info; struct ipu_isys *isys = av->isys; unsigned int type_index, type; int pin = cfg->nof_output_pins++; aq->fw_output = pin; ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready; ip->output_pins[pin].aq = aq; pin_info = &cfg->output_pins[pin]; pin_info->input_pin_id = 0; pin_info->output_res.width = av->mpix.width; pin_info->output_res.height = av->mpix.height; if (!av->pfmt->bpp_planar) pin_info->stride = av->mpix.plane_fmt[0].bytesperline; else 
pin_info->stride = ALIGN(DIV_ROUND_UP(av->mpix.width * av->pfmt->bpp_planar, BITS_PER_BYTE), av->isys->line_align); pin_info->pt = aq->css_pin_type; pin_info->ft = av->pfmt->css_pixelformat; pin_info->send_irq = 1; memset(pin_info->ts_offsets, 0, sizeof(pin_info->ts_offsets)); pin_info->s2m_pixel_soc_pixel_remapping = S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; pin_info->csi_be_soc_pixel_remapping = CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING; cfg->vc = 0; switch (pin_info->pt) { /* non-snoopable sensor data to PSYS */ case IPU_FW_ISYS_PIN_TYPE_RAW_NS: type_index = IPU_FW_ISYS_VC1_SENSOR_DATA; pin_info->sensor_type = isys->sensor_types[type_index]++; pin_info->snoopable = false; pin_info->error_handling_enable = false; type = isys->sensor_types[type_index]; if (type > isys->sensor_info.vc1_data_end) isys->sensor_types[type_index] = isys->sensor_info.vc1_data_start; break; /* snoopable META/Stats data to CPU */ case IPU_FW_ISYS_PIN_TYPE_METADATA_0: case IPU_FW_ISYS_PIN_TYPE_METADATA_1: pin_info->sensor_type = isys->sensor_info.sensor_metadata; pin_info->snoopable = true; pin_info->error_handling_enable = false; break; case IPU_FW_ISYS_PIN_TYPE_RAW_SOC: if (av->compression) { type_index = IPU_FW_ISYS_VC1_SENSOR_DATA; pin_info->sensor_type = isys->sensor_types[type_index]++; pin_info->snoopable = false; pin_info->error_handling_enable = false; type = isys->sensor_types[type_index]; if (type > isys->sensor_info.vc1_data_end) isys->sensor_types[type_index] = isys->sensor_info.vc1_data_start; } else { type_index = IPU_FW_ISYS_VC0_SENSOR_DATA; pin_info->sensor_type = isys->sensor_types[type_index]++; pin_info->snoopable = true; pin_info->error_handling_enable = false; type = isys->sensor_types[type_index]; if (type > isys->sensor_info.vc0_data_end) isys->sensor_types[type_index] = isys->sensor_info.vc0_data_start; } break; case IPU_FW_ISYS_PIN_TYPE_MIPI: type_index = IPU_FW_ISYS_VC0_SENSOR_DATA; pin_info->sensor_type = isys->sensor_types[type_index]++; pin_info->snoopable = true; pin_info->error_handling_enable = false; type = isys->sensor_types[type_index]; if (type > isys->sensor_info.vc0_data_end) isys->sensor_types[type_index] = isys->sensor_info.vc0_data_start; break; default: dev_err(&av->isys->adev->dev, "Unknown pin type, use metadata type as default\n"); pin_info->sensor_type = isys->sensor_info.sensor_metadata; pin_info->snoopable = true; pin_info->error_handling_enable = false; } if (av->compression) { pin_info->payload_buf_size = av->mpix.plane_fmt[0].sizeimage; pin_info->reserve_compression = av->compression; pin_info->ts_offsets[0] = av->ts_offsets[0]; } } static unsigned int ipu_isys_get_compression_scheme(u32 code) { switch (code) { case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: return 3; default: return 0; } } static unsigned int get_comp_format(u32 code) { unsigned int predictor = 0; /* currently hard coded */ unsigned int udt = ipu_isys_mbus_code_to_mipi(code); unsigned int scheme = ipu_isys_get_compression_scheme(code); /* if data type is not user defined return here */ if (udt < IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1) || udt > IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(8)) return 0; /* * For each user defined type (1..8) there is configuration bitfield for * decompression. 
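 *
 * Worked example (assuming ipu_isys_mbus_code_to_mipi() maps the stream
 * to user defined type 3): a 10-bit Bayer format compressed with DPCM8
 * uses scheme 011 (10 - 8 - 10) and predictor 0, so the value returned
 * below is (0 << 3 | 3) << ((3 - 1) * 4) = 0x300. The per-type field
 * layout and scheme encodings are: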
* * | bit 3 | bits 2:0 | * | predictor | scheme | * compression schemes: * 000 = no compression * 001 = 10 - 6 - 10 * 010 = 10 - 7 - 10 * 011 = 10 - 8 - 10 * 100 = 12 - 6 - 12 * 101 = 12 - 7 - 12 * 110 = 12 - 8 - 12 */ return ((predictor << 3) | scheme) << ((udt - IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) * 4); } /* Create stream and start it using the CSS FW ABI. */ static int start_stream_firmware(struct ipu_isys_video *av, struct ipu_isys_buffer_list *bl) { struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); struct device *dev = &av->isys->adev->dev; struct v4l2_subdev_selection sel_fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .target = V4L2_SEL_TGT_CROP, .pad = CSI2_BE_PAD_SOURCE, }; struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg; struct isys_fw_msgs *msg = NULL; struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; struct ipu_isys_queue *aq; struct ipu_isys_video *isl_av = NULL; struct v4l2_subdev_format source_fmt = { 0 }; struct v4l2_subdev *be_sd = NULL; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0) struct media_pad *source_pad = media_entity_remote_pad(&av->pad); #else struct media_pad *source_pad = media_pad_remote_pad_first(&av->pad); #endif struct ipu_fw_isys_cropping_abi *crop; enum ipu_fw_isys_send_type send_type; int rval, rvalout, tout; rval = get_external_facing_format(ip, &source_fmt); if (rval) return rval; msg = ipu_get_fw_msg_buf(ip); if (!msg) return -ENOMEM; stream_cfg = to_stream_cfg_msg_buf(msg); stream_cfg->compfmt = get_comp_format(source_fmt.format.code); stream_cfg->input_pins[0].input_res.width = source_fmt.format.width; stream_cfg->input_pins[0].input_res.height = source_fmt.format.height; stream_cfg->input_pins[0].dt = ipu_isys_mbus_code_to_mipi(source_fmt.format.code); stream_cfg->input_pins[0].mapped_dt = N_IPU_FW_ISYS_MIPI_DATA_TYPE; stream_cfg->input_pins[0].mipi_decompression = IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION; stream_cfg->input_pins[0].capture_mode = IPU_FW_ISYS_CAPTURE_MODE_REGULAR; if (ip->csi2 && !v4l2_ctrl_g_ctrl(ip->csi2->store_csi2_header)) stream_cfg->input_pins[0].mipi_store_mode = IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER; stream_cfg->src = ip->source; stream_cfg->vc = 0; stream_cfg->isl_use = ip->isl_mode; stream_cfg->nof_input_pins = 1; stream_cfg->sensor_type = IPU_FW_ISYS_SENSOR_MODE_NORMAL; /* * Only CSI2-BE and SOC BE has the capability to do crop, * so get the crop info from csi2-be or csi2-be-soc. */ if (ip->csi2_be) { be_sd = &ip->csi2_be->asd.sd; } else if (ip->csi2_be_soc) { be_sd = &ip->csi2_be_soc->asd.sd; if (source_pad) sel_fmt.pad = source_pad->index; } crop = &stream_cfg->crop; if (be_sd && !v4l2_subdev_call(be_sd, pad, get_selection, NULL, &sel_fmt)) { crop->left_offset = sel_fmt.r.left; crop->top_offset = sel_fmt.r.top; crop->right_offset = sel_fmt.r.left + sel_fmt.r.width; crop->bottom_offset = sel_fmt.r.top + sel_fmt.r.height; } else { crop->right_offset = source_fmt.format.width; crop->bottom_offset = source_fmt.format.height; } /* * If the CSI-2 backend's video node is part of the pipeline * it must be arranged first in the output pin list. This is * the most probably a firmware requirement. 
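 * The list manipulation below achieves that by moving the CSI-2 BE
 * queue to the head of ip->queues, so its output pin is filled in
 * first when the queues are walked.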
*/ if (ip->isl_mode == IPU_ISL_CSI2_BE) isl_av = &ip->csi2_be->av; if (isl_av) { struct ipu_isys_queue *safe; list_for_each_entry_safe(aq, safe, &ip->queues, node) { struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); if (av != isl_av) continue; list_del(&aq->node); list_add(&aq->node, &ip->queues); break; } } list_for_each_entry(aq, &ip->queues, node) { struct ipu_isys_video *__av = ipu_isys_queue_to_video(aq); __av->prepare_fw_stream(__av, stream_cfg); } if (ip->interlaced && ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) csi_short_packet_prepare_fw_cfg(ip, stream_cfg); ip->nr_output_pins = stream_cfg->nof_output_pins; rval = get_stream_handle(av); if (rval) { dev_dbg(dev, "Can't get stream_handle\n"); return rval; } reinit_completion(&ip->stream_open_completion); ipu_fw_isys_set_params(stream_cfg); ipu_fw_isys_dump_stream_cfg(dev, stream_cfg); rval = ipu_fw_isys_complex_cmd(av->isys, ip->stream_handle, stream_cfg, to_dma_addr(msg), sizeof(*stream_cfg), IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN); if (rval < 0) { dev_err(dev, "can't open stream (%d)\n", rval); ipu_put_fw_mgs_buf(av->isys, (uintptr_t)stream_cfg); goto out_put_stream_handle; } get_stream_opened(av); tout = wait_for_completion_timeout(&ip->stream_open_completion, IPU_LIB_CALL_TIMEOUT_JIFFIES); ipu_put_fw_mgs_buf(av->isys, (uintptr_t)stream_cfg); if (!tout) { dev_err(dev, "stream open time out\n"); rval = -ETIMEDOUT; goto out_put_stream_opened; } if (ip->error) { dev_err(dev, "stream open error: %d\n", ip->error); rval = -EIO; goto out_put_stream_opened; } dev_dbg(dev, "start stream: open complete\n"); if (bl) { msg = ipu_get_fw_msg_buf(ip); if (!msg) { rval = -ENOMEM; goto out_put_stream_opened; } buf = to_frame_msg_buf(msg); } if (bl) { ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl); ipu_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); } reinit_completion(&ip->stream_start_completion); if (bl) { send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; ipu_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); rval = ipu_fw_isys_complex_cmd(av->isys, ip->stream_handle, buf, to_dma_addr(msg), sizeof(*buf), send_type); } else { send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_START; rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, send_type); } if (rval < 0) { dev_err(dev, "can't start streaming (%d)\n", rval); goto out_stream_close; } tout = wait_for_completion_timeout(&ip->stream_start_completion, IPU_LIB_CALL_TIMEOUT_JIFFIES); if (!tout) { dev_err(dev, "stream start time out\n"); rval = -ETIMEDOUT; goto out_stream_close; } if (ip->error) { dev_err(dev, "stream start error: %d\n", ip->error); rval = -EIO; goto out_stream_close; } dev_dbg(dev, "start stream: complete\n"); return 0; out_stream_close: reinit_completion(&ip->stream_close_completion); rvalout = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE); if (rvalout < 0) { dev_dbg(dev, "can't close stream (%d)\n", rvalout); goto out_put_stream_opened; } tout = wait_for_completion_timeout(&ip->stream_close_completion, IPU_LIB_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream close time out\n"); else if (ip->error) dev_err(dev, "stream close error: %d\n", ip->error); else dev_dbg(dev, "stream close complete\n"); out_put_stream_opened: put_stream_opened(av); out_put_stream_handle: put_stream_handle(av); return rval; } static void stop_streaming_firmware(struct ipu_isys_video *av) { struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = 
to_ipu_isys_pipeline(mp); struct device *dev = &av->isys->adev->dev; int rval, tout; enum ipu_fw_isys_send_type send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH; reinit_completion(&ip->stream_stop_completion); rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, send_type); if (rval < 0) { dev_err(dev, "can't stop stream (%d)\n", rval); return; } tout = wait_for_completion_timeout(&ip->stream_stop_completion, IPU_LIB_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream stop time out\n"); else if (ip->error) dev_err(dev, "stream stop error: %d\n", ip->error); else dev_dbg(dev, "stop stream: complete\n"); } static void close_streaming_firmware(struct ipu_isys_video *av) { struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); struct device *dev = &av->isys->adev->dev; int rval, tout; reinit_completion(&ip->stream_close_completion); rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE); if (rval < 0) { dev_err(dev, "can't close stream (%d)\n", rval); return; } tout = wait_for_completion_timeout(&ip->stream_close_completion, IPU_LIB_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream close time out\n"); else if (ip->error) dev_err(dev, "stream close error: %d\n", ip->error); else dev_dbg(dev, "close stream: complete\n"); put_stream_opened(av); put_stream_handle(av); } void ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip, void (*capture_done) (struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *resp)) { unsigned int i; /* Different instances may register same function. Add only once */ for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) if (ip->capture_done[i] == capture_done) return; for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) { if (!ip->capture_done[i]) { ip->capture_done[i] = capture_done; return; } } /* * Too many call backs registered. Change to IPU_NUM_CAPTURE_DONE * constant probably required. 
*/ WARN_ON(1); } int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av, unsigned int state) { struct ipu_isys *isys = av->isys; struct device *dev = &isys->adev->dev; struct ipu_isys_pipeline *ip; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) struct media_graph graph; #else struct media_entity_graph graph; #endif struct media_entity *entity; struct media_device *mdev = &av->isys->media_dev; struct media_pipeline *mp; int rval; unsigned int i; dev_dbg(dev, "prepare stream: %d\n", state); if (!state) { mp = media_entity_pipeline(&av->vdev.entity); ip = to_ipu_isys_pipeline(mp); if (ip->interlaced && isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) short_packet_queue_destroy(ip); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) media_pipeline_stop(&av->vdev.entity); #else media_pipeline_stop(av->vdev.entity.pads); #endif media_entity_enum_cleanup(&ip->entity_enum); return 0; } ip = &av->ip; WARN_ON(ip->nr_streaming); ip->has_sof = false; ip->nr_queues = 0; ip->external = NULL; atomic_set(&ip->sequence, 0); ip->isl_mode = IPU_ISL_OFF; for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) ip->capture_done[i] = NULL; ip->csi2_be = NULL; ip->csi2_be_soc = NULL; ip->csi2 = NULL; ip->seq_index = 0; memset(ip->seq, 0, sizeof(ip->seq)); WARN_ON(!list_empty(&ip->queues)); ip->interlaced = false; rval = media_entity_enum_init(&ip->entity_enum, mdev); if (rval) return rval; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) rval = media_pipeline_start(&av->vdev.entity, &ip->pipe); #else rval = media_pipeline_start(av->vdev.entity.pads, &ip->pipe); #endif if (rval < 0) { dev_dbg(dev, "pipeline start failed\n"); goto out_enum_cleanup; } if (!ip->external) { dev_err(dev, "no external entity set! Driver bug?\n"); rval = -EINVAL; goto out_pipeline_stop; } rval = media_graph_walk_init(&graph, mdev); if (rval) goto out_pipeline_stop; /* Gather all entities in the graph. 
*/ mutex_lock(&mdev->graph_mutex); media_graph_walk_start(&graph, &av->vdev.entity); while ((entity = media_graph_walk_next(&graph))) media_entity_enum_set(&ip->entity_enum, entity); mutex_unlock(&mdev->graph_mutex); media_graph_walk_cleanup(&graph); if (ip->interlaced) { rval = short_packet_queue_setup(ip); if (rval) { dev_err(&isys->adev->dev, "Failed to setup short packet buffer.\n"); goto out_pipeline_stop; } } dev_dbg(dev, "prepare stream: external entity %s\n", ip->external->entity->name); return 0; out_pipeline_stop: #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) media_pipeline_stop(&av->vdev.entity); #else media_pipeline_stop(av->vdev.entity.pads); #endif out_enum_cleanup: media_entity_enum_cleanup(&ip->entity_enum); return rval; } static void configure_stream_watermark(struct ipu_isys_video *av) { u32 vblank, hblank; u64 pixel_rate; int ret = 0; struct v4l2_subdev *esd; struct v4l2_ctrl *ctrl; struct ipu_isys_pipeline *ip; struct isys_iwake_watermark *iwake_watermark; struct v4l2_control vb = { .id = V4L2_CID_VBLANK, .value = 0 }; struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 }; struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); ip = to_ipu_isys_pipeline(mp); if (!ip->external->entity) { WARN_ON(1); return; } esd = media_entity_to_v4l2_subdev(ip->external->entity); av->watermark->width = av->mpix.width; av->watermark->height = av->mpix.height; ret = v4l2_g_ctrl(esd->ctrl_handler, &vb); if (!ret && vb.value >= 0) vblank = vb.value; else vblank = 0; ret = v4l2_g_ctrl(esd->ctrl_handler, &hb); if (!ret && hb.value >= 0) hblank = hb.value; else hblank = 0; ctrl = v4l2_ctrl_find(esd->ctrl_handler, V4L2_CID_PIXEL_RATE); if (!ctrl) pixel_rate = DEFAULT_PIXEL_RATE; else pixel_rate = v4l2_ctrl_g_ctrl_int64(ctrl); av->watermark->vblank = vblank; av->watermark->hblank = hblank; av->watermark->pixel_rate = pixel_rate; if (!pixel_rate) { iwake_watermark = av->isys->iwake_watermark; mutex_lock(&iwake_watermark->mutex); iwake_watermark->force_iwake_disable = true; mutex_unlock(&iwake_watermark->mutex); WARN(1, "%s Invalid pixel_rate, disable iwake.\n", __func__); return; } } static void calculate_stream_datarate(struct video_stream_watermark *watermark) { u64 pixels_per_line, bytes_per_line, line_time_ns; u64 pages_per_line, pb_bytes_per_line, stream_data_rate; u16 sram_granulrity_shift = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? IPU6_SRAM_GRANULRITY_SHIFT : IPU6SE_SRAM_GRANULRITY_SHIFT; u16 sram_granulrity_size = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? 
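/*
 * Illustrative arithmetic for the data rate computed below (the sensor
 * numbers are assumptions, not values used by the driver): a 1920x1080
 * stream with hblank = 80 and pixel_rate = 200 MHz gives
 *   pixels_per_line = 1920 + 80 = 2000
 *   line_time_ns    = 2000 * 1000 / (200000000 / 1000000) = 10000
 *   bytes_per_line  = 1920 * 2 = 3840
 * and, assuming a 2 KiB SRAM granule, pages_per_line =
 * DIV_ROUND_UP(3840, 2048) = 2 and pb_bytes_per_line = 4096, so
 * stream_data_rate works out to 4096 * 1000 / 10000 = 409 (MB/s as used
 * by the watermark code).
 */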
IPU6_SRAM_GRANULRITY_SIZE : IPU6SE_SRAM_GRANULRITY_SIZE; pixels_per_line = watermark->width + watermark->hblank; line_time_ns = pixels_per_line * 1000 / (watermark->pixel_rate / 1000000); /* 2 bytes per Bayer pixel */ bytes_per_line = watermark->width << 1; /* bytes to IS pixel buffer pages */ pages_per_line = bytes_per_line >> sram_granulrity_shift; /* pages for each line */ pages_per_line = DIV_ROUND_UP(bytes_per_line, sram_granulrity_size); pb_bytes_per_line = pages_per_line << sram_granulrity_shift; /* data rate MB/s */ stream_data_rate = (pb_bytes_per_line * 1000) / line_time_ns; watermark->stream_data_rate = stream_data_rate; } static void update_stream_watermark(struct ipu_isys_video *av, bool state) { struct isys_iwake_watermark *iwake_watermark; iwake_watermark = av->isys->iwake_watermark; if (state) { calculate_stream_datarate(av->watermark); mutex_lock(&iwake_watermark->mutex); list_add(&av->watermark->stream_node, &iwake_watermark->video_list); mutex_unlock(&iwake_watermark->mutex); } else { av->watermark->stream_data_rate = 0; mutex_lock(&iwake_watermark->mutex); list_del(&av->watermark->stream_node); mutex_unlock(&iwake_watermark->mutex); } update_watermark_setting(av->isys); } int ipu_isys_video_set_streaming(struct ipu_isys_video *av, unsigned int state, struct ipu_isys_buffer_list *bl) { struct device *dev = &av->isys->adev->dev; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) struct media_device *mdev = av->vdev.entity.parent; struct media_entity_graph graph; #else struct media_device *mdev = av->vdev.entity.graph_obj.mdev; #endif struct media_entity_enum entities; struct media_entity *entity, *entity2; struct media_pipeline *mp = media_entity_pipeline(&av->vdev.entity); struct ipu_isys_pipeline *ip = to_ipu_isys_pipeline(mp); struct v4l2_subdev *sd, *esd; int rval = 0; dev_dbg(dev, "set stream: %d\n", state); if (!ip->external->entity) { WARN_ON(1); return -ENODEV; } esd = media_entity_to_v4l2_subdev(ip->external->entity); if (state) { rval = media_graph_walk_init(&ip->graph, mdev); if (rval) return rval; rval = media_entity_enum_init(&entities, mdev); if (rval) goto out_media_entity_graph_init; } if (!state) { stop_streaming_firmware(av); /* stop external sub-device now. */ dev_info(dev, "stream off %s\n", ip->external->entity->name); v4l2_subdev_call(esd, video, s_stream, state); } mutex_lock(&mdev->graph_mutex); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_start(&ip->graph, #else media_graph_walk_start(&graph, #endif &av->vdev.entity); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) while ((entity = media_graph_walk_next(&ip->graph))) { #else while ((entity = media_graph_walk_next(&graph))) { #endif sd = media_entity_to_v4l2_subdev(entity); /* Non-subdev nodes can be safely ignored here. */ if (!is_media_entity_v4l2_subdev(entity)) continue; /* Don't start truly external devices quite yet. */ if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, strlen(IPU_ISYS_ENTITY_PREFIX)) != 0 || ip->external->entity == entity) continue; dev_dbg(dev, "s_stream %s entity %s\n", state ? 
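/*
 * Only sub-devices whose entity name starts with IPU_ISYS_ENTITY_PREFIX
 * are toggled in this walk; the external sensor entity is skipped here
 * and handled separately (before the walk on stream off, after the
 * firmware stream has started on stream on).
 */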
"on" : "off", entity->name); rval = v4l2_subdev_call(sd, video, s_stream, state); if (!state) continue; if (rval && rval != -ENOIOCTLCMD) { mutex_unlock(&mdev->graph_mutex); goto out_media_entity_stop_streaming; } media_entity_enum_set(&entities, entity); } mutex_unlock(&mdev->graph_mutex); if (av->aq.css_pin_type == IPU_FW_ISYS_PIN_TYPE_RAW_SOC) { if (state) configure_stream_watermark(av); update_stream_watermark(av, state); } /* Oh crap */ if (state) { rval = start_stream_firmware(av, bl); if (rval) goto out_update_stream_watermark; dev_dbg(dev, "set stream: source %d, stream_handle %d\n", ip->source, ip->stream_handle); /* Start external sub-device now. */ dev_info(dev, "stream on %s\n", ip->external->entity->name); rval = v4l2_subdev_call(esd, video, s_stream, state); if (rval) goto out_media_entity_stop_streaming_firmware; } else { close_streaming_firmware(av); } if (state) media_entity_enum_cleanup(&entities); else media_graph_walk_cleanup(&ip->graph); av->streaming = state; return 0; out_media_entity_stop_streaming_firmware: stop_streaming_firmware(av); out_update_stream_watermark: if (av->aq.css_pin_type == IPU_FW_ISYS_PIN_TYPE_RAW_SOC) update_stream_watermark(av, 0); out_media_entity_stop_streaming: mutex_lock(&mdev->graph_mutex); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_start(&ip->graph, #else media_graph_walk_start(&graph, #endif &av->vdev.entity); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) while (state && (entity2 = media_graph_walk_next(&ip->graph)) && #else while (state && (entity2 = media_graph_walk_next(&graph)) && #endif entity2 != entity) { sd = media_entity_to_v4l2_subdev(entity2); if (!media_entity_enum_test(&entities, entity2)) continue; v4l2_subdev_call(sd, video, s_stream, 0); } mutex_unlock(&mdev->graph_mutex); media_entity_enum_cleanup(&entities); out_media_entity_graph_init: media_graph_walk_cleanup(&ip->graph); return rval; } static const struct v4l2_ioctl_ops ioctl_ops_mplane = { .vidioc_querycap = ipu_isys_vidioc_querycap, #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0) .vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt, #else .vidioc_enum_fmt_vid_cap_mplane = ipu_isys_vidioc_enum_fmt, #endif .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane, .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane, .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_default = ipu_isys_vidioc_private, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, }; static const struct media_entity_operations entity_ops = { .link_validate = link_validate, }; static const struct v4l2_file_operations isys_fops = { .owner = THIS_MODULE, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .open = video_open, .release = video_release, }; /* * Do everything that's needed to initialise things related to video * buffer queue, video node, and the related media entity. The caller * is expected to assign isys field and set the name of the video * device. 
*/ int ipu_isys_video_init(struct ipu_isys_video *av, struct media_entity *entity, unsigned int pad, unsigned long pad_flags, unsigned int flags) { static atomic_t video_dev_count = ATOMIC_INIT(0); const struct v4l2_ioctl_ops *ioctl_ops = NULL; int rval, video_dev_nr; mutex_init(&av->mutex); init_completion(&av->ip.stream_open_completion); init_completion(&av->ip.stream_close_completion); init_completion(&av->ip.stream_start_completion); init_completion(&av->ip.stream_stop_completion); INIT_LIST_HEAD(&av->ip.queues); spin_lock_init(&av->ip.short_packet_queue_lock); av->ip.isys = av->isys; if (!av->watermark) { av->watermark = kzalloc(sizeof(*av->watermark), GFP_KERNEL); if (!av->watermark) { rval = -ENOMEM; goto out_mutex_destroy; } } av->vdev.device_caps = V4L2_CAP_STREAMING; if (pad_flags & MEDIA_PAD_FL_SINK) { av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; ioctl_ops = &ioctl_ops_mplane; av->vdev.device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE; av->vdev.vfl_dir = VFL_DIR_RX; } else { av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; av->vdev.vfl_dir = VFL_DIR_TX; av->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE; } rval = ipu_isys_queue_init(&av->aq); if (rval) goto out_mutex_destroy; av->pad.flags = pad_flags | MEDIA_PAD_FL_MUST_CONNECT; rval = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); if (rval) goto out_ipu_isys_queue_cleanup; av->vdev.entity.ops = &entity_ops; av->vdev.release = video_device_release_empty; av->vdev.fops = &isys_fops; av->vdev.v4l2_dev = &av->isys->v4l2_dev; if (!av->vdev.ioctl_ops) av->vdev.ioctl_ops = ioctl_ops; av->vdev.queue = &av->aq.vbq; av->vdev.lock = &av->mutex; set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); video_set_drvdata(&av->vdev, av); video_dev_nr = atomic_inc_return(&video_dev_count) - 1; if (video_dev_nr < MAX_VIDEO_DEVICES) video_dev_nr = video_nr[video_dev_nr]; else video_dev_nr = -1; mutex_lock(&av->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) rval = video_register_device(&av->vdev, VFL_TYPE_GRABBER, video_dev_nr); #else rval = video_register_device(&av->vdev, VFL_TYPE_VIDEO, video_dev_nr); #endif if (rval) goto out_media_entity_cleanup; if (pad_flags & MEDIA_PAD_FL_SINK) rval = media_create_pad_link(entity, pad, &av->vdev.entity, 0, flags); else rval = media_create_pad_link(&av->vdev.entity, 0, entity, pad, flags); if (rval) { dev_info(&av->isys->adev->dev, "can't create link\n"); goto out_media_entity_cleanup; } av->pfmt = av->try_fmt_vid_mplane(av, &av->mpix); av->initialized = true; mutex_unlock(&av->mutex); return rval; out_media_entity_cleanup: video_unregister_device(&av->vdev); mutex_unlock(&av->mutex); media_entity_cleanup(&av->vdev.entity); out_ipu_isys_queue_cleanup: ipu_isys_queue_cleanup(&av->aq); out_mutex_destroy: kfree(av->watermark); mutex_destroy(&av->mutex); return rval; } void ipu_isys_video_cleanup(struct ipu_isys_video *av) { if (!av->initialized) return; kfree(av->watermark); video_unregister_device(&av->vdev); media_entity_cleanup(&av->vdev.entity); mutex_destroy(&av->mutex); ipu_isys_queue_cleanup(&av->aq); av->initialized = false; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys-video.h000066400000000000000000000127121471702545500263760ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_ISYS_VIDEO_H #define IPU_ISYS_VIDEO_H #include #include #include #include #include #include #include "ipu-isys-queue.h" #define IPU_ISYS_OUTPUT_PINS 11 #define IPU_NUM_CAPTURE_DONE 2 #define 
IPU_ISYS_MAX_PARALLEL_SOF 2 struct ipu_isys; struct ipu_isys_csi2_be_soc; struct ipu_fw_isys_stream_cfg_data_abi; struct ipu_isys_pixelformat { u32 pixelformat; u32 bpp; u32 bpp_packed; u32 bpp_planar; u32 code; u32 css_pixelformat; }; struct sequence_info { unsigned int sequence; u64 timestamp; }; struct output_pin_data { void (*pin_ready)(struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *info); struct ipu_isys_queue *aq; }; struct ipu_isys_pipeline { struct media_pipeline pipe; struct media_pad *external; atomic_t sequence; unsigned int seq_index; struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF]; int source; /* SSI stream source */ int stream_handle; /* stream handle for CSS API */ unsigned int nr_output_pins; /* How many firmware pins? */ enum ipu_isl_mode isl_mode; struct ipu_isys_csi2_be *csi2_be; struct ipu_isys_csi2_be_soc *csi2_be_soc; struct ipu_isys_csi2 *csi2; /* * Number of capture queues, write access serialised using struct * ipu_isys.stream_mutex */ int nr_queues; int nr_streaming; /* Number of capture queues streaming */ int streaming; /* Has streaming been really started? */ struct list_head queues; struct completion stream_open_completion; struct completion stream_close_completion; struct completion stream_start_completion; struct completion stream_stop_completion; struct ipu_isys *isys; void (*capture_done[IPU_NUM_CAPTURE_DONE]) (struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *resp); struct output_pin_data output_pins[IPU_ISYS_OUTPUT_PINS]; bool has_sof; bool interlaced; int error; struct ipu_isys_private_buffer *short_packet_bufs; size_t short_packet_buffer_size; unsigned int num_short_packet_lines; unsigned int short_packet_output_pin; unsigned int cur_field; struct list_head short_packet_incoming; struct list_head short_packet_active; /* Serialize access to short packet active and incoming lists */ spinlock_t short_packet_queue_lock; struct list_head pending_interlaced_bufs; unsigned int short_packet_trace_index; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) struct media_graph graph; #else struct media_entity_graph graph; #endif #endif struct media_entity_enum entity_enum; }; #define to_ipu_isys_pipeline(__pipe) \ container_of((__pipe), struct ipu_isys_pipeline, pipe) struct video_stream_watermark { u32 width; u32 height; u32 vblank; u32 hblank; u32 frame_rate; u64 pixel_rate; u64 stream_data_rate; struct list_head stream_node; }; struct ipu_isys_video { /* Serialise access to other fields in the struct. 
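 *
 * The video node's vdev.lock is pointed at this same mutex in
 * ipu_isys_video_init(), so ioctls issued on the node are serialised
 * against the driver's own accesses as well.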
*/ struct mutex mutex; struct media_pad pad; struct video_device vdev; struct v4l2_pix_format_mplane mpix; const struct ipu_isys_pixelformat *pfmts; const struct ipu_isys_pixelformat *pfmt; struct ipu_isys_queue aq; struct ipu_isys *isys; struct ipu_isys_pipeline ip; unsigned int streaming; bool packed; bool compression; bool initialized; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *compression_ctrl; unsigned int ts_offsets[VIDEO_MAX_PLANES]; unsigned int line_header_length; /* bits */ unsigned int line_footer_length; /* bits */ struct video_stream_watermark *watermark; const struct ipu_isys_pixelformat * (*try_fmt_vid_mplane)(struct ipu_isys_video *av, struct v4l2_pix_format_mplane *mpix); void (*prepare_fw_stream)(struct ipu_isys_video *av, struct ipu_fw_isys_stream_cfg_data_abi *cfg); }; #define ipu_isys_queue_to_video(__aq) \ container_of(__aq, struct ipu_isys_video, aq) extern const struct ipu_isys_pixelformat ipu_isys_pfmts[]; extern const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[]; extern const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[]; const struct ipu_isys_pixelformat * ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat); int ipu_isys_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap); int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f); const struct ipu_isys_pixelformat * ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av, struct v4l2_pix_format_mplane *mpix); const struct ipu_isys_pixelformat * ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av, struct v4l2_pix_format_mplane *mpix, int store_csi2_header); void ipu_isys_prepare_fw_cfg_default(struct ipu_isys_video *av, struct ipu_fw_isys_stream_cfg_data_abi *cfg); int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av, unsigned int state); int ipu_isys_video_set_streaming(struct ipu_isys_video *av, unsigned int state, struct ipu_isys_buffer_list *bl); int ipu_isys_video_init(struct ipu_isys_video *av, struct media_entity *source, unsigned int source_pad, unsigned long pad_flags, unsigned int flags); void ipu_isys_video_cleanup(struct ipu_isys_video *av); void ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip, void (*capture_done) (struct ipu_isys_pipeline *ip, struct ipu_fw_isys_resp_info_abi *resp)); #endif /* IPU_ISYS_VIDEO_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys.c000066400000000000000000001455021471702545500252710ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #if IS_ENABLED(CONFIG_IPU_BRIDGE) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) #include #endif #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) #include #endif #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-bus.h" #include "ipu-cpd.h" #include "ipu-mmu.h" #include "ipu-dma.h" #include "ipu-isys.h" #include "ipu-isys-csi2.h" #include "ipu-isys-video.h" #include "ipu-platform-regs.h" #include "ipu-buttress.h" #include "ipu-platform.h" #include "ipu-platform-buttress-regs.h" #define ISYS_PM_QOS_VALUE 300 #define IPU_BUTTRESS_FABIC_CONTROL 0x68 #define GDA_ENABLE_IWAKE_INDEX 2 #define GDA_IWAKE_THRESHOLD_INDEX 1 #define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0 #define GDA_MEMOPEN_THRESHOLD_INDEX 3 #define DEFAULT_DID_RATIO 90 #define IPU6EP_LTR_VALUE 200 #define 
IPU6EP_MTL_LTR_VALUE 1023 #define DEFAULT_IWAKE_THRESHOLD 0x42 #define IPU6EP_MIN_MEMOPEN_TH 0x4 #define IPU6EP_MTL_MIN_MEMOPEN_TH 0xc #define DEFAULT_MEM_OPEN_TIME 10 #define ONE_THOUSAND_MICROSECOND 1000 /* One page is 2KB, 8 x 16 x 16 = 2048B = 2KB */ #define ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE 0x800 /* LTR & DID value are 10 bit at most */ #define LTR_DID_VAL_MAX 1023 #define LTR_DEFAULT_VALUE 0x70503C19 #define FILL_TIME_DEFAULT_VALUE 0xFFF0783C #define LTR_DID_PKGC_2R 20 #define LTR_DID_PKGC_8 100 #define LTR_SCALE_DEFAULT 5 #define LTR_SCALE_1024NS 2 #define DID_SCALE_1US 2 #define DID_SCALE_32US 3 #define REG_PKGC_PMON_CFG 0xB00 #define VAL_PKGC_PMON_CFG_RESET 0x38 #define VAL_PKGC_PMON_CFG_START 0x7 #define IS_PIXEL_BUFFER_PAGES 0x80 /* BIOS provides the driver the LTR and threshold information in IPU, * IS pixel buffer is 256KB, MaxSRAMSize is 200KB on IPU6. */ #define IPU6_MAX_SRAM_SIZE (200 << 10) /* IS pixel buffer is 128KB, MaxSRAMSize is 96KB on IPU6SE. */ #define IPU6SE_MAX_SRAM_SIZE (96 << 10) /* When iwake mode is disabled the critical threshold is statically set to 75% * of the IS pixel buffer criticalThreshold = (128 * 3) / 4 */ #define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4) union fabric_ctrl { struct { u16 ltr_val : 10; u16 ltr_scale : 3; u16 RSVD1 : 3; u16 did_val : 10; u16 did_scale : 3; u16 RSVD2 : 1; u16 keep_power_in_D0 : 1; u16 keep_power_override : 1; } bits; u32 value; }; enum ltr_did_type { LTR_IWAKE_ON, LTR_IWAKE_OFF, LTR_ISYS_ON, LTR_ISYS_OFF, LTR_ENHANNCE_IWAKE, LTR_TYPE_MAX }; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) /* * BEGIN adapted code from drivers/media/platform/omap3isp/isp.c. * FIXME: This (in terms of functionality if not code) should be most * likely generalised in the framework, and use made optional for * drivers. */ /* * ipu_pipeline_pm_use_count - Count the number of users of a pipeline * @entity: The entity * * Return the total number of users of all video device nodes in the pipeline. */ static int ipu_pipeline_pm_use_count(struct media_pad *pad) { struct media_entity_graph graph; struct media_entity *entity = pad->entity; int use = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_init(&graph, entity->graph_obj.mdev); #endif media_graph_walk_start(&graph, pad); while ((entity = media_graph_walk_next(&graph))) { if (is_media_entity_v4l2_io(entity)) use += entity->use_count; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_cleanup(&graph); #endif return use; } /* * ipu_pipeline_pm_power_one - Apply power change to an entity * @entity: The entity * @change: Use count change * * Change the entity use count by @change. If the entity is a subdev update its * power state by calling the core::s_power operation when the use count goes * from 0 to != 0 or from != 0 to 0. * * Return 0 on success or a negative error code on failure. */ static int ipu_pipeline_pm_power_one(struct media_entity *entity, int change) { struct v4l2_subdev *subdev; int ret; subdev = is_media_entity_v4l2_subdev(entity) ? 
media_entity_to_v4l2_subdev(entity) : NULL; if (entity->use_count == 0 && change > 0 && subdev) { ret = v4l2_subdev_call(subdev, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; } entity->use_count += change; WARN_ON(entity->use_count < 0); if (entity->use_count == 0 && change < 0 && subdev) v4l2_subdev_call(subdev, core, s_power, 0); return 0; } /* * ipu_pipeline_pm_power - Apply power change to all entities * in a pipeline * @entity: The entity * @change: Use count change * @from_pad: Starting pad * * Walk the pipeline to update the use count and the power state of * all non-node * entities. * * Return 0 on success or a negative error code on failure. */ static int ipu_pipeline_pm_power(struct media_entity *entity, int change, int from_pad) { struct media_entity_graph graph; struct media_entity *first = entity; int ret = 0; if (!change) return 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_init(&graph, entity->graph_obj.mdev); #endif media_graph_walk_start(&graph, &entity->pads[from_pad]); while (!ret && (entity = media_graph_walk_next(&graph))) if (!is_media_entity_v4l2_io(entity)) ret = ipu_pipeline_pm_power_one(entity, change); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_cleanup(&graph); #endif if (!ret) return 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_init(&graph, entity->graph_obj.mdev); #endif media_graph_walk_start(&graph, &first->pads[from_pad]); while ((first = media_graph_walk_next(&graph)) && first != entity) if (!is_media_entity_v4l2_io(first)) ipu_pipeline_pm_power_one(first, -change); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_graph_walk_cleanup(&graph); #endif return ret; } /* * ipu_pipeline_pm_use - Update the use count of an entity * @entity: The entity * @use: Use (1) or stop using (0) the entity * * Update the use count of all entities in the pipeline and power entities * on or off accordingly. * * Return 0 on success or a negative error code on failure. Powering entities * off is assumed to never fail. No failure can occur when the use parameter is * set to 0. */ int ipu_pipeline_pm_use(struct media_entity *entity, int use) { int change = use ? 1 : -1; int ret; mutex_lock(&entity-> #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) parent #else graph_obj.mdev #endif ->graph_mutex); /* Apply use count to node. */ entity->use_count += change; WARN_ON(entity->use_count < 0); /* Apply power change to connected non-nodes. */ ret = ipu_pipeline_pm_power(entity, change, 0); if (ret < 0) entity->use_count -= change; mutex_unlock(&entity-> #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) parent #else graph_obj.mdev #endif ->graph_mutex); return ret; } /* * ipu_pipeline_link_notify - Link management notification callback * @link: The link * @flags: New link flags that will be applied * @notification: The link's state change notification type * (MEDIA_DEV_NOTIFY_*) * * React to link management on powered pipelines by updating the use count of * all entities in the source and sink sides of the link. Entities are powered * on or off accordingly. * * Return 0 on success or a negative error code on failure. Powering entities * off is assumed to never fail. This function will not fail for disconnection * events. 
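 *
 * In short: a PRE_LINK_CH notification that enables a link powers up each
 * end of the link by the use count of the opposite end; a POST_LINK_CH
 * notification that disables a link releases those counts again. All
 * other notifications are left alone.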
*/ static int ipu_pipeline_link_notify(struct media_link *link, u32 flags, unsigned int notification) { struct media_entity *source = link->source->entity; struct media_entity *sink = link->sink->entity; int source_use = ipu_pipeline_pm_use_count(link->source); int sink_use = ipu_pipeline_pm_use_count(link->sink); int ret; if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && !(flags & MEDIA_LNK_FL_ENABLED)) { /* Powering off entities is assumed to never fail. */ ipu_pipeline_pm_power(source, -sink_use, 0); ipu_pipeline_pm_power(sink, -source_use, 0); return 0; } if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && (flags & MEDIA_LNK_FL_ENABLED)) { ret = ipu_pipeline_pm_power(source, sink_use, 0); if (ret < 0) return ret; ret = ipu_pipeline_pm_power(sink, source_use, 0); if (ret < 0) ipu_pipeline_pm_power(source, -sink_use, 0); return ret; } return 0; } /* END adapted code from drivers/media/platform/omap3isp/isp.c */ #endif /* < v4.6 */ static int isys_complete_ext_device_registration(struct ipu_isys *isys, struct v4l2_subdev *sd, struct ipu_isys_csi2_config *csi2) { unsigned int i; int rval; v4l2_set_subdev_hostdata(sd, csi2); for (i = 0; i < sd->entity.num_pads; i++) { if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) break; } if (i == sd->entity.num_pads) { dev_warn(&isys->adev->dev, "no source pad in external entity\n"); rval = -ENOENT; goto skip_unregister_subdev; } rval = media_create_pad_link(&sd->entity, i, &isys->csi2[csi2->port].asd.sd.entity, 0, 0); if (rval) { dev_warn(&isys->adev->dev, "can't create link\n"); goto skip_unregister_subdev; } isys->csi2[csi2->port].nlanes = csi2->nlanes; return 0; skip_unregister_subdev: v4l2_device_unregister_subdev(sd); return rval; } static void isys_unregister_subdevices(struct ipu_isys *isys) { const struct ipu_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; unsigned int i; for (i = 0; i < NR_OF_CSI2_BE_SOC_DEV; i++) ipu_isys_csi2_be_soc_cleanup(&isys->csi2_be_soc[i]); for (i = 0; i < csi2->nports && isys->csi2; i++) ipu_isys_csi2_cleanup(&isys->csi2[i]); } static int isys_register_subdevices(struct ipu_isys *isys) { const struct ipu_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; struct ipu_isys_csi2_be_soc *csi2_be_soc; unsigned int i, k; int rval; isys->csi2 = devm_kcalloc(&isys->adev->dev, csi2->nports, sizeof(*isys->csi2), GFP_KERNEL); if (!isys->csi2) { rval = -ENOMEM; goto fail; } for (i = 0; i < csi2->nports; i++) { rval = ipu_isys_csi2_init(&isys->csi2[i], isys, isys->pdata->base + csi2->offsets[i], i); if (rval) goto fail; isys->isr_csi2_bits |= IPU_ISYS_UNISPART_IRQ_CSI2(i); } for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) { rval = ipu_isys_csi2_be_soc_init(&isys->csi2_be_soc[k], isys, k); if (rval) { dev_info(&isys->adev->dev, "can't register csi2 soc be device %d\n", k); goto fail; } } for (i = 0; i < csi2->nports; i++) { for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) { csi2_be_soc = &isys->csi2_be_soc[k]; rval = media_create_pad_link(&isys->csi2[i].asd.sd.entity, CSI2_PAD_SOURCE, &csi2_be_soc->asd.sd.entity, CSI2_BE_SOC_PAD_SINK, 0); if (rval) { dev_info(&isys->adev->dev, "can't create link csi2->be_soc\n"); goto fail; } } } return 0; fail: isys_unregister_subdevices(isys); return rval; } /* read ltrdid threshold values from BIOS or system configuration */ static void get_lut_ltrdid(struct ipu_isys *isys, struct ltr_did *pltr_did) { struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark; /* default values*/ struct ltr_did ltrdid_default; ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE; 
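/*
 * If the platform never populated the LTR LUT (lut_ltr.value == 0), the
 * hard-coded LTR_DEFAULT_VALUE / FILL_TIME_DEFAULT_VALUE pair set up
 * here is returned instead.
 */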
ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE; if (iwake_watermark->ltrdid.lut_ltr.value) *pltr_did = iwake_watermark->ltrdid; else *pltr_did = ltrdid_default; } static int set_iwake_register(struct ipu_isys *isys, u32 index, u32 value) { int ret = 0; u32 req_id = index; u32 offset = 0; ret = ipu_fw_isys_send_proxy_token(isys, req_id, index, offset, value); if (ret) dev_err(&isys->adev->dev, "write %d failed %d", index, ret); return ret; } /* * When input system is powered up and before enabling any new sensor capture, * or after disabling any sensor capture the following values need to be set: * LTR_value = LTR(usec) from calculation; * LTR_scale = 2; * DID_value = DID(usec) from calculation; * DID_scale = 2; * * When input system is powered down, the LTR and DID values * must be returned to the default values: * LTR_value = 1023; * LTR_scale = 5; * DID_value = 1023; * DID_scale = 2; */ static void set_iwake_ltrdid(struct ipu_isys *isys, u16 ltr, u16 did, enum ltr_did_type use) { u16 ltr_val, ltr_scale = LTR_SCALE_1024NS; u16 did_val, did_scale = DID_SCALE_1US; union fabric_ctrl fc; struct ipu_device *isp = isys->adev->isp; switch (use) { case LTR_IWAKE_ON: ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX); did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX); ltr_scale = (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) ? LTR_SCALE_DEFAULT : LTR_SCALE_1024NS; break; case LTR_ISYS_ON: case LTR_IWAKE_OFF: ltr_val = LTR_DID_PKGC_2R; did_val = LTR_DID_PKGC_2R; break; case LTR_ISYS_OFF: ltr_val = LTR_DID_VAL_MAX; did_val = LTR_DID_VAL_MAX; ltr_scale = LTR_SCALE_DEFAULT; break; case LTR_ENHANNCE_IWAKE: if (ltr == LTR_DID_VAL_MAX && did == LTR_DID_VAL_MAX) { ltr_val = LTR_DID_VAL_MAX; did_val = LTR_DID_VAL_MAX; ltr_scale = LTR_SCALE_DEFAULT; } else if (did < ONE_THOUSAND_MICROSECOND) { ltr_val = ltr; did_val = did; } else { ltr_val = ltr; /* div 90% value by 32 to account for scale change */ did_val = did / 32; did_scale = DID_SCALE_32US; } break; default: return; } fc.value = readl(isp->base + IPU_BUTTRESS_FABIC_CONTROL); fc.bits.ltr_val = ltr_val; fc.bits.ltr_scale = ltr_scale; fc.bits.did_val = did_val; fc.bits.did_scale = did_scale; dev_dbg(&isys->adev->dev, "%s ltr: %d did: %d", __func__, ltr_val, did_val); writel(fc.value, isp->base + IPU_BUTTRESS_FABIC_CONTROL); } /* SW driver may clear register GDA_ENABLE_IWAKE before the FW configures the * stream for debug purposes. Otherwise SW should not access this register. */ static int enable_iwake(struct ipu_isys *isys, bool enable) { int ret = 0; struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark; mutex_lock(&iwake_watermark->mutex); if (iwake_watermark->iwake_enabled == enable) { mutex_unlock(&iwake_watermark->mutex); return ret; } ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable); if (!ret) iwake_watermark->iwake_enabled = enable; mutex_unlock(&iwake_watermark->mutex); return ret; } void update_watermark_setting(struct ipu_isys *isys) { struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark; struct list_head *stream_node; struct video_stream_watermark *p_watermark; struct ltr_did ltrdid; u16 calc_fill_time_us = 0, ltr = 0, did = 0; enum ltr_did_type ltr_did_type; u32 iwake_threshold, iwake_critical_threshold, page_num, mem_threshold; u32 mem_open_threshold = 0; u64 threshold_bytes; u64 isys_pb_datarate_mbs = 0; u16 sram_granulrity_shift = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? 
IPU6_SRAM_GRANULRITY_SHIFT : IPU6SE_SRAM_GRANULRITY_SHIFT; int max_sram_size = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? IPU6_MAX_SRAM_SIZE : IPU6SE_MAX_SRAM_SIZE; mutex_lock(&iwake_watermark->mutex); if (iwake_watermark->force_iwake_disable) { set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, CRITICAL_THRESHOLD_IWAKE_DISABLE); mutex_unlock(&iwake_watermark->mutex); return; } if (list_empty(&iwake_watermark->video_list)) { isys_pb_datarate_mbs = 0; } else { list_for_each(stream_node, &iwake_watermark->video_list) { p_watermark = list_entry(stream_node, struct video_stream_watermark, stream_node); isys_pb_datarate_mbs += p_watermark->stream_data_rate; } } mutex_unlock(&iwake_watermark->mutex); if (!isys_pb_datarate_mbs) { enable_iwake(isys, false); set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF); mutex_lock(&iwake_watermark->mutex); set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, CRITICAL_THRESHOLD_IWAKE_DISABLE); mutex_unlock(&iwake_watermark->mutex); return; } /* should enable iwake by default according to FW */ enable_iwake(isys, true); calc_fill_time_us = (u16)(max_sram_size / isys_pb_datarate_mbs); if (ipu_ver == IPU_VER_6EP_MTL || ipu_ver == IPU_VER_6EP) { ltr = (ipu_ver == IPU_VER_6EP_MTL) ? IPU6EP_MTL_LTR_VALUE : IPU6EP_LTR_VALUE; did = calc_fill_time_us * DEFAULT_DID_RATIO / 100; ltr_did_type = LTR_ENHANNCE_IWAKE; } else { get_lut_ltrdid(isys, <rdid); if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0) ltr = 0; else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1) ltr = ltrdid.lut_ltr.bits.val0; else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2) ltr = ltrdid.lut_ltr.bits.val1; else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3) ltr = ltrdid.lut_ltr.bits.val2; else ltr = ltrdid.lut_ltr.bits.val3; did = calc_fill_time_us - ltr; ltr_did_type = LTR_IWAKE_ON; } threshold_bytes = did * isys_pb_datarate_mbs; /* calculate iwake threshold with 2KB granularity pages */ iwake_threshold = max_t(u32, 1, threshold_bytes >> sram_granulrity_shift); iwake_threshold = min_t(u32, iwake_threshold, max_sram_size); set_iwake_ltrdid(isys, ltr, did, ltr_did_type); mutex_lock(&iwake_watermark->mutex); if (ipu_ver == IPU_VER_6EP_MTL || ipu_ver == IPU_VER_6EP) set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, DEFAULT_IWAKE_THRESHOLD); else set_iwake_register(isys, GDA_IWAKE_THRESHOLD_INDEX, iwake_threshold); if (ipu_ver == IPU_VER_6EP_MTL || ipu_ver == IPU_VER_6EP) { /* Calculate number of pages that will be filled in 10 usec */ page_num = (DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) / ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE; page_num += ((DEFAULT_MEM_OPEN_TIME * isys_pb_datarate_mbs) % ISF_DMA_TOP_GDA_PROFERTY_PAGE_SIZE) ? 1 : 0; mem_threshold = (ipu_ver == IPU_VER_6EP_MTL) ? IPU6EP_MTL_MIN_MEMOPEN_TH : IPU6EP_MIN_MEMOPEN_TH; mem_open_threshold = max_t(u32, mem_threshold, page_num); dev_dbg(&isys->adev->dev, "%s mem_open_threshold: %u\n", __func__, mem_open_threshold); set_iwake_register(isys, GDA_MEMOPEN_THRESHOLD_INDEX, mem_open_threshold); } /* set the critical threshold to halfway between * iwake threshold and the full buffer. 
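 *
 * Illustrative numbers (the 0x20 threshold is an assumed example value):
 * with IS_PIXEL_BUFFER_PAGES = 0x80, an iwake threshold of 0x20 pages
 * yields a critical threshold of 0x20 + (0x80 - 0x20) / 2 = 0x50 pages.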
*/ iwake_critical_threshold = iwake_threshold + (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2; dev_dbg(&isys->adev->dev, "%s threshold: %u critical: %u\n", __func__, iwake_threshold, iwake_critical_threshold); set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX, iwake_critical_threshold); mutex_unlock(&iwake_watermark->mutex); writel(VAL_PKGC_PMON_CFG_RESET, isys->adev->isp->base + REG_PKGC_PMON_CFG); writel(VAL_PKGC_PMON_CFG_START, isys->adev->isp->base + REG_PKGC_PMON_CFG); } static int isys_iwake_watermark_init(struct ipu_isys *isys) { struct isys_iwake_watermark *iwake_watermark; if (isys->iwake_watermark) return 0; iwake_watermark = devm_kzalloc(&isys->adev->dev, sizeof(*iwake_watermark), GFP_KERNEL); if (!iwake_watermark) return -ENOMEM; INIT_LIST_HEAD(&iwake_watermark->video_list); mutex_init(&iwake_watermark->mutex); iwake_watermark->ltrdid.lut_ltr.value = 0; isys->iwake_watermark = iwake_watermark; iwake_watermark->isys = isys; iwake_watermark->iwake_enabled = false; iwake_watermark->force_iwake_disable = false; return 0; } static int isys_iwake_watermark_cleanup(struct ipu_isys *isys) { struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark; if (!iwake_watermark) return -EINVAL; mutex_lock(&iwake_watermark->mutex); list_del(&iwake_watermark->video_list); mutex_unlock(&iwake_watermark->mutex); mutex_destroy(&iwake_watermark->mutex); isys->iwake_watermark = NULL; return 0; } /* The .bound() notifier callback when a match is found */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) static int isys_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { struct ipu_isys *isys = container_of(notifier, struct ipu_isys, notifier); struct sensor_async_subdev *s_asd = container_of(asd, struct sensor_async_subdev, asd); dev_info(&isys->adev->dev, "bind %s nlanes is %d port is %d\n", sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static void isys_notifier_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { struct ipu_isys *isys = container_of(notifier, struct ipu_isys, notifier); dev_info(&isys->adev->dev, "unbind %s\n", sd->name); } #else static int isys_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_connection *asc) { struct ipu_isys *isys = container_of(notifier, struct ipu_isys, notifier); struct sensor_async_sd *s_asd = container_of(asc, struct sensor_async_sd, asc); int ret; #if IS_ENABLED(CONFIG_IPU_BRIDGE) ret = ipu_bridge_instantiate_vcm(sd->dev); if (ret) { dev_err(&isys->adev->dev, "instantiate vcm failed\n"); return ret; } #endif dev_info(&isys->adev->dev, "bind %s nlanes is %d port is %d\n", sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); ret = isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); if (ret) return ret; return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static void isys_notifier_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_connection *asc) { struct ipu_isys *isys = container_of(notifier, struct ipu_isys, notifier); dev_info(&isys->adev->dev, "unbind %s\n", sd->name); } #endif static int isys_notifier_complete(struct v4l2_async_notifier *notifier) { struct ipu_isys *isys = container_of(notifier, struct ipu_isys, notifier); dev_info(&isys->adev->dev, "All sensor registration completed.\n"); return 
v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static const struct v4l2_async_notifier_operations isys_async_ops = { .bound = isys_notifier_bound, .unbind = isys_notifier_unbind, .complete = isys_notifier_complete, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) static int isys_fwnode_parse(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct v4l2_async_subdev *asd) { struct sensor_async_subdev *s_asd = container_of(asd, struct sensor_async_subdev, asd); s_asd->csi2.port = vep->base.port; s_asd->csi2.nlanes = vep->bus.mipi_csi2.num_data_lanes; return 0; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 15, 71) static int isys_notifier_init(struct ipu_isys *isys) { struct ipu_device *isp = isys->adev->isp; size_t asd_struct_size = sizeof(struct sensor_async_subdev); int ret; v4l2_async_notifier_init(&isys->notifier); ret = v4l2_async_notifier_parse_fwnode_endpoints(&isp->pdev->dev, &isys->notifier, asd_struct_size, isys_fwnode_parse); if (ret < 0) { dev_err(&isys->adev->dev, "v4l2 parse_fwnode_endpoints() failed: %d\n", ret); return ret; } if (list_empty(&isys->notifier.asd_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(&isys->adev->dev, "no subdev found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_notifier_register(&isys->v4l2_dev, &isys->notifier); if (ret) { dev_err(&isys->adev->dev, "failed to register async notifier : %d\n", ret); v4l2_async_notifier_cleanup(&isys->notifier); } return ret; } #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) static int isys_notifier_init(struct ipu_isys *isys) { struct ipu_device *isp = isys->adev->isp; size_t asd_struct_size = sizeof(struct sensor_async_subdev); int ret; v4l2_async_nf_init(&isys->notifier); ret = v4l2_async_nf_parse_fwnode_endpoints(&isp->pdev->dev, &isys->notifier, asd_struct_size, isys_fwnode_parse); if (ret < 0) { dev_err(&isys->adev->dev, "v4l2 parse_fwnode_endpoints() failed: %d\n", ret); return ret; } if (list_empty(&isys->notifier.asd_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(&isys->adev->dev, "no subdev found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_nf_register(&isys->v4l2_dev, &isys->notifier); if (ret) { dev_err(&isys->adev->dev, "failed to register async notifier : %d\n", ret); v4l2_async_nf_cleanup(&isys->notifier); } return ret; } #else static int isys_notifier_init(struct ipu_isys *isys) { const struct ipu_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; struct ipu_device *isp = isys->adev->isp; struct device *dev = &isp->pdev->dev; unsigned int i; int ret; v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); for (i = 0; i < csi2->nports; i++) { struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct sensor_async_sd *s_asd; struct fwnode_handle *ep; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (!ep) continue; ret = v4l2_fwnode_endpoint_parse(ep, &vep); if (ret) goto err_parse; s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, struct sensor_async_sd); if (IS_ERR(s_asd)) { ret = PTR_ERR(s_asd); goto err_parse; } s_asd->csi2.port = vep.base.port; s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; fwnode_handle_put(ep); continue; err_parse: fwnode_handle_put(ep); return ret; } if (list_empty(&isys->notifier.waiting_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(&isys->adev->dev, "no subdev 
found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_nf_register(&isys->notifier); if (ret) { dev_err(&isys->adev->dev, "failed to register async notifier : %d\n", ret); v4l2_async_nf_cleanup(&isys->notifier); } return ret; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 15, 71) static void isys_notifier_cleanup(struct ipu_isys *isys) { v4l2_async_notifier_unregister(&isys->notifier); v4l2_async_notifier_cleanup(&isys->notifier); } #else static void isys_notifier_cleanup(struct ipu_isys *isys) { v4l2_async_nf_unregister(&isys->notifier); v4l2_async_nf_cleanup(&isys->notifier); } #endif static struct media_device_ops isys_mdev_ops = { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) .link_notify = ipu_pipeline_link_notify, #else .link_notify = v4l2_pipeline_link_notify, #endif }; static int isys_register_devices(struct ipu_isys *isys) { int rval; isys->media_dev.dev = &isys->adev->dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 12) isys->media_dev.ops = &isys_mdev_ops; #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) isys->media_dev.link_notify = ipu_pipeline_link_notify; #else isys->media_dev.link_notify = v4l2_pipeline_link_notify; #endif strscpy(isys->media_dev.model, IPU_MEDIA_DEV_MODEL_NAME, sizeof(isys->media_dev.model)); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) isys->media_dev.driver_version = LINUX_VERSION_CODE; #endif snprintf(isys->media_dev.bus_info, sizeof(isys->media_dev.bus_info), "pci:%s", dev_name(isys->adev->dev.parent->parent)); strscpy(isys->v4l2_dev.name, isys->media_dev.model, sizeof(isys->v4l2_dev.name)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_device_init(&isys->media_dev); #endif rval = media_device_register(&isys->media_dev); if (rval < 0) { dev_info(&isys->adev->dev, "can't register media device\n"); goto out_media_device_unregister; } isys->v4l2_dev.mdev = &isys->media_dev; rval = v4l2_device_register(&isys->adev->dev, &isys->v4l2_dev); if (rval < 0) { dev_info(&isys->adev->dev, "can't register v4l2 device\n"); goto out_media_device_unregister; } rval = isys_register_subdevices(isys); if (rval) goto out_v4l2_device_unregister; rval = isys_notifier_init(isys); if (rval) goto out_isys_unregister_subdevices; rval = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); if (rval) goto out_isys_notifier_cleanup; return 0; out_isys_notifier_cleanup: isys_notifier_cleanup(isys); out_isys_unregister_subdevices: isys_unregister_subdevices(isys); out_v4l2_device_unregister: v4l2_device_unregister(&isys->v4l2_dev); out_media_device_unregister: media_device_unregister(&isys->media_dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_device_cleanup(&isys->media_dev); #endif return rval; } static void isys_unregister_devices(struct ipu_isys *isys) { isys_unregister_subdevices(isys); v4l2_device_unregister(&isys->v4l2_dev); media_device_unregister(&isys->media_dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) media_device_cleanup(&isys->media_dev); #endif } #ifdef CONFIG_PM static int isys_runtime_pm_resume(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_device *isp = adev->isp; struct ipu_isys *isys = ipu_bus_get_drvdata(adev); unsigned long flags; int ret; if (!isys) return 0; ret = ipu_mmu_hw_init(adev->mmu); if (ret) return ret; ipu_trace_restore(dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); #else pm_qos_update_request(&isys->pm_qos, 
ISYS_PM_QOS_VALUE); #endif ret = ipu_buttress_start_tsc_sync(isp); if (ret) return ret; spin_lock_irqsave(&isys->power_lock, flags); isys->power = 1; spin_unlock_irqrestore(&isys->power_lock, flags); if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { mutex_lock(&isys->short_packet_tracing_mutex); isys->short_packet_tracing_count = 0; mutex_unlock(&isys->short_packet_tracing_mutex); } isys_setup_hw(isys); set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON); return 0; } static int isys_runtime_pm_suspend(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_isys *isys = ipu_bus_get_drvdata(adev); unsigned long flags; if (!isys) return 0; spin_lock_irqsave(&isys->power_lock, flags); isys->power = 0; spin_unlock_irqrestore(&isys->power_lock, flags); ipu_trace_stop(dev); mutex_lock(&isys->mutex); isys->reset_needed = false; mutex_unlock(&isys->mutex); isys->phy_termcal_val = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); #else pm_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); #endif ipu_mmu_hw_cleanup(adev->mmu); set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF); return 0; } static int isys_suspend(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_isys *isys = ipu_bus_get_drvdata(adev); /* If stream is open, refuse to suspend */ if (isys->stream_opened) return -EBUSY; return 0; } static int isys_resume(struct device *dev) { return 0; } static const struct dev_pm_ops isys_pm_ops = { .runtime_suspend = isys_runtime_pm_suspend, .runtime_resume = isys_runtime_pm_resume, .suspend = isys_suspend, .resume = isys_resume, }; #define ISYS_PM_OPS (&isys_pm_ops) #else #define ISYS_PM_OPS NULL #endif static void isys_remove(struct ipu_bus_device *adev) { struct ipu_isys *isys = ipu_bus_get_drvdata(adev); struct ipu_device *isp = adev->isp; struct isys_fw_msgs *fwmsg, *safe; dev_info(&adev->dev, "removed\n"); #ifdef CONFIG_DEBUG_FS if (isp->ipu_dir) debugfs_remove_recursive(isys->debugfsdir); #endif list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) { dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs), fwmsg, fwmsg->dma_addr, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) NULL); #else 0); #endif } list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) { dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs), fwmsg, fwmsg->dma_addr, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) NULL #else 0 #endif ); } isys_iwake_watermark_cleanup(isys); ipu_trace_uninit(&adev->dev); isys_notifier_cleanup(isys); isys_unregister_devices(isys); #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) cpu_latency_qos_remove_request(&isys->pm_qos); #else pm_qos_remove_request(&isys->pm_qos); #endif if (!isp->secure_mode) { ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, isys->pkg_dir_dma_addr, isys->pkg_dir_size); ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); release_firmware(isys->fw); } mutex_destroy(&isys->stream_mutex); mutex_destroy(&isys->mutex); if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) dma_free_coherent(&adev->dev, trace_size, isys->short_packet_trace_buffer, isys->short_packet_trace_buffer_dma_addr); #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) struct dma_attrs attrs; init_dma_attrs(&attrs); dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); dma_free_attrs(&adev->dev, trace_size, 
isys->short_packet_trace_buffer, isys->short_packet_trace_buffer_dma_addr, &attrs); #else unsigned long attrs; attrs = DMA_ATTR_NON_CONSISTENT; dma_free_attrs(&adev->dev, trace_size, isys->short_packet_trace_buffer, isys->short_packet_trace_buffer_dma_addr, attrs); #endif } } #ifdef CONFIG_DEBUG_FS static int ipu_isys_icache_prefetch_get(void *data, u64 *val) { struct ipu_isys *isys = data; *val = isys->icache_prefetch; return 0; } static int ipu_isys_icache_prefetch_set(void *data, u64 val) { struct ipu_isys *isys = data; if (val != !!val) return -EINVAL; isys->icache_prefetch = val; return 0; } static int isys_iwake_control_get(void *data, u64 *val) { struct ipu_isys *isys = data; struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark; mutex_lock(&iwake_watermark->mutex); *val = isys->iwake_watermark->force_iwake_disable; mutex_unlock(&iwake_watermark->mutex); return 0; } static int isys_iwake_control_set(void *data, u64 val) { struct ipu_isys *isys = data; struct isys_iwake_watermark *iwake_watermark; if (val != !!val) return -EINVAL; /* If stream is open, refuse to set iwake */ if (isys->stream_opened) return -EBUSY; iwake_watermark = isys->iwake_watermark; mutex_lock(&iwake_watermark->mutex); isys->iwake_watermark->force_iwake_disable = !!val; mutex_unlock(&iwake_watermark->mutex); return 0; } DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops, ipu_isys_icache_prefetch_get, ipu_isys_icache_prefetch_set, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(isys_iwake_control_fops, isys_iwake_control_get, isys_iwake_control_set, "%llu\n"); static int ipu_isys_init_debugfs(struct ipu_isys *isys) { struct dentry *file; struct dentry *dir; #ifdef IPU_ISYS_GPC int ret; #endif dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir); if (IS_ERR(dir)) return -ENOMEM; file = debugfs_create_file("icache_prefetch", 0600, dir, isys, &isys_icache_prefetch_fops); if (IS_ERR(file)) goto err; file = debugfs_create_file("iwake_disable", 0600, dir, isys, &isys_iwake_control_fops); if (IS_ERR(file)) goto err; isys->debugfsdir = dir; #ifdef IPU_ISYS_GPC ret = ipu_isys_gpc_init_debugfs(isys); if (ret) return ret; #endif return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif static int alloc_fw_msg_bufs(struct ipu_isys *isys, int amount) { dma_addr_t dma_addr; struct isys_fw_msgs *addr; unsigned int i; unsigned long flags; for (i = 0; i < amount; i++) { addr = dma_alloc_attrs(&isys->adev->dev, sizeof(struct isys_fw_msgs), &dma_addr, GFP_KERNEL, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) NULL); #else 0); #endif if (!addr) break; addr->dma_addr = dma_addr; spin_lock_irqsave(&isys->listlock, flags); list_add(&addr->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } if (i == amount) return 0; spin_lock_irqsave(&isys->listlock, flags); while (!list_empty(&isys->framebuflist)) { addr = list_first_entry(&isys->framebuflist, struct isys_fw_msgs, head); list_del(&addr->head); spin_unlock_irqrestore(&isys->listlock, flags); dma_free_attrs(&isys->adev->dev, sizeof(struct isys_fw_msgs), addr, addr->dma_addr, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) NULL); #else 0); #endif spin_lock_irqsave(&isys->listlock, flags); } spin_unlock_irqrestore(&isys->listlock, flags); return -ENOMEM; } struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip) { struct ipu_isys_video *pipe_av = container_of(ip, struct ipu_isys_video, ip); struct ipu_isys *isys; struct isys_fw_msgs *msg; unsigned long flags; isys = pipe_av->isys; spin_lock_irqsave(&isys->listlock, flags); if 
(list_empty(&isys->framebuflist)) { spin_unlock_irqrestore(&isys->listlock, flags); dev_dbg(&isys->adev->dev, "Frame list empty - Allocate more"); alloc_fw_msg_bufs(isys, 5); spin_lock_irqsave(&isys->listlock, flags); if (list_empty(&isys->framebuflist)) { spin_unlock_irqrestore(&isys->listlock, flags); dev_err(&isys->adev->dev, "Frame list empty"); return NULL; } } msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); list_move(&msg->head, &isys->framebuflist_fw); spin_unlock_irqrestore(&isys->listlock, flags); memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); return msg; } void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys) { struct isys_fw_msgs *fwmsg, *fwmsg0; unsigned long flags; spin_lock_irqsave(&isys->listlock, flags); list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) list_move(&fwmsg->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } void ipu_put_fw_mgs_buf(struct ipu_isys *isys, u64 data) { struct isys_fw_msgs *msg; unsigned long flags; u64 *ptr = (u64 *)(unsigned long)data; if (!ptr) return; spin_lock_irqsave(&isys->listlock, flags); msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); list_move(&msg->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } static int isys_probe(struct ipu_bus_device *adev) { struct ipu_isys *isys; struct ipu_device *isp = adev->isp; const struct firmware *fw; int rval = 0; isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL); if (!isys) return -ENOMEM; rval = ipu_mmu_hw_init(adev->mmu); if (rval) return rval; /* By default, short packet is captured from T-Unit. */ isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_RECEIVER; isys->adev = adev; isys->pdata = adev->pdata; /* initial streamID for different sensor types */ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) { isys->sensor_info.vc1_data_start = IPU6_FW_ISYS_VC1_SENSOR_DATA_START; isys->sensor_info.vc1_data_end = IPU6_FW_ISYS_VC1_SENSOR_DATA_END; isys->sensor_info.vc0_data_start = IPU6_FW_ISYS_VC0_SENSOR_DATA_START; isys->sensor_info.vc0_data_end = IPU6_FW_ISYS_VC0_SENSOR_DATA_END; isys->sensor_info.vc1_pdaf_start = IPU6_FW_ISYS_VC1_SENSOR_PDAF_START; isys->sensor_info.vc1_pdaf_end = IPU6_FW_ISYS_VC1_SENSOR_PDAF_END; isys->sensor_info.sensor_metadata = IPU6_FW_ISYS_SENSOR_METADATA; isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_DATA] = IPU6_FW_ISYS_VC1_SENSOR_DATA_START; isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_PDAF] = IPU6_FW_ISYS_VC1_SENSOR_PDAF_START; isys->sensor_types[IPU_FW_ISYS_VC0_SENSOR_DATA] = IPU6_FW_ISYS_VC0_SENSOR_DATA_START; } else if (ipu_ver == IPU_VER_6SE) { isys->sensor_info.vc1_data_start = IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START; isys->sensor_info.vc1_data_end = IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END; isys->sensor_info.vc0_data_start = IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START; isys->sensor_info.vc0_data_end = IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END; isys->sensor_info.vc1_pdaf_start = IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START; isys->sensor_info.vc1_pdaf_end = IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END; isys->sensor_info.sensor_metadata = IPU6SE_FW_ISYS_SENSOR_METADATA; isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_DATA] = IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START; isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_PDAF] = IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START; isys->sensor_types[IPU_FW_ISYS_VC0_SENSOR_DATA] = IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START; } INIT_LIST_HEAD(&isys->requests); spin_lock_init(&isys->lock); spin_lock_init(&isys->power_lock); isys->power = 0; isys->phy_termcal_val = 0; 
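/*
 * The rest of probe: initialise the driver mutexes and firmware message
 * buffer lists, map the firmware image and build the package directory
 * (in non-secure mode), then register the media/V4L2 devices and the
 * iwake watermark bookkeeping.
 */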
mutex_init(&isys->mutex); mutex_init(&isys->stream_mutex); mutex_init(&isys->lib_mutex); spin_lock_init(&isys->listlock); INIT_LIST_HEAD(&isys->framebuflist); INIT_LIST_HEAD(&isys->framebuflist_fw); dev_dbg(&adev->dev, "isys probe %p %p\n", adev, &adev->dev); ipu_bus_set_drvdata(adev, isys); isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN; isys->icache_prefetch = 0; #ifndef CONFIG_PM isys_setup_hw(isys); #endif if (!isp->secure_mode) { fw = isp->cpd_fw; rval = ipu_buttress_map_fw_image(adev, fw, &isys->fw_sgt); if (rval) goto release_firmware; isys->pkg_dir = ipu_cpd_create_pkg_dir(adev, isp->cpd_fw->data, sg_dma_address(isys->fw_sgt.sgl), &isys->pkg_dir_dma_addr, &isys->pkg_dir_size); if (!isys->pkg_dir) { rval = -ENOMEM; goto remove_shared_buffer; } } #ifdef CONFIG_DEBUG_FS /* Debug fs failure is not fatal. */ ipu_isys_init_debugfs(isys); #endif ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev, isys_trace_blocks); #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); #else pm_qos_add_request(&isys->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); #endif alloc_fw_msg_bufs(isys, 20); rval = isys_register_devices(isys); if (rval) goto out_remove_pkg_dir_shared_buffer; rval = isys_iwake_watermark_init(isys); if (rval) goto out_unregister_devices; ipu_mmu_hw_cleanup(adev->mmu); return 0; out_unregister_devices: isys_iwake_watermark_cleanup(isys); isys_unregister_devices(isys); out_remove_pkg_dir_shared_buffer: cpu_latency_qos_remove_request(&isys->pm_qos); if (!isp->secure_mode) ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, isys->pkg_dir_dma_addr, isys->pkg_dir_size); remove_shared_buffer: if (!isp->secure_mode) ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); release_firmware: if (!isp->secure_mode) release_firmware(isys->fw); ipu_trace_uninit(&adev->dev); mutex_destroy(&isys->mutex); mutex_destroy(&isys->stream_mutex); if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) mutex_destroy(&isys->short_packet_tracing_mutex); ipu_mmu_hw_cleanup(adev->mmu); return rval; } struct fwmsg { int type; char *msg; bool valid_ts; }; static const struct fwmsg fw_msg[] = { {IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, {IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, {IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, "STREAM_START_AND_CAPTURE_ACK", 0}, {IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0}, {IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, {IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, "STREAM_START_AND_CAPTURE_DONE", 1}, {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, {IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, {IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, {IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, {-1, "UNKNOWN MESSAGE", 0}, }; static int resp_type_to_index(int type) { unsigned int i; for (i = 0; i < ARRAY_SIZE(fw_msg); i++) if (fw_msg[i].type == type) return i; return i - 1; } int isys_isr_one(struct ipu_bus_device *adev) { struct ipu_isys *isys = ipu_bus_get_drvdata(adev); struct ipu_fw_isys_resp_info_abi resp_data; struct ipu_fw_isys_resp_info_abi *resp; struct ipu_isys_pipeline *pipe; u64 ts; unsigned int i; if (!isys->fwcom) return 0; resp = ipu_fw_isys_get_resp(isys->fwcom, 
IPU_BASE_MSG_RECV_QUEUES, &resp_data); if (!resp) return 1; ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0]; if (resp->error_info.error == IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) /* Suspension is kind of special case: not enough buffers */ dev_dbg(&adev->dev, "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n", resp->type, fw_msg[resp_type_to_index(resp->type)].msg, resp->stream_handle, resp->error_info.error_details, fw_msg[resp_type_to_index(resp->type)].valid_ts ? ts : 0, resp->pin_id); else if (resp->error_info.error) dev_dbg(&adev->dev, "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n", resp->type, fw_msg[resp_type_to_index(resp->type)].msg, resp->stream_handle, resp->error_info.error, resp->error_info.error_details, fw_msg[resp_type_to_index(resp->type)].valid_ts ? ts : 0, resp->pin_id); else dev_dbg(&adev->dev, "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n", resp->type, fw_msg[resp_type_to_index(resp->type)].msg, resp->stream_handle, fw_msg[resp_type_to_index(resp->type)].valid_ts ? ts : 0, resp->pin_id); if (resp->stream_handle >= IPU_ISYS_MAX_STREAMS) { dev_err(&adev->dev, "bad stream handle %u\n", resp->stream_handle); goto leave; } pipe = isys->pipes[resp->stream_handle]; if (!pipe) { dev_err(&adev->dev, "no pipeline for stream %u\n", resp->stream_handle); goto leave; } pipe->error = resp->error_info.error; switch (resp->type) { case IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: complete(&pipe->stream_open_completion); break; case IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: complete(&pipe->stream_close_completion); break; case IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK: complete(&pipe->stream_start_completion); break; case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: complete(&pipe->stream_start_completion); break; case IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: complete(&pipe->stream_stop_completion); break; case IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: complete(&pipe->stream_stop_completion); break; case IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY: /* * firmware only release the capture msg until software * get pin_data_ready event */ ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev), resp->buf_id); if (resp->pin_id < IPU_ISYS_OUTPUT_PINS && pipe->output_pins[resp->pin_id].pin_ready) pipe->output_pins[resp->pin_id].pin_ready(pipe, resp); else dev_err(&adev->dev, "%d:No data pin ready handler for pin id %d\n", resp->stream_handle, resp->pin_id); if (pipe->csi2) ipu_isys_csi2_error(pipe->csi2); break; case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: break; case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: if (pipe->interlaced) { struct ipu_isys_buffer *ib, *ib_safe; struct list_head list; unsigned long flags; unsigned int *ts = resp->timestamp; if (pipe->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) pipe->cur_field = ipu_isys_csi2_get_current_field(pipe, ts); /* * Move the pending buffers to a local temp list. * Then we do not need to handle the lock during * the loop. 
*/ spin_lock_irqsave(&pipe->short_packet_queue_lock, flags); list_cut_position(&list, &pipe->pending_interlaced_bufs, pipe->pending_interlaced_bufs.prev); spin_unlock_irqrestore(&pipe->short_packet_queue_lock, flags); list_for_each_entry_safe(ib, ib_safe, &list, head) { struct vb2_buffer *vb; vb = ipu_isys_buffer_to_vb2_buffer(ib); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb->v4l2_buf.field = pipe->cur_field; #else to_vb2_v4l2_buffer(vb)->field = pipe->cur_field; #endif list_del(&ib->head); ipu_isys_queue_buf_done(ib); } } for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) if (pipe->capture_done[i]) pipe->capture_done[i] (pipe, resp); break; case IPU_FW_ISYS_RESP_TYPE_FRAME_SOF: if (pipe->csi2) ipu_isys_csi2_sof_event(pipe->csi2); pipe->seq[pipe->seq_index].sequence = atomic_read(&pipe->sequence) - 1; pipe->seq[pipe->seq_index].timestamp = ts; dev_dbg(&adev->dev, "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", resp->stream_handle, pipe->seq[pipe->seq_index].sequence, ts); pipe->seq_index = (pipe->seq_index + 1) % IPU_ISYS_MAX_PARALLEL_SOF; break; case IPU_FW_ISYS_RESP_TYPE_FRAME_EOF: if (pipe->csi2) ipu_isys_csi2_eof_event(pipe->csi2); dev_dbg(&adev->dev, "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", resp->stream_handle, pipe->seq[pipe->seq_index].sequence, ts); break; case IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY: break; default: dev_err(&adev->dev, "%d:unknown response type %u\n", resp->stream_handle, resp->type); break; } leave: ipu_fw_isys_put_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES); return 0; } static struct ipu_bus_driver isys_driver = { .probe = isys_probe, .remove = isys_remove, .isr = isys_isr, .wanted = IPU_ISYS_NAME, .drv = { .name = IPU_ISYS_NAME, .owner = THIS_MODULE, .pm = ISYS_PM_OPS, }, }; module_ipu_bus_driver(isys_driver); static const struct pci_device_id ipu_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_P_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_N_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_RPL_P_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_MTL_PCI_ID)}, {0,} }; MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); MODULE_AUTHOR("Sakari Ailus "); MODULE_AUTHOR("Samu Onkalo "); MODULE_AUTHOR("Jouni Högander "); MODULE_AUTHOR("Jouni Ukkonen "); MODULE_AUTHOR("Jianxu Zheng "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_AUTHOR("Renwei Wu "); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Yunliang Ding "); MODULE_AUTHOR("Zaikuo Wang "); MODULE_AUTHOR("Leifu Zhao "); MODULE_AUTHOR("Xia Wu "); MODULE_AUTHOR("Kun Jiang "); MODULE_AUTHOR("Yu Xia "); MODULE_AUTHOR("Jerry Hu "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu input system driver"); #if IS_ENABLED(CONFIG_IPU_BRIDGE) MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-isys.h000066400000000000000000000151161471702545500252730ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_ISYS_H #define IPU_ISYS_H #include #include #include #include #include #include "ipu.h" #include "ipu-isys-media.h" #include "ipu-isys-csi2.h" #include "ipu-isys-csi2-be.h" #include "ipu-isys-video.h" #include "ipu-pdata.h" #include "ipu-fw-isys.h" #include "ipu-platform-isys.h" #define IPU_ISYS_2600_MEM_LINE_ALIGN 64 /* for TPG */ #define IPU_ISYS_FREQ 533000000UL /* * Current message queue configuration. These must be big enough * so that they never gets full. 
Queues are located in system memory */ #define IPU_ISYS_SIZE_RECV_QUEUE 40 #define IPU_ISYS_SIZE_SEND_QUEUE 40 #define IPU_ISYS_SIZE_PROXY_RECV_QUEUE 5 #define IPU_ISYS_SIZE_PROXY_SEND_QUEUE 5 #define IPU_ISYS_NUM_RECV_QUEUE 1 /* * Device close takes some time from last ack message to actual stopping * of the SP processor. As long as the SP processor runs we can't proceed with * clean up of resources. */ #define IPU_ISYS_OPEN_TIMEOUT_US 1000 #define IPU_ISYS_OPEN_RETRY 1000 #define IPU_ISYS_TURNOFF_DELAY_US 1000 #define IPU_ISYS_TURNOFF_TIMEOUT 1000 #define IPU_LIB_CALL_TIMEOUT_JIFFIES \ msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) #define IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE 32 #define IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE 32 #define IPU_ISYS_MIN_WIDTH 1U #define IPU_ISYS_MIN_HEIGHT 1U #define IPU_ISYS_MAX_WIDTH 16384U #define IPU_ISYS_MAX_HEIGHT 16384U #ifdef CONFIG_IPU_SINGLE_BE_SOC_DEVICE #define NR_OF_CSI2_BE_SOC_DEV 1 #else #define NR_OF_CSI2_BE_SOC_DEV 8 #endif /* the threshold granularity is 2KB on IPU6 */ #define IPU6_SRAM_GRANULRITY_SHIFT 11 #define IPU6_SRAM_GRANULRITY_SIZE 2048 /* the threshold granularity is 1KB on IPU6SE */ #define IPU6SE_SRAM_GRANULRITY_SHIFT 10 #define IPU6SE_SRAM_GRANULRITY_SIZE 1024 struct task_struct; struct ltr_did { union { u32 value; struct { u8 val0; u8 val1; u8 val2; u8 val3; } bits; } lut_ltr; union { u32 value; struct { u8 th0; u8 th1; u8 th2; u8 th3; } bits; } lut_fill_time; }; struct isys_iwake_watermark { bool iwake_enabled; bool force_iwake_disable; u32 iwake_threshold; u64 isys_pixelbuffer_datarate; struct ltr_did ltrdid; struct mutex mutex; /* protect whole struct */ struct ipu_isys *isys; struct list_head video_list; }; struct ipu_isys_sensor_info { unsigned int vc1_data_start; unsigned int vc1_data_end; unsigned int vc0_data_start; unsigned int vc0_data_end; unsigned int vc1_pdaf_start; unsigned int vc1_pdaf_end; unsigned int sensor_metadata; }; /* * struct ipu_isys * * @media_dev: Media device * @v4l2_dev: V4L2 device * @adev: ISYS bus device * @power: Is ISYS powered on or not? * @isr_bits: Which bits does the ISR handle? 
* @power_lock: Serialise access to power (power state in general) * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers * @lock: serialise access to pipes * @pipes: pipelines per stream ID * @fwcom: fw communication layer private pointer * or optional external library private pointer * @line_align: line alignment in memory * @phy_termcal_val: the termination calibration value, only used for DWC PHY * @reset_needed: Isys requires d0i0->i3 transition * @video_opened: total number of opened file handles on video nodes * @mutex: serialise access isys video open/release related operations * @stream_mutex: serialise stream start and stop, queueing requests * @lib_mutex: optional external library mutex * @pdata: platform data pointer * @csi2: CSI-2 receivers * @csi2_be: CSI-2 back-ends * @fw: ISYS firmware binary (unsecure firmware) * @fw_sgt: fw scatterlist * @pkg_dir: host pointer to pkg_dir * @pkg_dir_dma_addr: I/O virtual address for pkg_dir * @pkg_dir_size: size of pkg_dir in bytes * @short_packet_source: select short packet capture mode */ struct ipu_isys { struct media_device media_dev; struct v4l2_device v4l2_dev; struct ipu_bus_device *adev; int power; spinlock_t power_lock; /* Serialise access to power */ u32 isr_csi2_bits; u32 csi2_rx_ctrl_cached; spinlock_t lock; /* Serialise access to pipes */ struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS]; void *fwcom; unsigned int line_align; u32 phy_termcal_val; bool reset_needed; bool icache_prefetch; bool csi2_cse_ipc_not_supported; unsigned int video_opened; unsigned int stream_opened; struct ipu_isys_sensor_info sensor_info; unsigned int sensor_types[N_IPU_FW_ISYS_SENSOR_TYPE]; #ifdef CONFIG_DEBUG_FS struct dentry *debugfsdir; #endif struct mutex mutex; /* Serialise isys video open/release related */ struct mutex stream_mutex; /* Stream start, stop, queueing reqs */ struct mutex lib_mutex; /* Serialise optional external library mutex */ struct ipu_isys_pdata *pdata; struct ipu_isys_csi2 *csi2; struct ipu_isys_csi2_be csi2_be; struct ipu_isys_csi2_be_soc csi2_be_soc[NR_OF_CSI2_BE_SOC_DEV]; const struct firmware *fw; struct sg_table fw_sgt; u64 *pkg_dir; dma_addr_t pkg_dir_dma_addr; unsigned int pkg_dir_size; struct list_head requests; struct pm_qos_request pm_qos; unsigned int short_packet_source; struct ipu_isys_csi2_monitor_message *short_packet_trace_buffer; dma_addr_t short_packet_trace_buffer_dma_addr; unsigned int short_packet_tracing_count; struct mutex short_packet_tracing_mutex; /* For tracing count */ u64 tsc_timer_base; u64 tunit_timer_base; spinlock_t listlock; /* Protect framebuflist */ struct list_head framebuflist; struct list_head framebuflist_fw; struct v4l2_async_notifier notifier; struct isys_iwake_watermark *iwake_watermark; }; void update_watermark_setting(struct ipu_isys *isys); struct isys_fw_msgs { union { u64 dummy; struct ipu_fw_isys_frame_buff_set_abi frame; struct ipu_fw_isys_stream_cfg_data_abi stream; } fw_msg; struct list_head head; dma_addr_t dma_addr; }; #define to_frame_msg_buf(a) (&(a)->fw_msg.frame) #define to_stream_cfg_msg_buf(a) (&(a)->fw_msg.stream) #define to_dma_addr(a) ((a)->dma_addr) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) int ipu_pipeline_pm_use(struct media_entity *entity, int use); #endif struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip); void ipu_put_fw_mgs_buf(struct ipu_isys *isys, u64 data); void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys); extern const struct v4l2_ioctl_ops ipu_isys_ioctl_ops; void isys_setup_hw(struct ipu_isys *isys); 
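/*
 * Note on the firmware message buffer pool (clarifying comment only; based
 * on ipu_get_fw_msg_buf()/ipu_put_fw_mgs_buf() in ipu-isys.c):
 *
 * ipu_get_fw_msg_buf() takes a struct isys_fw_msgs entry off framebuflist
 * and parks it on framebuflist_fw while the firmware owns it. When the
 * response carrying the 64-bit token comes back (resp->buf_id handled in
 * isys_isr_one()), the entry is recovered with
 *
 *	u64 *ptr = (u64 *)(unsigned long)data;
 *	msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
 *
 * which works because fw_msg.dummy aliases the start of the fw_msg union,
 * so the token points at the beginning of the pool entry's message area.
 * How the token is originally handed to the firmware is not shown in this
 * header; it is presumably the address of fw_msg recorded at queueing time.
 */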
int isys_isr_one(struct ipu_bus_device *adev); irqreturn_t isys_isr(struct ipu_bus_device *adev); #ifdef IPU_ISYS_GPC int ipu_isys_gpc_init_debugfs(struct ipu_isys *isys); #endif #endif /* IPU_ISYS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-mmu.c000066400000000000000000000503721471702545500251000ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include "ipu.h" #include "ipu-platform.h" #include "ipu-dma.h" #include "ipu-mmu.h" #include "ipu-platform-regs.h" #define ISP_PAGE_SHIFT 12 #define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) #define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) #define ISP_L1PT_SHIFT 22 #define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) #define ISP_L2PT_SHIFT 12 #define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) #define ISP_L1PT_PTES 1024 #define ISP_L2PT_PTES 1024 #define ISP_PADDR_SHIFT 12 #define REG_TLB_INVALIDATE 0x0000 #define REG_L1_PHYS 0x0004 /* 27-bit pfn */ #define REG_INFO 0x0008 /* The range of stream ID i in L1 cache is from 0 to 15 */ #define MMUV2_REG_L1_STREAMID(i) (0x0c + ((i) * 4)) /* The range of stream ID i in L2 cache is from 0 to 15 */ #define MMUV2_REG_L2_STREAMID(i) (0x4c + ((i) * 4)) #define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) static void tlb_invalidate(struct ipu_mmu *mmu) { unsigned int i; unsigned long flags; spin_lock_irqsave(&mmu->ready_lock, flags); if (!mmu->ready) { spin_unlock_irqrestore(&mmu->ready_lock, flags); return; } for (i = 0; i < mmu->nr_mmus; i++) { /* * To avoid the HW bug induced dead lock in some of the IPU * MMUs on successive invalidate calls, we need to first do a * read to the page table base before writing the invalidate * register. MMUs which need to implement this WA, will have * the insert_read_before_invalidate flags set as true. * Disregard the return value of the read. */ if (mmu->mmu_hw[i].insert_read_before_invalidate) readl(mmu->mmu_hw[i].base + REG_L1_PHYS); writel(0xffffffff, mmu->mmu_hw[i].base + REG_TLB_INVALIDATE); /* * The TLB invalidation is a "single cycle" (IOMMU clock cycles) * When the actual MMIO write reaches the IPU TLB Invalidate * register, wmb() will force the TLB invalidate out if the CPU * attempts to update the IOMMU page table (or sooner). 
*/ wmb(); } spin_unlock_irqrestore(&mmu->ready_lock, flags); } #ifdef DEBUG static void page_table_dump(struct ipu_mmu_info *mmu_info) { u32 l1_idx; dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n"); for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { u32 l2_idx; u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT; if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) continue; dev_dbg(mmu_info->dev, "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %p\n", l1_idx, iova, iova + ISP_PAGE_SIZE, (void *)TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx])); for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { u32 *l2_pt = mmu_info->l2_pts[l1_idx]; u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval) continue; dev_dbg(mmu_info->dev, "\tl2 entry %u; iova 0x%8.8x, phys %p\n", l2_idx, iova2, (void *)TBL_PHYS_ADDR(l2_pt[l2_idx])); } } dev_dbg(mmu_info->dev, "end IOMMU page table dump\n"); } #endif /* DEBUG */ static dma_addr_t map_single(struct ipu_mmu_info *mmu_info, void *ptr) { dma_addr_t dma; dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(mmu_info->dev, dma)) return 0; return dma; } static int get_dummy_page(struct ipu_mmu_info *mmu_info) { dma_addr_t dma; void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); if (!pt) return -ENOMEM; dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt); dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map dummy page\n"); goto err_free_page; } mmu_info->dummy_page = pt; mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; return 0; err_free_page: free_page((unsigned long)pt); return -ENOMEM; } static void free_dummy_page(struct ipu_mmu_info *mmu_info) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_page); } static int alloc_dummy_l2_pt(struct ipu_mmu_info *mmu_info) { dma_addr_t dma; u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); int i; if (!pt) return -ENOMEM; dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt); dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map l2pt page\n"); goto err_free_page; } for (i = 0; i < ISP_L2PT_PTES; i++) pt[i] = mmu_info->dummy_page_pteval; mmu_info->dummy_l2_pt = pt; mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; return 0; err_free_page: free_page((unsigned long)pt); return -ENOMEM; } static void free_dummy_l2_pt(struct ipu_mmu_info *mmu_info) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_l2_pt); } static u32 *alloc_l1_pt(struct ipu_mmu_info *mmu_info) { dma_addr_t dma; u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); int i; if (!pt) return NULL; dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt); for (i = 0; i < ISP_L1PT_PTES; i++) pt[i] = mmu_info->dummy_l2_pteval; dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map l1pt page\n"); goto err_free_page; } mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma); return pt; err_free_page: free_page((unsigned long)pt); return NULL; } static u32 *alloc_l2_pt(struct ipu_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); int i; if (!pt) return NULL; dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt); for (i = 0; i < ISP_L1PT_PTES; i++) pt[i] = 
mmu_info->dummy_page_pteval; return pt; } static void l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova, phys_addr_t dummy, size_t size); static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size) { struct device *dev = mmu_info->dev; u32 l1_idx; u32 l1_entry; u32 *l2_pt, *l2_virt; unsigned int l2_idx; unsigned long flags; dma_addr_t dma; unsigned int l2_entries; size_t mapped = 0; int err = 0; spin_lock_irqsave(&mmu_info->lock, flags); paddr = ALIGN(paddr, ISP_PAGE_SIZE); for (l1_idx = iova >> ISP_L1PT_SHIFT; size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { dev_dbg(dev, "mapping l2 page table for l1 index %u (iova %8.8x)\n", l1_idx, (u32)iova); l1_entry = mmu_info->l1_pt[l1_idx]; if (l1_entry == mmu_info->dummy_l2_pteval) { l2_virt = mmu_info->l2_pts[l1_idx]; if (likely(!l2_virt)) { l2_virt = alloc_l2_pt(mmu_info); if (!l2_virt) { err = -ENOMEM; goto error; } } dma = map_single(mmu_info, l2_virt); if (!dma) { dev_err(dev, "Failed to map l2pt page\n"); free_page((unsigned long)l2_virt); err = -EINVAL; goto error; } l1_entry = dma >> ISP_PADDR_SHIFT; dev_dbg(dev, "page for l1_idx %u %p allocated\n", l1_idx, l2_virt); mmu_info->l1_pt[l1_idx] = l1_entry; mmu_info->l2_pts[l1_idx] = l2_virt; clflush_cache_range(&mmu_info->l1_pt[l1_idx], sizeof(mmu_info->l1_pt[l1_idx])); } l2_pt = mmu_info->l2_pts[l1_idx]; l2_entries = 0; for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]); iova += ISP_PAGE_SIZE; paddr += ISP_PAGE_SIZE; mapped += ISP_PAGE_SIZE; size -= ISP_PAGE_SIZE; l2_entries++; } WARN_ON_ONCE(!l2_entries); clflush_cache_range(&l2_pt[l2_idx - l2_entries], sizeof(l2_pt[0]) * l2_entries); } spin_unlock_irqrestore(&mmu_info->lock, flags); return 0; error: spin_unlock_irqrestore(&mmu_info->lock, flags); /* unroll mapping in case something went wrong */ if (mapped) l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped); return err; } static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size) { u32 iova_start = round_down(iova, ISP_PAGE_SIZE); u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); dev_dbg(mmu_info->dev, "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n", iova_start, iova_end, size, paddr); return l2_map(mmu_info, iova_start, paddr, size); } static void l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova, phys_addr_t dummy, size_t size) { u32 l1_idx; u32 *l2_pt; unsigned int l2_idx; unsigned int l2_entries; size_t unmapped = 0; unsigned long flags; spin_lock_irqsave(&mmu_info->lock, flags); for (l1_idx = iova >> ISP_L1PT_SHIFT; size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) { dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n", l1_idx, iova); if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { dev_err(mmu_info->dev, "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n", iova, l1_idx); continue; } l2_pt = mmu_info->l2_pts[l1_idx]; l2_entries = 0; for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) { dev_dbg(mmu_info->dev, "unmap l2 index %u with pteval 0x%10.10llx\n", l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx])); l2_pt[l2_idx] = mmu_info->dummy_page_pteval; iova += ISP_PAGE_SIZE; unmapped += ISP_PAGE_SIZE; size -= ISP_PAGE_SIZE; l2_entries++; } WARN_ON_ONCE(!l2_entries); clflush_cache_range(&l2_pt[l2_idx - l2_entries], 
sizeof(l2_pt[0]) * l2_entries); } WARN_ON_ONCE(size); spin_unlock_irqrestore(&mmu_info->lock, flags); } static void __ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova, size_t size) { return l2_unmap(mmu_info, iova, 0, size); } static int allocate_trash_buffer(struct ipu_mmu *mmu) { unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT; struct iova *iova; u32 iova_addr; unsigned int i; dma_addr_t dma; int ret; /* Allocate 8MB in iova range */ iova = alloc_iova(&mmu->dmap->iovad, n_pages, mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0); if (!iova) { dev_err(mmu->dev, "cannot allocate iova range for trash\n"); return -ENOMEM; } dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) { dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n"); ret = -ENOMEM; goto out_free_iova; } mmu->pci_trash_page = dma; /* * Map the 8MB iova address range to the same physical trash page * mmu->trash_page which is already reserved at the probe */ iova_addr = iova->pfn_lo; for (i = 0; i < n_pages; i++) { ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT, mmu->pci_trash_page, PAGE_SIZE); if (ret) { dev_err(mmu->dev, "mapping trash buffer range failed\n"); goto out_unmap; } iova_addr++; } mmu->iova_trash_page = iova->pfn_lo << PAGE_SHIFT; dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n", mmu->mmid, (unsigned int)mmu->iova_trash_page); return 0; out_unmap: ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT, (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page, PAGE_SIZE, DMA_BIDIRECTIONAL); out_free_iova: __free_iova(&mmu->dmap->iovad, iova); return ret; } int ipu_mmu_hw_init(struct ipu_mmu *mmu) { unsigned int i; unsigned long flags; struct ipu_mmu_info *mmu_info; dev_dbg(mmu->dev, "mmu hw init\n"); mmu_info = mmu->dmap->mmu_info; /* Initialise the each MMU HW block */ for (i = 0; i < mmu->nr_mmus; i++) { struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i]; unsigned int j; u16 block_addr; /* Write page table address per MMU */ writel((phys_addr_t)mmu_info->l1_pt_dma, mmu->mmu_hw[i].base + REG_L1_PHYS); /* Set info bits per MMU */ writel(mmu->mmu_hw[i].info_bits, mmu->mmu_hw[i].base + REG_INFO); /* Configure MMU TLB stream configuration for L1 */ for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams; block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) { if (block_addr > IPU_MAX_LI_BLOCK_ADDR) { dev_err(mmu->dev, "invalid L1 configuration\n"); return -EINVAL; } /* Write block start address for each streams */ writel(block_addr, mmu_hw->base + mmu_hw->l1_stream_id_reg_offset + 4 * j); } /* Configure MMU TLB stream configuration for L2 */ for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams; block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) { if (block_addr > IPU_MAX_L2_BLOCK_ADDR) { dev_err(mmu->dev, "invalid L2 configuration\n"); return -EINVAL; } writel(block_addr, mmu_hw->base + mmu_hw->l2_stream_id_reg_offset + 4 * j); } } if (!mmu->trash_page) { int ret; mmu->trash_page = alloc_page(GFP_KERNEL); if (!mmu->trash_page) { dev_err(mmu->dev, "insufficient memory for trash buffer\n"); return -ENOMEM; } ret = allocate_trash_buffer(mmu); if (ret) { __free_page(mmu->trash_page); mmu->trash_page = NULL; dev_err(mmu->dev, "trash buffer allocation failed\n"); return ret; } } spin_lock_irqsave(&mmu->ready_lock, flags); mmu->ready = true; spin_unlock_irqrestore(&mmu->ready_lock, flags); return 0; } 
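/*
 * Worked example of the two-level walk implemented by l2_map()/l2_unmap()
 * above (illustrative only, assuming the usual 4 KiB page size):
 *
 *	ISP_L1PT_SHIFT = 22: one L1 entry spans 4 MiB of IOVA space
 *	ISP_L2PT_SHIFT = 12: one L2 entry spans one 4 KiB page
 *
 *	iova   = 0x12345000
 *	l1_idx = iova >> ISP_L1PT_SHIFT                   = 0x48
 *	l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT = 0x345
 *	pte    = l2_pts[0x48][0x345] = paddr >> ISP_PADDR_SHIFT
 *
 * The trash buffer set up by allocate_trash_buffer() follows the sizing in
 * ipu-pdata.h: IPU_MMUV2_TRASH_RANGE = (1024 * PAGE_SIZE) * 2 = 8 MiB, so
 * 2048 consecutive IOVA pages are all mapped to the single physical
 * trash_page; ZLW prefetches issued around TLB invalidation therefore
 * never touch real buffers.
 */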
EXPORT_SYMBOL(ipu_mmu_hw_init); static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp) { struct ipu_mmu_info *mmu_info; int ret; mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL); if (!mmu_info) return NULL; mmu_info->aperture_start = 0; mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ? IPU_MMU_ADDRESS_BITS : IPU_MMU_ADDRESS_BITS_NON_SECURE); mmu_info->pgsize_bitmap = SZ_4K; mmu_info->dev = &isp->pdev->dev; ret = get_dummy_page(mmu_info); if (ret) goto err_free_info; ret = alloc_dummy_l2_pt(mmu_info); if (ret) goto err_free_dummy_page; mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts)); if (!mmu_info->l2_pts) goto err_free_dummy_l2_pt; /* * We always map the L1 page table (a single page as well as * the L2 page tables). */ mmu_info->l1_pt = alloc_l1_pt(mmu_info); if (!mmu_info->l1_pt) goto err_free_l2_pts; spin_lock_init(&mmu_info->lock); dev_dbg(mmu_info->dev, "domain initialised\n"); return mmu_info; err_free_l2_pts: vfree(mmu_info->l2_pts); err_free_dummy_l2_pt: free_dummy_l2_pt(mmu_info); err_free_dummy_page: free_dummy_page(mmu_info); err_free_info: kfree(mmu_info); return NULL; } int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu) { unsigned long flags; spin_lock_irqsave(&mmu->ready_lock, flags); mmu->ready = false; spin_unlock_irqrestore(&mmu->ready_lock, flags); return 0; } EXPORT_SYMBOL(ipu_mmu_hw_cleanup); static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp) { struct ipu_dma_mapping *dmap; dmap = kzalloc(sizeof(*dmap), GFP_KERNEL); if (!dmap) return NULL; dmap->mmu_info = ipu_mmu_alloc(isp); if (!dmap->mmu_info) { kfree(dmap); return NULL; } #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) init_iova_domain(&dmap->iovad, dmap->mmu_info->aperture_end >> PAGE_SHIFT); #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) init_iova_domain(&dmap->iovad, SZ_4K, 1, dmap->mmu_info->aperture_end >> PAGE_SHIFT); #else init_iova_domain(&dmap->iovad, SZ_4K, 1); #endif dmap->mmu_info->dmap = dmap; kref_init(&dmap->ref); dev_dbg(&isp->pdev->dev, "alloc mapping\n"); iova_cache_get(); return dmap; } phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info, dma_addr_t iova) { unsigned long flags; u32 *l2_pt; phys_addr_t phy_addr; spin_lock_irqsave(&mmu_info->lock, flags); l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT]; phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT]; phy_addr <<= ISP_PAGE_SHIFT; spin_unlock_irqrestore(&mmu_info->lock, flags); return phy_addr; } /* drivers/iommu/iommu.c:iommu_unmap() */ void ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova, size_t size) { unsigned int min_pagesz; dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size); /* find out the minimum page size supported */ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); /* * The virtual address, as well as the size of the mapping, must be * aligned (at least) to the size of the smallest page supported * by the hardware */ if (!IS_ALIGNED(iova | size, min_pagesz)) { dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n", iova, size, min_pagesz); return; } return __ipu_mmu_unmap(mmu_info, iova, size); } /* drivers/iommu/iommu.c:iommu_map() */ int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size) { unsigned int min_pagesz; if (mmu_info->pgsize_bitmap == 0UL) return -ENODEV; /* find out the minimum page size supported */ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap); /* * both the virtual address and the physical one, as well as * the size of the mapping, 
must be aligned (at least) to the * size of the smallest page supported by the hardware */ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { dev_err(mmu_info->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", iova, &paddr, size, min_pagesz); return -EINVAL; } dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); return __ipu_mmu_map(mmu_info, iova, paddr, size); } static void ipu_mmu_destroy(struct ipu_mmu *mmu) { struct ipu_dma_mapping *dmap = mmu->dmap; struct ipu_mmu_info *mmu_info = dmap->mmu_info; struct iova *iova; u32 l1_idx; if (mmu->iova_trash_page) { iova = find_iova(&dmap->iovad, mmu->iova_trash_page >> PAGE_SHIFT); if (iova) { /* unmap and free the trash buffer iova */ ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT, (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); __free_iova(&dmap->iovad, iova); } else { dev_err(mmu->dev, "trash buffer iova not found.\n"); } mmu->iova_trash_page = 0; dma_unmap_page(mmu_info->dev, mmu->pci_trash_page, PAGE_SIZE, DMA_BIDIRECTIONAL); mmu->pci_trash_page = 0; __free_page(mmu->trash_page); } for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->l2_pts[l1_idx]); } } free_dummy_page(mmu_info); dma_unmap_single(mmu_info->dev, mmu_info->l1_pt_dma << ISP_PADDR_SHIFT, PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_l2_pt); free_page((unsigned long)mmu_info->l1_pt); kfree(mmu_info); } struct ipu_mmu *ipu_mmu_init(struct device *dev, void __iomem *base, int mmid, const struct ipu_hw_variants *hw) { struct ipu_mmu *mmu; struct ipu_mmu_pdata *pdata; struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); unsigned int i; if (hw->nr_mmus > IPU_MMU_MAX_DEVICES) return ERR_PTR(-EINVAL); pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); for (i = 0; i < hw->nr_mmus; i++) { struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i]; if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS || src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS) return ERR_PTR(-EINVAL); *pdata_mmu = *src_mmu; pdata_mmu->base = base + src_mmu->offset; } mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL); if (!mmu) return ERR_PTR(-ENOMEM); mmu->mmid = mmid; mmu->mmu_hw = pdata->mmu_hw; mmu->nr_mmus = hw->nr_mmus; mmu->tlb_invalidate = tlb_invalidate; mmu->ready = false; INIT_LIST_HEAD(&mmu->vma_list); spin_lock_init(&mmu->ready_lock); mmu->dmap = alloc_dma_mapping(isp); if (!mmu->dmap) { dev_err(dev, "can't alloc dma mapping\n"); return ERR_PTR(-ENOMEM); } return mmu; } void ipu_mmu_cleanup(struct ipu_mmu *mmu) { struct ipu_dma_mapping *dmap = mmu->dmap; ipu_mmu_destroy(mmu); mmu->dmap = NULL; iova_cache_put(); put_iova_domain(&dmap->iovad); kfree(dmap); } MODULE_AUTHOR("Sakari Ailus "); MODULE_AUTHOR("Samu Onkalo "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu mmu driver"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-mmu.h000066400000000000000000000033341471702545500251010ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_MMU_H #define IPU_MMU_H #include #include "ipu.h" #include "ipu-pdata.h" #define ISYS_MMID 1 #define PSYS_MMID 0 /* * @pgtbl: virtual address of the l1 page table (one page) */ struct ipu_mmu_info { struct 
device *dev; u32 __iomem *l1_pt; u32 l1_pt_dma; u32 **l2_pts; u32 *dummy_l2_pt; u32 dummy_l2_pteval; void *dummy_page; u32 dummy_page_pteval; dma_addr_t aperture_start; dma_addr_t aperture_end; unsigned long pgsize_bitmap; spinlock_t lock; /* Serialize access to users */ struct ipu_dma_mapping *dmap; }; /* * @pgtbl: physical address of the l1 page table */ struct ipu_mmu { struct list_head node; struct ipu_mmu_hw *mmu_hw; unsigned int nr_mmus; int mmid; phys_addr_t pgtbl; struct device *dev; struct ipu_dma_mapping *dmap; struct list_head vma_list; struct page *trash_page; dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ dma_addr_t iova_trash_page; /* IOVA for IPU child nodes to use */ bool ready; spinlock_t ready_lock; /* Serialize access to bool ready */ void (*tlb_invalidate)(struct ipu_mmu *mmu); }; struct ipu_mmu *ipu_mmu_init(struct device *dev, void __iomem *base, int mmid, const struct ipu_hw_variants *hw); void ipu_mmu_cleanup(struct ipu_mmu *mmu); int ipu_mmu_hw_init(struct ipu_mmu *mmu); int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu); int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size); void ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova, size_t size); phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info, dma_addr_t iova); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-pdata.h000066400000000000000000000201371471702545500253740ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_PDATA_H #define IPU_PDATA_H #define IPU_MMU_NAME IPU_NAME "-mmu" #define IPU_ISYS_CSI2_NAME IPU_NAME "-csi2" #define IPU_ISYS_NAME IPU_NAME "-isys" #define IPU_PSYS_NAME IPU_NAME "-psys" #define IPU_BUTTRESS_NAME IPU_NAME "-buttress" #define IPU_MMU_MAX_DEVICES 4 #define IPU_MMU_ADDRESS_BITS 32 /* The firmware is accessible within the first 2 GiB only in non-secure mode. */ #define IPU_MMU_ADDRESS_BITS_NON_SECURE 31 #define IPU_MMU_MAX_TLB_L1_STREAMS 32 #define IPU_MMU_MAX_TLB_L2_STREAMS 32 #define IPU_MAX_LI_BLOCK_ADDR 128 #define IPU_MAX_L2_BLOCK_ADDR 64 #define IPU_ISYS_MAX_CSI2_LEGACY_PORTS 4 #define IPU_ISYS_MAX_CSI2_COMBO_PORTS 2 #define IPU_MAX_FRAME_COUNTER 0xff /* * To maximize the IOSF utlization, IPU need to send requests in bursts. * At the DMA interface with the buttress, there are CDC FIFOs with burst * collection capability. CDC FIFO burst collectors have a configurable * threshold and is configured based on the outcome of performance measurements. * * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 * * Threshold values are pre-defined and are arrived at after performance * evaluations on a type of IPU */ #define IPU_MAX_VC_IOSF_PORTS 4 /* * IPU must configure correct arbitration mechanism related to the IOSF VC * requests. There are two options per VC0 and VC1 - > 0 means rearbitrate on * stall and 1 means stall until the request is completed. 
*/ #define IPU_BTRS_ARB_MODE_TYPE_REARB 0 #define IPU_BTRS_ARB_MODE_TYPE_STALL 1 /* Currently chosen arbitration mechanism for VC0 */ #define IPU_BTRS_ARB_STALL_MODE_VC0 \ IPU_BTRS_ARB_MODE_TYPE_REARB /* Currently chosen arbitration mechanism for VC1 */ #define IPU_BTRS_ARB_STALL_MODE_VC1 \ IPU_BTRS_ARB_MODE_TYPE_REARB struct ipu_isys_subdev_pdata; /* * MMU Invalidation HW bug workaround by ZLW mechanism * * Old IPU MMUV2 has a bug in the invalidation mechanism which might result in * wrong translation or replication of the translation. This will cause data * corruption. So we cannot directly use the MMU V2 invalidation registers * to invalidate the MMU. Instead, whenever an invalidate is called, we need to * clear the TLB by evicting all the valid translations by filling it with trash * buffer (which is guaranteed not to be used by any other processes). ZLW is * used to fill the L1 and L2 caches with the trash buffer translations. ZLW * or Zero length write, is pre-fetch mechanism to pre-fetch the pages in * advance to the L1 and L2 caches without triggering any memory operations. * * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream * One L1 block has 16 entries, hence points to 16 * 4K pages * L2 -> 16 streams and 32 blocks. 2 blocks per streams * One L2 block maps to 1024 L1 entries, hence points to 4MB address range * 2 blocks per L2 stream means, 1 stream points to 8MB range * * As we need to clear the caches and 8MB being the biggest cache size, we need * to have trash buffer which points to 8MB address range. As these trash * buffers are not used for any memory transactions, we need only the least * amount of physical memory. So we reserve 8MB IOVA address range but only * one page is reserved from physical memory. Each of this 8MB IOVA address * range is then mapped to the same physical memory page. */ /* One L2 entry maps 1024 L1 entries and one L1 entry per page */ #define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE) /* Max L2 blocks per stream */ #define IPU_MMUV2_MAX_L2_BLOCKS 2 /* Max L1 blocks per stream */ #define IPU_MMUV2_MAX_L1_BLOCKS 16 #define IPU_MMUV2_TRASH_RANGE (IPU_MMUV2_L2_RANGE * \ IPU_MMUV2_MAX_L2_BLOCKS) /* Entries per L1 block */ #define MMUV2_ENTRIES_PER_L1_BLOCK 16 #define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * \ PAGE_SIZE) #define MMUV2_TRASH_L2_BLOCK_OFFSET IPU_MMUV2_L2_RANGE /* * In some of the IPU MMUs, there is provision to configure L1 and L2 page * table caches. Both these L1 and L2 caches are divided into multiple sections * called streams. There is maximum 16 streams for both caches. Each of these * sections are subdivided into multiple blocks. When nr_l1streams = 0 and * nr_l2streams = 0, means the MMU is of type MMU_V1 and do not support * L1/L2 page table caches. * * L1 stream per block sizes are configurable and varies per usecase. * L2 has constant block sizes - 2 blocks per stream. * * MMU1 support pre-fetching of the pages to have less cache lookup misses. To * enable the pre-fetching, MMU1 AT (Address Translator) device registers * need to be configured. * * There are four types of memory accesses which requires ZLW configuration. * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU. * * 1. Sequential Access or 1D mode * Set ZLW_EN -> 1 * set ZLW_PAGE_CROSS_1D -> 1 * Set ZLW_N to "N" pages so that ZLW will be inserte N pages ahead where * N is pre-defined and hardcoded in the platform data * Set ZLW_2D -> 0 * * 2. 
ZLW 2D mode * Set ZLW_EN -> 1 * set ZLW_PAGE_CROSS_1D -> 1, * Set ZLW_N -> 0 * Set ZLW_2D -> 1 * * 3. ZLW Enable (no 1D or 2D mode) * Set ZLW_EN -> 1 * set ZLW_PAGE_CROSS_1D -> 0, * Set ZLW_N -> 0 * Set ZLW_2D -> 0 * * 4. ZLW disable * Set ZLW_EN -> 0 * set ZLW_PAGE_CROSS_1D -> 0, * Set ZLW_N -> 0 * Set ZLW_2D -> 0 * * To configure the ZLW for the above memory access, four registers are * available. Hence to track these four settings, we have the following entries * in the struct ipu_mmu_hw. Each of these entries are per stream and * available only for the L1 streams. * * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN) * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary * c. l1_ins_zlw_ahead_pages -> to track how advance the ZLW need to be inserted * Insert ZLW request N pages ahead address. * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D) * * * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined * as per the usecase specific calculations. Any change to this pre-defined * table has to happen in sync with IPU FW. */ struct ipu_mmu_hw { union { unsigned long offset; void __iomem *base; }; unsigned int info_bits; u8 nr_l1streams; /* * L1 has variable blocks per stream - total of 64 blocks and maximum of * 16 blocks per stream. Configurable by using the block start address * per stream. Block start address is calculated from the block size */ u8 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS]; /* Is ZLW is enabled in each stream */ bool l1_zlw_en[IPU_MMU_MAX_TLB_L1_STREAMS]; bool l1_zlw_1d_mode[IPU_MMU_MAX_TLB_L1_STREAMS]; u8 l1_ins_zlw_ahead_pages[IPU_MMU_MAX_TLB_L1_STREAMS]; bool l1_zlw_2d_mode[IPU_MMU_MAX_TLB_L1_STREAMS]; u32 l1_stream_id_reg_offset; u32 l2_stream_id_reg_offset; u8 nr_l2streams; /* * L2 has fixed 2 blocks per stream. Block address is calculated * from the block size */ u8 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS]; /* flag to track if WA is needed for successive invalidate HW bug */ bool insert_read_before_invalidate; }; struct ipu_mmu_pdata { unsigned int nr_mmus; struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES]; int mmid; }; struct ipu_isys_csi2_pdata { void __iomem *base; }; #define IPU_EV_AUTO 0xff struct ipu_isys_internal_csi2_pdata { unsigned int nports; unsigned int *offsets; }; /* * One place to handle all the IPU HW variations */ struct ipu_hw_variants { unsigned long offset; unsigned int nr_mmus; struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES]; u8 cdc_fifos; u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS]; u32 dmem_offset; u32 spc_offset; /* SPC offset from psys base */ }; struct ipu_isys_internal_pdata { struct ipu_isys_internal_csi2_pdata csi2; struct ipu_hw_variants hw_variant; u32 num_parallel_streams; u32 isys_dma_overshoot; }; struct ipu_isys_pdata { void __iomem *base; const struct ipu_isys_internal_pdata *ipdata; }; struct ipu_psys_internal_pdata { struct ipu_hw_variants hw_variant; }; struct ipu_psys_pdata { void __iomem *base; const struct ipu_psys_internal_pdata *ipdata; }; #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-trace.c000066400000000000000000000543741471702545500254060ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2014 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-platform-regs.h" #include "ipu-trace.h" /* * enabling ipu trace need a 96 MB buffer. 
*/ static bool ipu_trace_enable; module_param(ipu_trace_enable, bool, 0660); MODULE_PARM_DESC(ipu_trace_enable, "IPU trace enable"); struct trace_register_range { u32 start; u32 end; }; #define MEMORY_RING_BUFFER_SIZE (SZ_1M * 96) #define TRACE_MESSAGE_SIZE 16 /* * It looks that the trace unit sometimes writes outside the given buffer. * To avoid memory corruption one extra page is reserved at the end * of the buffer. Read also the extra area since it may contain valid data. */ #define MEMORY_RING_BUFFER_GUARD PAGE_SIZE #define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD #define MAX_TRACE_REGISTERS 200 #define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) #define TRACE_CONF_DATA_MAX_LEN (1024 * 4) #define WPT_TRACE_CONF_DATA_MAX_LEN (1024 * 64) struct config_value { u32 reg; u32 value; }; struct ipu_trace_buffer { dma_addr_t dma_handle; void *memory_buffer; }; struct ipu_subsystem_wptrace_config { bool open; char *conf_dump_buffer; int size_conf_dump; unsigned int fill_level; struct config_value config[MAX_TRACE_REGISTERS]; }; struct ipu_subsystem_trace_config { u32 offset; void __iomem *base; struct ipu_trace_buffer memory; /* ring buffer */ struct device *dev; struct ipu_trace_block *blocks; unsigned int fill_level; /* Nbr of regs in config table below */ bool running; /* Cached register values */ struct config_value config[MAX_TRACE_REGISTERS]; /* watchpoint trace info */ struct ipu_subsystem_wptrace_config wpt; }; struct ipu_trace { struct mutex lock; /* Protect ipu trace operations */ bool open; char *conf_dump_buffer; int size_conf_dump; struct ipu_subsystem_trace_config isys; struct ipu_subsystem_trace_config psys; }; static void __ipu_trace_restore(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_device *isp = adev->isp; struct ipu_trace *trace = isp->trace; struct config_value *config; struct ipu_subsystem_trace_config *sys = adev->trace_cfg; struct ipu_trace_block *blocks; u32 mapped_trace_buffer; void __iomem *addr = NULL; int i; if (trace->open) { dev_info(dev, "Trace control file open. Skipping update\n"); return; } if (!sys) return; /* leave if no trace configuration for this subsystem */ if (sys->fill_level == 0) return; /* Find trace unit base address */ blocks = sys->blocks; while (blocks->type != IPU_TRACE_BLOCK_END) { if (blocks->type == IPU_TRACE_BLOCK_TUN) { addr = sys->base + blocks->offset; break; } blocks++; } if (!addr) return; if (!sys->memory.memory_buffer) { sys->memory.memory_buffer = #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) dma_alloc_coherent(dev, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, &sys->memory.dma_handle, GFP_KERNEL); #else dma_alloc_attrs(dev, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, &sys->memory.dma_handle, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); #endif } if (!sys->memory.memory_buffer) { dev_err(dev, "No memory for tracing. 
Trace unit disabled\n"); return; } config = sys->config; mapped_trace_buffer = sys->memory.dma_handle; /* ring buffer base */ writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); /* ring buffer end */ writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); /* Infobits for ddr trace */ writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, addr + TRACE_REG_TUN_DDR_INFO_VAL); /* Find trace timer reset address */ addr = NULL; blocks = sys->blocks; while (blocks->type != IPU_TRACE_BLOCK_END) { if (blocks->type == IPU_TRACE_TIMER_RST) { addr = sys->base + blocks->offset; break; } blocks++; } if (!addr) { dev_err(dev, "No trace reset addr\n"); return; } /* Remove reset from trace timers */ writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); /* Register config received from userspace */ for (i = 0; i < sys->fill_level; i++) { dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n", config[i].reg, config[i].value); writel(config[i].value, isp->base + config[i].reg); } /* Register wpt config received from userspace, and only psys has wpt */ config = sys->wpt.config; for (i = 0; i < sys->wpt.fill_level; i++) { dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n", config[i].reg, config[i].value); writel(config[i].value, isp->base + config[i].reg); } sys->running = true; } void ipu_trace_restore(struct device *dev) { struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace; if (!trace) return; mutex_lock(&trace->lock); __ipu_trace_restore(dev); mutex_unlock(&trace->lock); } EXPORT_SYMBOL_GPL(ipu_trace_restore); static void __ipu_trace_stop(struct device *dev) { struct ipu_subsystem_trace_config *sys = to_ipu_bus_device(dev)->trace_cfg; struct ipu_trace_block *blocks; if (!sys) return; if (!sys->running) return; sys->running = false; /* Turn off all the gpc blocks */ blocks = sys->blocks; while (blocks->type != IPU_TRACE_BLOCK_END) { if (blocks->type == IPU_TRACE_BLOCK_GPC) { writel(0, sys->base + blocks->offset + TRACE_REG_GPC_OVERALL_ENABLE); } blocks++; } /* Turn off all the trace monitors */ blocks = sys->blocks; while (blocks->type != IPU_TRACE_BLOCK_END) { if (blocks->type == IPU_TRACE_BLOCK_TM) { writel(0, sys->base + blocks->offset + TRACE_REG_TM_TRACE_ENABLE_NPK); writel(0, sys->base + blocks->offset + TRACE_REG_TM_TRACE_ENABLE_DDR); } blocks++; } /* Turn off trace units */ blocks = sys->blocks; while (blocks->type != IPU_TRACE_BLOCK_END) { if (blocks->type == IPU_TRACE_BLOCK_TUN) { writel(0, sys->base + blocks->offset + TRACE_REG_TUN_DDR_ENABLE); writel(0, sys->base + blocks->offset + TRACE_REG_TUN_NPK_ENABLE); } blocks++; } } void ipu_trace_stop(struct device *dev) { struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace; if (!trace) return; mutex_lock(&trace->lock); __ipu_trace_stop(dev); mutex_unlock(&trace->lock); } EXPORT_SYMBOL_GPL(ipu_trace_stop); static int update_register_cache(struct ipu_device *isp, u32 reg, u32 value) { struct ipu_trace *dctrl = isp->trace; struct ipu_subsystem_trace_config *sys; int rval = -EINVAL; if (dctrl->isys.offset == dctrl->psys.offset) { /* For the IPU with uniform address space */ if (reg >= IPU_ISYS_OFFSET && reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) sys = &dctrl->isys; else if (reg >= IPU_PSYS_OFFSET && reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) sys = &dctrl->psys; else goto error; } else { if (dctrl->isys.offset && reg >= dctrl->isys.offset && reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) sys = &dctrl->isys; else if (dctrl->psys.offset && reg >= 
dctrl->psys.offset && reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) sys = &dctrl->psys; else goto error; } if (sys->fill_level < MAX_TRACE_REGISTERS) { dev_dbg(sys->dev, "Trace reg addr 0x%08x value 0x%08x\n", reg, value); sys->config[sys->fill_level].reg = reg; sys->config[sys->fill_level].value = value; sys->fill_level++; } else { rval = -ENOMEM; goto error; } return 0; error: dev_info(&isp->pdev->dev, "Trace register address 0x%08x ignored as invalid register\n", reg); return rval; } static void traceconf_dump(struct ipu_device *isp) { struct ipu_subsystem_trace_config *sys[2] = { &isp->trace->isys, &isp->trace->psys }; int i, j, rem_size; char *out; isp->trace->size_conf_dump = 0; out = isp->trace->conf_dump_buffer; rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; for (j = 0; j < ARRAY_SIZE(sys); j++) { for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { int bytes_print; int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", sys[j]->config[i].reg, sys[j]->config[i].value); bytes_print = min(n, rem_size - 1); rem_size -= bytes_print; out += bytes_print; } } isp->trace->size_conf_dump = out - isp->trace->conf_dump_buffer; } static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) { if (!sys->memory.memory_buffer) return; memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_OVERREAD); dma_sync_single_for_device(sys->dev, sys->memory.dma_handle, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); } static int traceconf_open(struct inode *inode, struct file *file) { int ret; struct ipu_device *isp; if (!inode->i_private) return -EACCES; isp = inode->i_private; ret = mutex_trylock(&isp->trace->lock); if (!ret) return -EBUSY; if (isp->trace->open) { mutex_unlock(&isp->trace->lock); return -EBUSY; } file->private_data = isp; isp->trace->open = 1; if (file->f_mode & FMODE_WRITE) { /* TBD: Allocate temp buffer for processing. 
* Push validated buffer to active config */ /* Forget old config if opened for write */ isp->trace->isys.fill_level = 0; isp->trace->psys.fill_level = 0; isp->trace->psys.wpt.fill_level = 0; } if (file->f_mode & FMODE_READ) { isp->trace->conf_dump_buffer = vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); if (!isp->trace->conf_dump_buffer) { isp->trace->open = 0; mutex_unlock(&isp->trace->lock); return -ENOMEM; } traceconf_dump(isp); } mutex_unlock(&isp->trace->lock); return 0; } static ssize_t traceconf_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct ipu_device *isp = file->private_data; return simple_read_from_buffer(buf, len, ppos, isp->trace->conf_dump_buffer, isp->trace->size_conf_dump); } static ssize_t traceconf_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { int i; struct ipu_device *isp = file->private_data; ssize_t bytes = 0; char *ipu_trace_buffer = NULL; size_t buffer_size = 0; u32 ipu_trace_number = 0; struct config_value *cfg_buffer = NULL; if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) || (len < sizeof(ipu_trace_number))) { dev_info(&isp->pdev->dev, "length is error, len:%ld, loff:%lld\n", len, *ppos); return -EINVAL; } ipu_trace_buffer = vzalloc(len); if (!ipu_trace_buffer) return -ENOMEM; bytes = copy_from_user(ipu_trace_buffer, buf, len); if (bytes != 0) { vfree(ipu_trace_buffer); return -EFAULT; } memcpy(&ipu_trace_number, ipu_trace_buffer, sizeof(u32)); buffer_size = ipu_trace_number * sizeof(struct config_value); if ((buffer_size + sizeof(ipu_trace_number)) != len) { dev_info(&isp->pdev->dev, "File size is not right, len:%ld, buffer_size:%zu\n", len, buffer_size); vfree(ipu_trace_buffer); return -EFAULT; } mutex_lock(&isp->trace->lock); cfg_buffer = (struct config_value *)(ipu_trace_buffer + sizeof(u32)); for (i = 0; i < ipu_trace_number; i++) { update_register_cache(isp, cfg_buffer[i].reg, cfg_buffer[i].value); } mutex_unlock(&isp->trace->lock); vfree(ipu_trace_buffer); return len; } static int traceconf_release(struct inode *inode, struct file *file) { struct ipu_device *isp = file->private_data; struct device *psys_dev = isp->psys ? &isp->psys->dev : NULL; struct device *isys_dev = isp->isys ? &isp->isys->dev : NULL; int isys_pm_rval = -EINVAL; int psys_pm_rval = -EINVAL; /* * Turn devices on outside trace->lock mutex. PM transition may * cause call to function which tries to take the same lock. * Also do this before trace->open is set back to 0 to avoid * double restore (one here and one in pm transition). We can't * rely purely on the restore done by pm call backs since trace * configuration can occur in any phase compared to other activity. 
*/ if (file->f_mode & FMODE_WRITE) { if (isys_dev) isys_pm_rval = pm_runtime_resume_and_get(isys_dev); if (isys_pm_rval >= 0 && psys_dev) psys_pm_rval = pm_runtime_resume_and_get(psys_dev); } mutex_lock(&isp->trace->lock); isp->trace->open = 0; vfree(isp->trace->conf_dump_buffer); isp->trace->conf_dump_buffer = NULL; /* Update new cfg to HW */ if (isys_pm_rval >= 0) { __ipu_trace_stop(isys_dev); clear_trace_buffer(isp->isys->trace_cfg); __ipu_trace_restore(isys_dev); } if (psys_pm_rval >= 0) { __ipu_trace_stop(psys_dev); clear_trace_buffer(isp->psys->trace_cfg); __ipu_trace_restore(psys_dev); } mutex_unlock(&isp->trace->lock); if (psys_pm_rval >= 0) pm_runtime_put(psys_dev); if (isys_pm_rval >= 0) pm_runtime_put(isys_dev); return 0; } static const struct file_operations ipu_traceconf_fops = { .owner = THIS_MODULE, .open = traceconf_open, .release = traceconf_release, .read = traceconf_read, .write = traceconf_write, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) .llseek = no_llseek, #endif }; static void wptraceconf_dump(struct ipu_device *isp) { struct ipu_subsystem_wptrace_config *sys = &isp->trace->psys.wpt; int i, rem_size; char *out; sys->size_conf_dump = 0; out = sys->conf_dump_buffer; rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; for (i = 0; i < sys->fill_level && rem_size > 0; i++) { int bytes_print; int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", sys->config[i].reg, sys->config[i].value); bytes_print = min(n, rem_size - 1); rem_size -= bytes_print; out += bytes_print; } sys->size_conf_dump = out - sys->conf_dump_buffer; } static int wptraceconf_open(struct inode *inode, struct file *file) { int ret; struct ipu_device *isp; if (!inode->i_private) return -EACCES; isp = inode->i_private; ret = mutex_trylock(&isp->trace->lock); if (!ret) return -EBUSY; if (isp->trace->psys.wpt.open) { mutex_unlock(&isp->trace->lock); return -EBUSY; } file->private_data = isp; if (file->f_mode & FMODE_WRITE) { /* TBD: Allocate temp buffer for processing. 
* Push validated buffer to active config */ /* Forget old config if opened for write */ isp->trace->psys.wpt.fill_level = 0; } if (file->f_mode & FMODE_READ) { isp->trace->psys.wpt.conf_dump_buffer = vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); if (!isp->trace->psys.wpt.conf_dump_buffer) { mutex_unlock(&isp->trace->lock); return -ENOMEM; } wptraceconf_dump(isp); } mutex_unlock(&isp->trace->lock); return 0; } static ssize_t wptraceconf_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct ipu_device *isp = file->private_data; return simple_read_from_buffer(buf, len, ppos, isp->trace->psys.wpt.conf_dump_buffer, isp->trace->psys.wpt.size_conf_dump); } static ssize_t wptraceconf_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { int i; struct ipu_device *isp = file->private_data; ssize_t bytes = 0; char *wpt_info_buffer = NULL; size_t buffer_size = 0; u32 wp_node_number = 0; struct config_value *wpt_buffer = NULL; struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt; if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) || (len < sizeof(wp_node_number))) { dev_info(&isp->pdev->dev, "length is error, len:%ld, loff:%lld\n", len, *ppos); return -EINVAL; } wpt_info_buffer = vzalloc(len); if (!wpt_info_buffer) return -ENOMEM; bytes = copy_from_user(wpt_info_buffer, buf, len); if (bytes != 0) { vfree(wpt_info_buffer); return -EFAULT; } memcpy(&wp_node_number, wpt_info_buffer, sizeof(u32)); buffer_size = wp_node_number * sizeof(struct config_value); if ((buffer_size + sizeof(wp_node_number)) != len) { dev_info(&isp->pdev->dev, "File size is not right, len:%ld, buffer_size:%zu\n", len, buffer_size); vfree(wpt_info_buffer); return -EFAULT; } mutex_lock(&isp->trace->lock); wpt_buffer = (struct config_value *)(wpt_info_buffer + sizeof(u32)); for (i = 0; i < wp_node_number; i++) { if (wpt->fill_level < MAX_TRACE_REGISTERS) { wpt->config[wpt->fill_level].reg = wpt_buffer[i].reg; wpt->config[wpt->fill_level].value = wpt_buffer[i].value; wpt->fill_level++; } else { dev_info(&isp->pdev->dev, "Address 0x%08x ignored as invalid register\n", wpt_buffer[i].reg); break; } } mutex_unlock(&isp->trace->lock); vfree(wpt_info_buffer); return len; } static int wptraceconf_release(struct inode *inode, struct file *file) { struct ipu_device *isp = file->private_data; mutex_lock(&isp->trace->lock); isp->trace->open = 0; vfree(isp->trace->psys.wpt.conf_dump_buffer); isp->trace->psys.wpt.conf_dump_buffer = NULL; mutex_unlock(&isp->trace->lock); return 0; } static const struct file_operations ipu_wptraceconf_fops = { .owner = THIS_MODULE, .open = wptraceconf_open, .release = wptraceconf_release, .read = wptraceconf_read, .write = wptraceconf_write, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) .llseek = no_llseek, #endif }; static int gettrace_open(struct inode *inode, struct file *file) { struct ipu_subsystem_trace_config *sys = inode->i_private; if (!sys) return -EACCES; if (!sys->memory.memory_buffer) return -EACCES; dma_sync_single_for_cpu(sys->dev, sys->memory.dma_handle, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); file->private_data = sys; return 0; }; static ssize_t gettrace_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct ipu_subsystem_trace_config *sys = file->private_data; return simple_read_from_buffer(buf, len, ppos, sys->memory.memory_buffer, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_OVERREAD); } static ssize_t gettrace_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { 
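	/*
	 * Illustrative note on the debugfs write interface: this node only
	 * accepts the literal string "clear", which resets the trace ring
	 * buffer, e.g.
	 *
	 *	echo clear > /sys/kernel/debug/<ipu dir>/getisystrace
	 *
	 * The parent directory is created elsewhere, so the path above is an
	 * assumption. Any other payload is rejected with -EINVAL below. The
	 * traceconf/wptraceconf nodes, by contrast, take a binary blob of a
	 * u32 entry count followed by that many {u32 reg; u32 value} pairs,
	 * as parsed in traceconf_write()/wptraceconf_write() above.
	 */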
struct ipu_subsystem_trace_config *sys = file->private_data; static const char str[] = "clear"; char buffer[sizeof(str)] = { 0 }; ssize_t ret; ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); if (ret < 0) return ret; if (ret < sizeof(str) - 1) return -EINVAL; if (!strncmp(str, buffer, sizeof(str) - 1)) { clear_trace_buffer(sys); return len; } return -EINVAL; } static int gettrace_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations ipu_gettrace_fops = { .owner = THIS_MODULE, .open = gettrace_open, .release = gettrace_release, .read = gettrace_read, .write = gettrace_write, #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) .llseek = no_llseek, #endif }; int ipu_trace_init(struct ipu_device *isp, void __iomem *base, struct device *dev, struct ipu_trace_block *blocks) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_trace *trace = isp->trace; struct ipu_subsystem_trace_config *sys; int ret = 0; if (!isp->trace) return 0; mutex_lock(&isp->trace->lock); if (dev == &isp->isys->dev) { sys = &trace->isys; } else if (dev == &isp->psys->dev) { sys = &trace->psys; } else { ret = -EINVAL; goto leave; } adev->trace_cfg = sys; sys->dev = dev; sys->offset = base - isp->base; /* sub system offset */ sys->base = base; sys->blocks = blocks; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) sys->memory.memory_buffer = dma_alloc_coherent(dev, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, &sys->memory.dma_handle, GFP_KERNEL); #else sys->memory.memory_buffer = dma_alloc_attrs(dev, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, &sys->memory.dma_handle, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); #endif if (!sys->memory.memory_buffer) dev_err(dev, "failed alloc memory for tracing.\n"); leave: mutex_unlock(&isp->trace->lock); return ret; } EXPORT_SYMBOL_GPL(ipu_trace_init); void ipu_trace_uninit(struct device *dev) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_device *isp = adev->isp; struct ipu_trace *trace = isp->trace; struct ipu_subsystem_trace_config *sys = adev->trace_cfg; if (!trace || !sys) return; mutex_lock(&trace->lock); if (sys->memory.memory_buffer) #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) dma_free_coherent(sys->dev, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, sys->memory.memory_buffer, sys->memory.dma_handle); #else dma_free_attrs(sys->dev, MEMORY_RING_BUFFER_SIZE + MEMORY_RING_BUFFER_GUARD, sys->memory.memory_buffer, sys->memory.dma_handle, DMA_ATTR_NON_CONSISTENT); #endif sys->dev = NULL; sys->memory.memory_buffer = NULL; mutex_unlock(&trace->lock); } EXPORT_SYMBOL_GPL(ipu_trace_uninit); int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir) { struct dentry *files[4]; int i = 0; if (!ipu_trace_enable) return 0; files[i] = debugfs_create_file("traceconf", 0644, dir, isp, &ipu_traceconf_fops); if (!files[i]) return -ENOMEM; i++; files[i] = debugfs_create_file("wptraceconf", 0644, dir, isp, &ipu_wptraceconf_fops); if (!files[i]) goto error; i++; files[i] = debugfs_create_file("getisystrace", 0444, dir, &isp->trace->isys, &ipu_gettrace_fops); if (!files[i]) goto error; i++; files[i] = debugfs_create_file("getpsystrace", 0444, dir, &isp->trace->psys, &ipu_gettrace_fops); if (!files[i]) goto error; return 0; error: for (; i > 0; i--) debugfs_remove(files[i - 1]); return -ENOMEM; } int ipu_trace_add(struct ipu_device *isp) { if (!ipu_trace_enable) return 0; isp->trace = devm_kzalloc(&isp->pdev->dev, sizeof(struct ipu_trace), GFP_KERNEL); if (!isp->trace) return 
-ENOMEM; mutex_init(&isp->trace->lock); dev_dbg(&isp->pdev->dev, "ipu trace enabled!"); return 0; } void ipu_trace_release(struct ipu_device *isp) { if (!isp->trace) return; mutex_destroy(&isp->trace->lock); } int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle) { struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_subsystem_trace_config *sys = adev->trace_cfg; if (!ipu_trace_enable) return -EACCES; if (!sys->memory.memory_buffer) return -EACCES; *dma_handle = sys->memory.dma_handle; return 0; } EXPORT_SYMBOL_GPL(ipu_trace_buffer_dma_handle); MODULE_AUTHOR("Samu Onkalo "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu trace support"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu-trace.h000066400000000000000000000124111471702545500253750ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2014 - 2024 Intel Corporation */ #ifndef IPU_TRACE_H #define IPU_TRACE_H #include /* Trace unit register offsets */ #define TRACE_REG_TUN_DDR_ENABLE 0x000 #define TRACE_REG_TUN_NPK_ENABLE 0x004 #define TRACE_REG_TUN_DDR_INFO_VAL 0x008 #define TRACE_REG_TUN_NPK_ADDR 0x00C #define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 #define TRACE_REG_TUN_DRAM_END_ADDR 0x014 #define TRACE_REG_TUN_LOCAL_TIMER0 0x018 #define TRACE_REG_TUN_LOCAL_TIMER1 0x01C #define TRACE_REG_TUN_WR_PTR 0x020 #define TRACE_REG_TUN_RD_PTR 0x024 /* * Following registers are left out on purpose: * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR */ /* Trace monitor register offsets */ #define TRACE_REG_TM_TRACE_ADDR_A 0x0900 #define TRACE_REG_TM_TRACE_ADDR_B 0x0904 #define TRACE_REG_TM_TRACE_ADDR_C 0x0908 #define TRACE_REG_TM_TRACE_ADDR_D 0x090c #define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 #define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 #define TRACE_REG_TM_TRACE_PER_PC 0x0918 #define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c #define TRACE_REG_TM_TRACE_HEADER 0x0920 #define TRACE_REG_TM_TRACE_CFG 0x0924 #define TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 #define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c #define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 #define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 #define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 #define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c #define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 #define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 #define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 #define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c #define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 #define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 #define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 #define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c #define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 #define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 #define TRACE_REG_TM_FWTRACE_LAST 0x0A08 /* * Following exists only in (I)SP address space: * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST */ #define TRACE_REG_GPC_RESET 0x000 #define TRACE_REG_GPC_OVERALL_ENABLE 0x004 #define TRACE_REG_GPC_TRACE_HEADER 0x008 #define TRACE_REG_GPC_TRACE_ADDRESS 0x00C #define TRACE_REG_GPC_TRACE_NPK_EN 0x010 #define TRACE_REG_GPC_TRACE_DDR_EN 0x014 #define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 #define TRACE_REG_GPC_TRACE_LPKT 0x01C #define TRACE_REG_GPC_ENABLE_ID0 0x020 #define TRACE_REG_GPC_ENABLE_ID1 0x024 #define TRACE_REG_GPC_ENABLE_ID2 0x028 #define TRACE_REG_GPC_ENABLE_ID3 0x02c #define TRACE_REG_GPC_VALUE_ID0 0x030 #define TRACE_REG_GPC_VALUE_ID1 0x034 #define TRACE_REG_GPC_VALUE_ID2 0x038 #define TRACE_REG_GPC_VALUE_ID3 0x03c #define 
TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 #define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 #define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 #define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c #define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 #define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 #define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 #define TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c #define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 #define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 #define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 #define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c #define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 #define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 #define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 #define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c #define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 #define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 #define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 #define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c #define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 #define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 #define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 #define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c #define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 #define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 #define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 #define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac #define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 #define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 #define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 #define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc struct ipu_trace; struct ipu_subsystem_trace_config; enum ipu_trace_block_type { IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ IPU_TRACE_BLOCK_TM, /* Trace monitor */ IPU_TRACE_BLOCK_GPC, /* General purpose control */ IPU_TRACE_CSI2, /* CSI2 legacy receiver */ IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ IPU_TRACE_SIG2CIOS, IPU_TRACE_TIMER_RST, /* Trace reset control timer */ IPU_TRACE_BLOCK_END /* End of list */ }; struct ipu_trace_block { u32 offset; /* Offset to block inside subsystem */ enum ipu_trace_block_type type; }; int ipu_trace_add(struct ipu_device *isp); int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir); void ipu_trace_release(struct ipu_device *isp); int ipu_trace_init(struct ipu_device *isp, void __iomem *base, struct device *dev, struct ipu_trace_block *blocks); void ipu_trace_restore(struct device *dev); void ipu_trace_uninit(struct device *dev); void ipu_trace_stop(struct device *dev); int ipu_trace_buffer_dma_handle(struct device *dev, dma_addr_t *dma_handle); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu.c000066400000000000000000000567261471702545500243150ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu.h" #include "ipu-buttress.h" #include "ipu-platform.h" #include "ipu-platform-buttress-regs.h" #include "ipu-cpd.h" #include "ipu-pdata.h" #include "ipu-bus.h" #include "ipu-mmu.h" #include "ipu-platform-regs.h" #include "ipu-platform-isys-csi2-reg.h" #include "ipu-trace.h" #if IS_ENABLED(CONFIG_IPU_BRIDGE) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) #include #elif defined(CONFIG_IPU_ISYS_BRIDGE) #include "cio2-bridge.h" #endif #define IPU_PCI_BAR 0 enum ipu_version ipu_ver; EXPORT_SYMBOL(ipu_ver); #if IS_ENABLED(CONFIG_IPU_BRIDGE) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) || \ 
defined(CONFIG_IPU_ISYS_BRIDGE) static int ipu_isys_check_fwnode_graph(struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint; if (IS_ERR_OR_NULL(fwnode)) return -EINVAL; endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL); if (endpoint) { fwnode_handle_put(endpoint); return 0; } return ipu_isys_check_fwnode_graph(fwnode->secondary); } #endif static struct ipu_bus_device *ipu_isys_init(struct pci_dev *pdev, struct device *parent, struct ipu_buttress_ctrl *ctrl, void __iomem *base, const struct ipu_isys_internal_pdata *ipdata, unsigned int nr) { struct ipu_bus_device *isys; struct ipu_isys_pdata *pdata; int ret; #if IS_ENABLED(CONFIG_IPU_BRIDGE) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) || \ defined(CONFIG_IPU_ISYS_BRIDGE) struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); ret = ipu_isys_check_fwnode_graph(fwnode); if (ret) { if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) { dev_err(&pdev->dev, "fwnode graph has no endpoints connection\n"); return ERR_PTR(-EINVAL); } #if defined(CONFIG_IPU_ISYS_BRIDGE) ret = cio2_bridge_init(pdev); if (ret) { dev_err_probe(&pdev->dev, ret, "ipu_isys_bridge_init() failed\n"); return ERR_PTR(ret); } #else ret = ipu_bridge_init(&pdev->dev, ipu_bridge_parse_ssdb); if (ret) { dev_err_probe(&pdev->dev, ret, "IPU bridge init failed\n"); return ERR_PTR(ret); } #endif } #endif pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = base; pdata->ipdata = ipdata; /* Use 250MHz for ipu6 se */ if (ipu_ver == IPU_VER_6SE) ctrl->ratio = IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO; isys = ipu_bus_initialize_device(pdev, parent, pdata, ctrl, IPU_ISYS_NAME, nr); if (IS_ERR(isys)) { dev_err_probe(&pdev->dev, PTR_ERR(isys), "ipu_bus_initialize_device(isys) failed\n"); kfree(pdata); return isys; } isys->mmu = ipu_mmu_init(&pdev->dev, base, ISYS_MMID, &ipdata->hw_variant); if (IS_ERR(isys->mmu)) { dev_err_probe(&pdev->dev, PTR_ERR(isys->mmu), "ipu_mmu_init(isys->mmu) failed\n"); put_device(&isys->dev); return ERR_CAST(isys->mmu); } isys->mmu->dev = &isys->dev; ret = ipu_bus_add_device(isys); if (ret) return ERR_PTR(ret); return isys; } static struct ipu_bus_device *ipu_psys_init(struct pci_dev *pdev, struct device *parent, struct ipu_buttress_ctrl *ctrl, void __iomem *base, const struct ipu_psys_internal_pdata *ipdata, unsigned int nr) { struct ipu_bus_device *psys; struct ipu_psys_pdata *pdata; int ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = base; pdata->ipdata = ipdata; psys = ipu_bus_initialize_device(pdev, parent, pdata, ctrl, IPU_PSYS_NAME, nr); if (IS_ERR(psys)) { dev_err_probe(&pdev->dev, PTR_ERR(psys), "ipu_bus_initialize_device(psys) failed\n"); kfree(pdata); return psys; } psys->mmu = ipu_mmu_init(&pdev->dev, base, PSYS_MMID, &ipdata->hw_variant); if (IS_ERR(psys->mmu)) { dev_err_probe(&pdev->dev, PTR_ERR(psys->mmu), "ipu_mmu_init(psys->mmu) failed\n"); put_device(&psys->dev); return ERR_CAST(psys->mmu); } psys->mmu->dev = &psys->dev; ret = ipu_bus_add_device(psys); if (ret) return ERR_PTR(ret); return psys; } int ipu_fw_authenticate(void *data, u64 val) { struct ipu_device *isp = data; int ret; if (!isp->secure_mode) return -EINVAL; ret = ipu_buttress_reset_authentication(isp); if (ret) { dev_err(&isp->pdev->dev, "Failed to reset authentication!\n"); return ret; } ret = pm_runtime_get_sync(&isp->psys->dev); if (ret < 0) { dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } ret = ipu_buttress_authenticate(isp); if (ret) { 
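/*
 * Note: this error path returns with the runtime PM reference taken by
 * pm_runtime_get_sync() above still held; a pm_runtime_put() before
 * returning would likely be needed to balance it.
 */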
dev_err(&isp->pdev->dev, "FW authentication failed\n"); return ret; } pm_runtime_put(&isp->psys->dev); return 0; } EXPORT_SYMBOL(ipu_fw_authenticate); DEFINE_SIMPLE_ATTRIBUTE(authenticate_fops, NULL, ipu_fw_authenticate, "%llu\n"); #ifdef CONFIG_DEBUG_FS static int resume_ipu_bus_device(struct ipu_bus_device *adev) { struct device *dev = &adev->dev; const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (!pm || !pm->resume) return -EIO; return pm->resume(dev); } static int suspend_ipu_bus_device(struct ipu_bus_device *adev) { struct device *dev = &adev->dev; const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (!pm || !pm->suspend) return -EIO; return pm->suspend(dev); } static int force_suspend_get(void *data, u64 *val) { struct ipu_device *isp = data; struct ipu_buttress *b = &isp->buttress; *val = b->force_suspend; return 0; } static int force_suspend_set(void *data, u64 val) { struct ipu_device *isp = data; struct ipu_buttress *b = &isp->buttress; int ret = 0; if (val == b->force_suspend) return 0; if (val) { b->force_suspend = 1; ret = suspend_ipu_bus_device(isp->psys); if (ret) { dev_err(&isp->pdev->dev, "Failed to suspend psys\n"); return ret; } ret = suspend_ipu_bus_device(isp->isys); if (ret) { dev_err(&isp->pdev->dev, "Failed to suspend isys\n"); return ret; } ret = pci_set_power_state(isp->pdev, PCI_D3hot); if (ret) { dev_err(&isp->pdev->dev, "Failed to suspend IUnit PCI device\n"); return ret; } } else { ret = pci_set_power_state(isp->pdev, PCI_D0); if (ret) { dev_err(&isp->pdev->dev, "Failed to suspend IUnit PCI device\n"); return ret; } ret = resume_ipu_bus_device(isp->isys); if (ret) { dev_err(&isp->pdev->dev, "Failed to resume isys\n"); return ret; } ret = resume_ipu_bus_device(isp->psys); if (ret) { dev_err(&isp->pdev->dev, "Failed to resume psys\n"); return ret; } b->force_suspend = 0; } return 0; } DEFINE_SIMPLE_ATTRIBUTE(force_suspend_fops, force_suspend_get, force_suspend_set, "%llu\n"); /* * The sysfs interface for reloading cpd fw is there only for debug purpose, * and it must not be used when either isys or psys is in use. */ static int cpd_fw_reload(void *data, u64 val) { struct ipu_device *isp = data; int rval = -EINVAL; if (isp->cpd_fw_reload) rval = isp->cpd_fw_reload(isp); return rval; } DEFINE_SIMPLE_ATTRIBUTE(cpd_fw_fops, NULL, cpd_fw_reload, "%llu\n"); static int ipu_init_debugfs(struct ipu_device *isp) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir(IPU_NAME, NULL); if (!dir) return -ENOMEM; file = debugfs_create_file("force_suspend", 0700, dir, isp, &force_suspend_fops); if (!file) goto err; file = debugfs_create_file("authenticate", 0700, dir, isp, &authenticate_fops); if (!file) goto err; file = debugfs_create_file("cpd_fw_reload", 0700, dir, isp, &cpd_fw_fops); if (!file) goto err; if (ipu_trace_debugfs_add(isp, dir)) goto err; isp->ipu_dir = dir; if (ipu_buttress_debugfs_init(isp)) goto err; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } static void ipu_remove_debugfs(struct ipu_device *isp) { /* * Since isys and psys debugfs dir will be created under ipu root dir, * mark its dentry to NULL to avoid duplicate removal. 
*/ debugfs_remove_recursive(isp->ipu_dir); isp->ipu_dir = NULL; } #endif /* CONFIG_DEBUG_FS */ static int ipu_pci_config_setup(struct pci_dev *dev) { u16 pci_command; int rval; pci_read_config_word(dev, PCI_COMMAND, &pci_command); pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, pci_command); /* disable IPU6 PCI ATS on mtl ES2 */ if ((boot_cpu_data.x86_model == 0xac || boot_cpu_data.x86_model == 0xaa) && boot_cpu_data.x86_stepping == 0x2 && pci_ats_supported(dev)) pci_disable_ats(dev); /* no msi pci capability for IPU6EP */ if (ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) { /* likely do nothing as msi not enabled by default */ pci_disable_msi(dev); return 0; } rval = pci_enable_msi(dev); if (rval) dev_err(&dev->dev, "Failed to enable msi (%d)\n", rval); return rval; } static void ipu_configure_vc_mechanism(struct ipu_device *isp) { u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); if (IPU_BTRS_ARB_STALL_MODE_VC0 == IPU_BTRS_ARB_MODE_TYPE_STALL) val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; else val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; if (IPU_BTRS_ARB_STALL_MODE_VC1 == IPU_BTRS_ARB_MODE_TYPE_STALL) val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; else val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); } int request_cpd_fw(const struct firmware **firmware_p, const char *name, struct device *device) { const struct firmware *fw; struct firmware *tmp; int ret; ret = request_firmware(&fw, name, device); if (ret) return ret; if (is_vmalloc_addr(fw->data)) { *firmware_p = fw; } else { tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { release_firmware(fw); return -ENOMEM; } tmp->size = fw->size; tmp->data = vmalloc(fw->size); if (!tmp->data) { kfree(tmp); release_firmware(fw); return -ENOMEM; } memcpy((void *)tmp->data, fw->data, fw->size); *firmware_p = tmp; release_firmware(fw); } return 0; } EXPORT_SYMBOL(request_cpd_fw); static int ipu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ipu_device *isp; phys_addr_t phys; void __iomem *const *iomap; void __iomem *isys_base = NULL; void __iomem *psys_base = NULL; struct ipu_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; unsigned int dma_mask = IPU_DMA_MASK; struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); u32 is_es; int rval; u32 val; if (!fwnode || fwnode_property_read_u32(fwnode, "is_es", &is_es)) is_es = 0; isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); if (!isp) return -ENOMEM; isp->pdev = pdev; INIT_LIST_HEAD(&isp->devices); rval = pcim_enable_device(pdev); if (rval) { dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", rval); return rval; } dev_info(&pdev->dev, "Device 0x%x (rev: 0x%x)\n", pdev->device, pdev->revision); phys = pci_resource_start(pdev, IPU_PCI_BAR); rval = pcim_iomap_regions(pdev, 1 << IPU_PCI_BAR, pci_name(pdev)); if (rval) { dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", rval); return rval; } dev_info(&pdev->dev, "physical base address 0x%llx\n", phys); iomap = pcim_iomap_table(pdev); if (!iomap) { dev_err(&pdev->dev, "Failed to iomap table (%d)\n", rval); return -ENODEV; } isp->base = iomap[IPU_PCI_BAR]; dev_info(&pdev->dev, "mapped as: 0x%p\n", isp->base); pci_set_drvdata(pdev, isp); pci_set_master(pdev); switch (id->device) { case IPU6_PCI_ID: ipu_ver = IPU_VER_6; isp->cpd_fw_name = IPU6_FIRMWARE_NAME; isp->cpd_fw_name_new = IPU6_FIRMWARE_NAME_NEW; break; case IPU6SE_PCI_ID: ipu_ver = IPU_VER_6SE; isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME; 
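/*
 * The *_FIRMWARE_NAME_NEW path assigned below is only a fallback:
 * probe requests the legacy name first and retries with the new
 * location when that request fails with -ENOENT.
 */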
isp->cpd_fw_name_new = IPU6SE_FIRMWARE_NAME_NEW; break; case IPU6EP_ADL_P_PCI_ID: case IPU6EP_RPL_P_PCI_ID: ipu_ver = IPU_VER_6EP; isp->cpd_fw_name = is_es ? IPU6EPES_FIRMWARE_NAME : IPU6EP_FIRMWARE_NAME; isp->cpd_fw_name_new = is_es ? IPU6EPES_FIRMWARE_NAME_NEW : IPU6EP_FIRMWARE_NAME_NEW; break; case IPU6EP_ADL_N_PCI_ID: ipu_ver = IPU_VER_6EP; isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME; isp->cpd_fw_name_new = IPU6EPADLN_FIRMWARE_NAME_NEW; break; case IPU6EP_MTL_PCI_ID: ipu_ver = IPU_VER_6EP_MTL; isp->cpd_fw_name = is_es ? IPU6EPMTLES_FIRMWARE_NAME : IPU6EPMTL_FIRMWARE_NAME; isp->cpd_fw_name_new = is_es ? IPU6EPMTLES_FIRMWARE_NAME_NEW : IPU6EPMTL_FIRMWARE_NAME_NEW; break; default: WARN(1, "Unsupported IPU device"); return -ENODEV; } ipu_internal_pdata_init(); isys_base = isp->base + isys_ipdata.hw_variant.offset; psys_base = isp->base + psys_ipdata.hw_variant.offset; dev_dbg(&pdev->dev, "isys_base: 0x%lx\n", (unsigned long)isys_base); dev_dbg(&pdev->dev, "psys_base: 0x%lx\n", (unsigned long)psys_base); rval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_mask)); if (rval) { dev_err(&pdev->dev, "Failed to set DMA mask (%d)\n", rval); return rval; } dma_set_max_seg_size(&pdev->dev, UINT_MAX); rval = ipu_pci_config_setup(pdev); if (rval) return rval; rval = devm_request_threaded_irq(&pdev->dev, pdev->irq, ipu_buttress_isr, ipu_buttress_isr_threaded, IRQF_SHARED, IPU_NAME, isp); if (rval) { dev_err(&pdev->dev, "Requesting irq failed(%d)\n", rval); return rval; } rval = ipu_buttress_init(isp); if (rval) return rval; dev_dbg(&pdev->dev, "cpd file name: %s\n", isp->cpd_fw_name); rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, &pdev->dev); if (rval == -ENOENT) { /* Try again with new FW path */ dev_dbg(&pdev->dev, "cpd file name: %s\n", isp->cpd_fw_name_new); rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name_new, &pdev->dev); } if (rval) { dev_err(&isp->pdev->dev, "Requesting signed firmware failed\n"); goto buttress_exit; } rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data, isp->cpd_fw->size); if (rval) { dev_err(&isp->pdev->dev, "Failed to validate cpd\n"); goto out_ipu_bus_del_devices; } rval = ipu_trace_add(isp); if (rval) dev_err(&pdev->dev, "Trace support not available\n"); /* * NOTE Device hierarchy below is important to ensure proper * runtime suspend and resume order. * Also registration order is important to ensure proper * suspend and resume order during system * suspend. 
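* (psys is created below with isys as its parent device, so runtime PM
* resumes isys before psys and suspends them in the reverse order.)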
Registration order is as follows: * isys->psys */ isys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*isys_ctrl), GFP_KERNEL); if (!isys_ctrl) { rval = -ENOMEM; goto out_ipu_bus_del_devices; } /* Init butress control with default values based on the HW */ memcpy(isys_ctrl, &isys_buttress_ctrl, sizeof(*isys_ctrl)); isp->isys = ipu_isys_init(pdev, &pdev->dev, isys_ctrl, isys_base, &isys_ipdata, 0); if (IS_ERR(isp->isys)) { rval = PTR_ERR(isp->isys); goto out_ipu_bus_del_devices; } psys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*psys_ctrl), GFP_KERNEL); if (!psys_ctrl) { rval = -ENOMEM; goto out_ipu_bus_del_devices; } /* Init butress control with default values based on the HW */ memcpy(psys_ctrl, &psys_buttress_ctrl, sizeof(*psys_ctrl)); isp->psys = ipu_psys_init(pdev, &isp->isys->dev, psys_ctrl, psys_base, &psys_ipdata, 0); if (IS_ERR(isp->psys)) { rval = PTR_ERR(isp->psys); goto out_ipu_bus_del_devices; } rval = pm_runtime_get_sync(&isp->psys->dev); if (rval < 0) { dev_err(&isp->psys->dev, "Failed to get runtime PM\n"); goto out_ipu_bus_del_devices; } rval = ipu_mmu_hw_init(isp->psys->mmu); if (rval) { dev_err(&isp->pdev->dev, "Failed to set mmu hw\n"); goto out_ipu_bus_del_devices; } rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &isp->fw_sgt); if (rval) { dev_err(&isp->pdev->dev, "failed to map fw image\n"); goto out_ipu_bus_del_devices; } isp->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data, sg_dma_address(isp->fw_sgt.sgl), &isp->pkg_dir_dma_addr, &isp->pkg_dir_size); if (!isp->pkg_dir) { rval = -ENOMEM; dev_err(&isp->pdev->dev, "failed to create pkg dir\n"); goto out_ipu_bus_del_devices; } rval = ipu_buttress_authenticate(isp); if (rval) { dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", rval); goto out_ipu_bus_del_devices; } ipu_mmu_hw_cleanup(isp->psys->mmu); pm_runtime_put(&isp->psys->dev); #ifdef CONFIG_DEBUG_FS rval = ipu_init_debugfs(isp); if (rval) { dev_err(&pdev->dev, "Failed to initialize debugfs"); goto out_ipu_bus_del_devices; } #endif /* Configure the arbitration mechanisms for VC requests */ ipu_configure_vc_mechanism(isp); val = readl(isp->base + BUTTRESS_REG_SKU); dev_info(&pdev->dev, "IPU%u-v%u driver version %d.%d\n", val & 0xf, (val >> 4) & 0x7, IPU_MAJOR_VERSION, IPU_MINOR_VERSION); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); isp->ipu_bus_ready_to_probe = true; return 0; out_ipu_bus_del_devices: if (isp->pkg_dir) { if (isp->psys) { ipu_cpd_free_pkg_dir(isp->psys, isp->pkg_dir, isp->pkg_dir_dma_addr, isp->pkg_dir_size); ipu_buttress_unmap_fw_image(isp->psys, &isp->fw_sgt); } isp->pkg_dir = NULL; } if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) ipu_mmu_cleanup(isp->psys->mmu); if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) ipu_mmu_cleanup(isp->isys->mmu); if (!IS_ERR_OR_NULL(isp->psys)) pm_runtime_put(&isp->psys->dev); ipu_bus_del_devices(pdev); release_firmware(isp->cpd_fw); buttress_exit: ipu_buttress_exit(isp); return rval; } static void ipu_pci_remove(struct pci_dev *pdev) { struct ipu_device *isp = pci_get_drvdata(pdev); #ifdef CONFIG_DEBUG_FS ipu_remove_debugfs(isp); #endif ipu_trace_release(isp); ipu_cpd_free_pkg_dir(isp->psys, isp->pkg_dir, isp->pkg_dir_dma_addr, isp->pkg_dir_size); ipu_buttress_unmap_fw_image(isp->psys, &isp->fw_sgt); isp->pkg_dir = NULL; isp->pkg_dir_dma_addr = 0; isp->pkg_dir_size = 0; ipu_mmu_cleanup(isp->psys->mmu); ipu_mmu_cleanup(isp->isys->mmu); ipu_bus_del_devices(pdev); pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); 
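/*
 * The pm_runtime_forbid()/pm_runtime_get_noresume() pair above mirrors
 * the pm_runtime_put_noidle()/pm_runtime_allow() done at the end of
 * probe, keeping the device usage count balanced for the final
 * teardown below.
 */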
ipu_buttress_exit(isp); release_firmware(isp->cpd_fw); } #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) static void ipu_pci_reset_notify(struct pci_dev *pdev, bool prepare) { struct ipu_device *isp = pci_get_drvdata(pdev); if (prepare) { dev_err(&pdev->dev, "FLR prepare\n"); pm_runtime_forbid(&isp->pdev->dev); isp->flr_done = true; return; } ipu_buttress_restore(isp); if (isp->secure_mode) ipu_buttress_reset_authentication(isp); ipu_bus_flr_recovery(); isp->ipc_reinit = true; pm_runtime_allow(&isp->pdev->dev); dev_err(&pdev->dev, "FLR completed\n"); } #else static void ipu_pci_reset_prepare(struct pci_dev *pdev) { struct ipu_device *isp = pci_get_drvdata(pdev); dev_warn(&pdev->dev, "FLR prepare\n"); pm_runtime_forbid(&isp->pdev->dev); isp->flr_done = true; } static void ipu_pci_reset_done(struct pci_dev *pdev) { struct ipu_device *isp = pci_get_drvdata(pdev); ipu_buttress_restore(isp); if (isp->secure_mode) ipu_buttress_reset_authentication(isp); ipu_bus_flr_recovery(); isp->ipc_reinit = true; pm_runtime_allow(&isp->pdev->dev); dev_warn(&pdev->dev, "FLR completed\n"); } #endif #ifdef CONFIG_PM /* * PCI base driver code requires driver to provide these to enable * PCI device level PM state transitions (D0<->D3) */ static int ipu_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu_device *isp = pci_get_drvdata(pdev); isp->flr_done = false; return 0; } static int ipu_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu_device *isp = pci_get_drvdata(pdev); struct ipu_buttress *b = &isp->buttress; int rval; /* Configure the arbitration mechanisms for VC requests */ ipu_configure_vc_mechanism(isp); ipu_buttress_set_secure_mode(isp); isp->secure_mode = ipu_buttress_get_secure_mode(isp); dev_info(dev, "IPU in %s mode\n", isp->secure_mode ? 
"secure" : "non-secure"); ipu_buttress_restore(isp); rval = ipu_buttress_ipc_reset(isp, &b->cse); if (rval) dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); rval = pm_runtime_get_sync(&isp->psys->dev); if (rval < 0) { dev_err(&isp->psys->dev, "Failed to get runtime PM\n"); return 0; } rval = ipu_buttress_authenticate(isp); if (rval) dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", rval); pm_runtime_put(&isp->psys->dev); return 0; } static int ipu_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu_device *isp = pci_get_drvdata(pdev); int rval; ipu_configure_vc_mechanism(isp); ipu_buttress_restore(isp); if (isp->ipc_reinit) { struct ipu_buttress *b = &isp->buttress; isp->ipc_reinit = false; rval = ipu_buttress_ipc_reset(isp, &b->cse); if (rval) dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); } return 0; } static const struct dev_pm_ops ipu_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(&ipu_suspend, &ipu_resume) SET_RUNTIME_PM_OPS(&ipu_suspend, /* Same as in suspend flow */ &ipu_runtime_resume, NULL) }; #define IPU_PM (&ipu_pm_ops) #else #define IPU_PM NULL #endif static const struct pci_device_id ipu_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_P_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_N_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_RPL_P_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_MTL_PCI_ID)}, {0,} }; MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); static const struct pci_error_handlers pci_err_handlers = { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) .reset_notify = ipu_pci_reset_notify, #else .reset_prepare = ipu_pci_reset_prepare, .reset_done = ipu_pci_reset_done, #endif }; static struct pci_driver ipu_pci_driver = { .name = IPU_NAME, .id_table = ipu_pci_tbl, .probe = ipu_pci_probe, .remove = ipu_pci_remove, .driver = { .pm = IPU_PM, }, .err_handler = &pci_err_handlers, }; static int __init ipu_init(void) { int rval = ipu_bus_register(); if (rval) { pr_warn("can't register ipu bus (%d)\n", rval); return rval; } rval = pci_register_driver(&ipu_pci_driver); if (rval) { pr_warn("can't register pci driver (%d)\n", rval); goto out_pci_register_driver; } return 0; out_pci_register_driver: ipu_bus_unregister(); return rval; } static void __exit ipu_exit(void) { pci_unregister_driver(&ipu_pci_driver); ipu_bus_unregister(); } module_init(ipu_init); module_exit(ipu_exit); #if IS_ENABLED(CONFIG_IPU_BRIDGE) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); #endif MODULE_AUTHOR("Sakari Ailus "); MODULE_AUTHOR("Jouni Högander "); MODULE_AUTHOR("Antti Laakso "); MODULE_AUTHOR("Samu Onkalo "); MODULE_AUTHOR("Jianxu Zheng "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_AUTHOR("Renwei Wu "); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Yunliang Ding "); MODULE_AUTHOR("Zaikuo Wang "); MODULE_AUTHOR("Leifu Zhao "); MODULE_AUTHOR("Xia Wu "); MODULE_AUTHOR("Kun Jiang "); MODULE_AUTHOR("Intel"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu pci driver"); ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu.h000066400000000000000000000062771471702545500243160ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_H #define IPU_H #include #include #include #include #include "ipu-pdata.h" #include "ipu-bus.h" #include "ipu-buttress.h" #include "ipu-trace.h" #define IPU6_PCI_ID 0x9a19 #define IPU6SE_PCI_ID 
0x4e19 #define IPU6EP_ADL_P_PCI_ID 0x465d #define IPU6EP_ADL_N_PCI_ID 0x462e #define IPU6EP_RPL_P_PCI_ID 0xa75d #define IPU6EP_MTL_PCI_ID 0x7d19 enum ipu_version { IPU_VER_INVALID = 0, IPU_VER_6, IPU_VER_6SE, IPU_VER_6EP, IPU_VER_6EP_MTL, }; /* * IPU version definitions to reflect the IPU driver changes. * Both ISYS and PSYS share the same version. */ #define IPU_MAJOR_VERSION 1 #define IPU_MINOR_VERSION 0 #define IPU_DRIVER_VERSION (IPU_MAJOR_VERSION << 16 | IPU_MINOR_VERSION) /* processing system frequency: 25Mhz x ratio, Legal values [8,32] */ #define PS_FREQ_CTL_DEFAULT_RATIO 0x12 /* input system frequency: 1600Mhz / divisor. Legal values [2,8] */ #define IS_FREQ_SOURCE 1600000000 #define IS_FREQ_CTL_DIVISOR 0x4 /* * ISYS DMA can overshoot. For higher resolutions over allocation is one line * but it must be at minimum 1024 bytes. Value could be different in * different versions / generations thus provide it via platform data. */ #define IPU_ISYS_OVERALLOC_MIN 1024 /* * Physical pages in GDA is 128, page size is 2K for IPU6, 1K for others. */ #define IPU_DEVICE_GDA_NR_PAGES 128 /* * Virtualization factor to calculate the available virtual pages. */ #define IPU_DEVICE_GDA_VIRT_FACTOR 32 struct pci_dev; struct list_head; struct firmware; #define NR_OF_MMU_RESOURCES 2 struct ipu_device { struct pci_dev *pdev; struct list_head devices; struct ipu_bus_device *isys; struct ipu_bus_device *psys; struct ipu_buttress buttress; const struct firmware *cpd_fw; const char *cpd_fw_name; const char *cpd_fw_name_new; u64 *pkg_dir; dma_addr_t pkg_dir_dma_addr; unsigned int pkg_dir_size; struct sg_table fw_sgt; void __iomem *base; #ifdef CONFIG_DEBUG_FS struct dentry *ipu_dir; #endif struct ipu_trace *trace; bool flr_done; bool ipc_reinit; bool secure_mode; bool ipu_bus_ready_to_probe; int (*cpd_fw_reload)(struct ipu_device *isp); }; #define IPU_DMA_MASK 39 #define IPU_LIB_CALL_TIMEOUT_MS 2000 #define IPU_PSYS_CMD_TIMEOUT_MS 2000 #define IPU_PSYS_OPEN_TIMEOUT_US 50 #define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) int ipu_fw_authenticate(void *data, u64 val); void ipu_configure_spc(struct ipu_device *isp, const struct ipu_hw_variants *hw_variant, int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, dma_addr_t pkg_dir_dma_addr); int request_cpd_fw(const struct firmware **firmware_p, const char *name, struct device *device); extern enum ipu_version ipu_ver; void ipu_internal_pdata_init(void); #if defined(CONFIG_IPU_ISYS_BRIDGE) int cio2_bridge_init(struct pci_dev *cio2); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) #include /* Helpers for building against various kernel versions */ static inline struct media_pipeline *media_entity_pipeline(struct media_entity *entity) { return entity->pipe; } #endif #endif /* IPU_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/000077500000000000000000000000001471702545500242175ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/Kconfig000066400000000000000000000025011471702545500255200ustar00rootroot00000000000000config VIDEO_INTEL_IPU6 tristate "Intel IPU driver" depends on ACPI depends on MEDIA_SUPPORT depends on MEDIA_PCI_SUPPORT depends on X86_64 select IOMMU_API select IOMMU_IOVA select X86_DEV_DMA_OPS if X86 select VIDEOBUF2_DMA_CONTIG select V4L2_FWNODE select PHYS_ADDR_T_64BIT select COMMON_CLK help This is the Intel imaging processing unit, found in Intel SoCs and used for capturing images and video from a camera sensor. To compile this driver, say Y here! 
It contains 3 modules - intel_ipu6, intel_ipu6_isys and intel_ipu6_psys. config VIDEO_INTEL_IPU_TPG bool "Compile for TPG driver" depends on VIDEO_INTEL_IPU6 help If selected, TPG device nodes would be created. Recommended for driver developers only. If you want to the TPG devices exposed to user as media entity, you must select this option, otherwise no. config IPU_ISYS_BRIDGE bool "Intel IPU driver bridge" default y depends on VIDEO_INTEL_IPU6 help If selected, IPU isys bridge would be enabled. If you want supported sensors can be registered as IPU subdevices, select y here. config IPU_SINGLE_BE_SOC_DEVICE bool "Intel IPU uses only one BE SOC device" default n depends on VIDEO_INTEL_IPU6 help If selected, IPU exports only one BE SOC device. If you don't need multiple camera streaming, select y here. ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/Makefile000066400000000000000000000022041471702545500256550ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2017 - 2024 Intel Corporation. ifneq ($(EXTERNAL_BUILD), 1) srcpath := $(srctree) endif ccflags-y += -DIPU_TPG_FRAME_SYNC \ -DIPU_ISYS_GPC intel-ipu6-objs += ../ipu.o \ ../ipu-bus.o \ ../ipu-dma.o \ ../ipu-mmu.o \ ../ipu-buttress.o \ ../ipu-trace.o \ ../ipu-cpd.o \ ipu6.o \ ../ipu-fw-com.o ifdef CONFIG_IPU_ISYS_BRIDGE intel-ipu6-objs += ../cio2-bridge.o endif obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o intel-ipu6-isys-objs += ../ipu-isys.o \ ../ipu-isys-csi2.o \ ipu6-isys.o \ ipu6-isys-phy.o \ ipu6-isys-dwc-phy.o \ ipu6-isys-csi2.o \ ipu6-isys-gpc.o \ ../ipu-isys-csi2-be-soc.o \ ../ipu-fw-isys.o \ ../ipu-isys-video.o \ ../ipu-isys-queue.o \ ../ipu-isys-subdev.o obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ ccflags-y += -I$(srcpath)/$(src)/psys/ ccflags-y += -I$(srcpath)/$(src)/../ ccflags-y += -I$(srcpath)/$(src)/ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu-platform-buttress-regs.h000066400000000000000000000307121471702545500316210ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_BUTTRESS_REGS_H #define IPU_PLATFORM_BUTTRESS_REGS_H /* IS_WORKPOINT_REQ */ #define IPU_BUTTRESS_REG_IS_FREQ_CTL 0x34 /* PS_WORKPOINT_REQ */ #define IPU_BUTTRESS_REG_PS_FREQ_CTL 0x38 #define IPU_BUTTRESS_IS_FREQ_RATIO_MASK 0xff #define IPU_BUTTRESS_PS_FREQ_RATIO_MASK 0xff #define IPU_IS_FREQ_MAX 533 #define IPU_IS_FREQ_MIN 200 #define IPU_PS_FREQ_MAX 450 #define IPU_IS_FREQ_RATIO_BASE 25 #define IPU_PS_FREQ_RATIO_BASE 25 #define IPU_BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK 0xff #define IPU_BUTTRESS_PS_FREQ_CTL_DIVISOR_MASK 0xff /* should be tuned for real silicon */ #define IPU_IS_FREQ_CTL_DEFAULT_RATIO 0x08 #define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a #define IPU_PS_FREQ_CTL_DEFAULT_RATIO 0x0d #define IPU_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10 #define IPU_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708 #define IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 #define IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK \ (0x3 << IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT) #define IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 #define IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK \ (0x3 << IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT) #define IPU_BUTTRESS_PWR_STATE_DN_DONE 0x0 #define IPU_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 #define IPU_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 #define IPU_BUTTRESS_PWR_STATE_UP_DONE 0x3 #define IPU_BUTTRESS_REG_FPGA_SUPPORT_0 0x270 #define 
IPU_BUTTRESS_REG_FPGA_SUPPORT_1 0x274 #define IPU_BUTTRESS_REG_FPGA_SUPPORT_2 0x278 #define IPU_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c #define IPU_BUTTRESS_REG_FPGA_SUPPORT_4 0x280 #define IPU_BUTTRESS_REG_FPGA_SUPPORT_5 0x284 #define IPU_BUTTRESS_REG_FPGA_SUPPORT_6 0x288 #define IPU_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c #define BUTTRESS_REG_WDT 0x8 #define BUTTRESS_REG_BTRS_CTRL 0xc #define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) #define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) #define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8) #define BUTTRESS_REG_FW_RESET_CTL 0x30 #define BUTTRESS_FW_RESET_CTL_START BIT(0) #define BUTTRESS_FW_RESET_CTL_DONE BIT(1) #define BUTTRESS_REG_IS_FREQ_CTL 0x34 #define BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK 0xf #define BUTTRESS_REG_PS_FREQ_CTL 0x38 #define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xff #define BUTTRESS_FREQ_CTL_START BIT(31) #define BUTTRESS_FREQ_CTL_START_SHIFT 31 #define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 #define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL (GENMASK(19, 16)) #define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK (0xff << 8) #define BUTTRESS_REG_PWR_STATE 0x5c #define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 #define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x3 << 3) #define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 #define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x3 << 6) #define BUTTRESS_PWR_STATE_RESET 0x0 #define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 #define BUTTRESS_PWR_STATE_PWR_RDY 0x3 #define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 #define BUTTRESS_PWR_STATE_HH_STATUS_SHIFT 11 #define BUTTRESS_PWR_STATE_HH_STATUS_MASK (0x3 << 11) enum { BUTTRESS_PWR_STATE_HH_STATE_IDLE, BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, BUTTRESS_PWR_STATE_HH_STATE_DONE, BUTTRESS_PWR_STATE_HH_STATE_ERR, }; #define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT 19 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK (0xf << 19) #define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 #define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa #define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb #define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc #define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd #define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe #define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf #define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT 24 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK (0x1f << 24) #define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa #define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb #define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 
0xc #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd #define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 #define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 #define BUTTRESS_REG_SECURITY_CTL 0x300 #define BUTTRESS_REG_SKU 0x314 #define BUTTRESS_REG_SECURITY_TOUCH 0x318 #define BUTTRESS_REG_CAMERA_MASK 0x84 #define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) #define BUTTRESS_SECURITY_CTL_FW_SETUP_SHIFT 0 #define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK 0x1f #define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE 0x1 #define BUTTRESS_SECURITY_CTL_AUTH_DONE 0x2 #define BUTTRESS_SECURITY_CTL_AUTH_FAILED 0x8 #define BUTTRESS_REG_SENSOR_FREQ_CTL 0x16c #define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(i) \ (0x1b << ((i) * 10)) #define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(i) ((i) * 10) #define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(i) \ (0x1ff << ((i) * 10)) #define BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ 0x176 #define BUTTRESS_SENSOR_CLK_FREQ_8MHZ 0x164 #define BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ 0x2 #define BUTTRESS_SENSOR_CLK_FREQ_12MHZ 0x1b2 #define BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ 0x1ac #define BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ 0x1cc #define BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ 0x1a6 #define BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ 0xca #define BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ 0x12e #define BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ 0x1c0 #define BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ 0x0 #define BUTTRESS_SENSOR_CLK_FREQ_24MHZ 0xb2 #define BUTTRESS_SENSOR_CLK_FREQ_26MHZ 0xae #define BUTTRESS_SENSOR_CLK_FREQ_27MHZ 0x196 #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FB_RATIO_MASK 0xff #define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_SHIFT 8 #define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_MASK (0x2 << 8) #define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_SHIFT 10 #define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_MASK (0x2 << 10) #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FORCE_OFF_SHIFT 12 #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_SHIFT 14 #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_MASK (0x2 << 14) #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_SHIFT 16 #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_MASK (0x2 << 16) #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_SHIFT 18 #define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_MASK (0x2 << 18) #define BUTTRESS_SENSOR_FREQ_CTL_START_SHIFT 31 #define BUTTRESS_REG_SENSOR_CLK_CTL 0x170 /* 0 <= i <= 2 */ #define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i) ((i) * 2) #define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(i) ((i) * 2 + 1) #define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 #define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C #define BUTTRESS_REG_FW_SOURCE_SIZE 0x80 #define BUTTRESS_REG_ISR_STATUS 0x90 #define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 #define BUTTRESS_REG_ISR_ENABLE 0x98 #define BUTTRESS_REG_ISR_CLEAR 0x9C #define BUTTRESS_ISR_IS_IRQ BIT(0) #define BUTTRESS_ISR_PS_IRQ BIT(1) #define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) #define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) #define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) #define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) #define BUTTRESS_ISR_CSE_CSR_SET BIT(6) #define BUTTRESS_ISR_ISH_CSR_SET BIT(7) #define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) #define 
BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) #define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) #define BUTTRESS_ISR_SAI_VIOLATION BIT(11) #define BUTTRESS_ISR_HW_ASSERTION BIT(12) #define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13) #define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14) #define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15) #define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16) #define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17) #define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18) #define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19) #define BUTTRESS_ISR_UFI_ERROR BIT(20) #define BUTTRESS_REG_IU2CSEDB0 0x100 #define BUTTRESS_IU2CSEDB0_BUSY BIT(31) #define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27 #define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10 #define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 #define BUTTRESS_REG_IU2CSEDATA0 0x104 #define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 #define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 #define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 #define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 #define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE 1 #define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE 2 #define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE 4 #define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE 16 #define BUTTRESS_REG_IU2CSECSR 0x108 #define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) #define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) #define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) #define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) #define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) #define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) #define BUTTRESS_REG_CSE2IUDB0 0x304 #define BUTTRESS_REG_CSE2IUCSR 0x30C #define BUTTRESS_REG_CSE2IUDATA0 0x308 /* 0x20 == NACK, 0xf == unknown command */ #define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 #define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff #define BUTTRESS_REG_ISH2IUCSR 0x50 #define BUTTRESS_REG_ISH2IUDB0 0x54 #define BUTTRESS_REG_ISH2IUDATA0 0x58 #define BUTTRESS_REG_IU2ISHDB0 0x10C #define BUTTRESS_REG_IU2ISHDATA0 0x110 #define BUTTRESS_REG_IU2ISHDATA1 0x114 #define BUTTRESS_REG_IU2ISHCSR 0x118 #define BUTTRESS_REG_ISH_START_DETECT 0x198 #define BUTTRESS_REG_ISH_START_DETECT_MASK 0x19C #define BUTTRESS_REG_FABRIC_CMD 0x88 #define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) #define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) #define BUTTRESS_REG_TSW_CTL 0x120 #define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) #define BUTTRESS_REG_TSC_LO 0x164 #define BUTTRESS_REG_TSC_HI 0x168 #define BUTTRESS_REG_CSI2_PORT_CONFIG_AB 0x200 #define BUTTRESS_CSI2_PORT_CONFIG_AB_MUX_MASK 0x1f #define BUTTRESS_CSI2_PORT_CONFIG_AB_COMBO_SHIFT_B0 16 #define BUTTRESS_REG_PS_FREQ_CAPABILITIES 0xf7498 #define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT 24 #define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK (0xff << 24) #define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT 16 #define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK (0xff << 16) #define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT 8 #define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK (0xff << 8) #define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT 0 #define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK (0xff) #define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ BUTTRESS_ISR_IS_IRQ | \ BUTTRESS_ISR_PS_IRQ) #define IPU6SE_ISYS_PHY_0_BASE 0x10000 /* only use BB0, BB2, BB4, and BB6 on PHY0 */ #define IPU6SE_ISYS_PHY_BB_NUM 4 /* offset from PHY base */ #define 
PHY_CSI_CFG 0xc0 #define PHY_CSI_RCOMP_CONTROL 0xc8 #define PHY_CSI_BSCAN_EXCLUDE 0xd8 #define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) #define PHY_DPHY_DLL_OVRD(x) (0x14c + 0x100 * (x)) #define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) #define PHY_CPHY_RX_CONTROL2(x) (0x114 + 0x100 * (x)) #define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) #define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) #endif /* IPU_PLATFORM_BUTTRESS_REGS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu-platform-isys-csi2-reg.h000066400000000000000000000254621471702545500314160ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_ISYS_CSI2_REG_H #define IPU_PLATFORM_ISYS_CSI2_REG_H #define CSI_REG_BASE 0x220000 #define CSI_REG_BASE_PORT(id) ((id) * 0x1000) #define IPU_CSI_PORT_A_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(0)) #define IPU_CSI_PORT_B_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(1)) #define IPU_CSI_PORT_C_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(2)) #define IPU_CSI_PORT_D_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(3)) #define IPU_CSI_PORT_E_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(4)) #define IPU_CSI_PORT_F_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(5)) #define IPU_CSI_PORT_G_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(6)) #define IPU_CSI_PORT_H_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(7)) /* CSI Port Genral Purpose Registers */ #define CSI_REG_PORT_GPREG_SRST 0x0 #define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4 #define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8 /* * Port IRQs mapping events: * IRQ0 - CSI_FE event * IRQ1 - CSI_SYNC * IRQ2 - S2M_SIDS0TO7 * IRQ3 - S2M_SIDS8TO15 */ #define CSI_PORT_REG_BASE_IRQ_CSI 0x80 #define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0 #define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0 #define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0 #define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0 #define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4 #define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8 #define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc #define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10 #define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14 #define IPU6SE_CSI_RX_ERROR_IRQ_MASK 0x7ffff #define IPU6_CSI_RX_ERROR_IRQ_MASK 0xfffff #define CSI_RX_NUM_ERRORS_IN_IRQ 20 #define CSI_RX_NUM_IRQ 32 #define IPU_CSI_RX_IRQ_FS_VC 1 #define IPU_CSI_RX_IRQ_FE_VC 2 /* PPI2CSI */ #define CSI_REG_PPI2CSI_ENABLE 0x200 #define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204 #define PPI_INTF_CONFIG_NOF_ENABLED_DATALANES_SHIFT 3 #define PPI_INTF_CONFIG_RX_AUTO_CLKGATING_SHIFT 5 #define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208 enum CSI_PPI2CSI_CTRL { CSI_PPI2CSI_DISABLE = 0, CSI_PPI2CSI_ENABLE = 1, }; /* CSI_FE */ #define CSI_REG_CSI_FE_ENABLE 0x280 #define CSI_REG_CSI_FE_MODE 0x284 #define CSI_REG_CSI_FE_MUX_CTRL 0x288 #define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290 enum CSI_FE_ENABLE_TYPE { CSI_FE_DISABLE = 0, CSI_FE_ENABLE = 1, }; enum CSI_FE_MODE_TYPE { CSI_FE_DPHY_MODE = 0, CSI_FE_CPHY_MODE = 1, }; enum CSI_FE_INPUT_SELECTOR { CSI_SENSOR_INPUT = 0, CSI_MIPIGEN_INPUT = 1, }; enum CSI_FE_SYNC_CNTR_SEL_TYPE { CSI_CNTR_SENSOR_LINE_ID = (1 << 0), CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID, CSI_CNTR_SENSOR_FRAME_ID = (1 << 1), CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID, }; /* MIPI_PKT_GEN */ #define CSI_REG_PIXGEN_COM_BASE_OFFSET 0x300 #define IPU_CSI_PORT_A_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(0) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define 
IPU_CSI_PORT_B_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(1) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define IPU_CSI_PORT_C_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(2) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define IPU_CSI_PORT_D_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(3) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define IPU_CSI_PORT_E_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(4) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define IPU_CSI_PORT_F_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(5) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define IPU_CSI_PORT_G_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(6) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define IPU_CSI_PORT_H_PIXGEN_ADDR_OFFSET \ (CSI_REG_BASE + CSI_REG_BASE_PORT(7) + CSI_REG_PIXGEN_COM_BASE_OFFSET) #define CSI_REG_PIXGEN_COM_ENABLE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x300) #define CSI_REG_PIXGEN_COM_DTYPE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x304) #define CSI_REG_PIXGEN_COM_VTYPE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x308) #define CSI_REG_PIXGEN_COM_VCHAN_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x30C) #define CSI_REG_PIXGEN_COM_WCOUNT_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x310) /* PRBS */ #define CSI_REG_PIXGEN_PRBS_RSTVAL_REG0_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x314) #define CSI_REG_PIXGEN_PRBS_RSTVAL_REG1_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x318) /* SYNC_GENERATOR_CONFIG */ #define CSI_REG_PIXGEN_SYNG_FREE_RUN_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x31C) #define CSI_REG_PIXGEN_SYNG_PAUSE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x320) #define CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x324) #define CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x328) #define CSI_REG_PIXGEN_SYNG_NOF_LINE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x32C) #define CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x330) #define CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x334) #define CSI_REG_PIXGEN_SYNG_STAT_HCNT_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x338) #define CSI_REG_PIXGEN_SYNG_STAT_VCNT_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x33C) #define CSI_REG_PIXGEN_SYNG_STAT_FCNT_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x340) #define CSI_REG_PIXGEN_SYNG_STAT_DONE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x344) /* TPG */ #define CSI_REG_PIXGEN_TPG_MODE_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x348) /* TPG: mask_cfg */ #define CSI_REG_PIXGEN_TPG_HCNT_MASK_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x34C) #define CSI_REG_PIXGEN_TPG_VCNT_MASK_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x350) #define CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x354) /* TPG: delta_cfg */ #define CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x358) #define CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG_IDX(id) \ (CSI_REG_BASE_PORT(id) + 0x35C) /* TPG: color_cfg */ #define CSI_REG_PIXGEN_TPG_R1_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x360) #define CSI_REG_PIXGEN_TPG_G1_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x364) #define CSI_REG_PIXGEN_TPG_B1_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x368) #define CSI_REG_PIXGEN_TPG_R2_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x36C) #define CSI_REG_PIXGEN_TPG_G2_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x370) #define CSI_REG_PIXGEN_TPG_B2_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x374) #define CSI_REG_PIXGEN_PRBS_RSTVAL_REG0 CSI_REG_PIXGEN_PRBS_RSTVAL_REG0_IDX(0) #define CSI_REG_PIXGEN_PRBS_RSTVAL_REG1 CSI_REG_PIXGEN_PRBS_RSTVAL_REG1_IDX(0) #define CSI_REG_PIXGEN_COM_ENABLE_REG 
CSI_REG_PIXGEN_COM_ENABLE_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_MODE_REG CSI_REG_PIXGEN_TPG_MODE_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_R1_REG CSI_REG_PIXGEN_TPG_R1_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_G1_REG CSI_REG_PIXGEN_TPG_G1_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_B1_REG CSI_REG_PIXGEN_TPG_B1_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_R2_REG CSI_REG_PIXGEN_TPG_R2_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_G2_REG CSI_REG_PIXGEN_TPG_G2_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_B2_REG CSI_REG_PIXGEN_TPG_B2_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_HCNT_MASK_REG CSI_REG_PIXGEN_TPG_HCNT_MASK_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_VCNT_MASK_REG CSI_REG_PIXGEN_TPG_VCNT_MASK_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG \ CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG_IDX(0) #define CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG \ CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG_IDX(0) #define CSI_REG_PIXGEN_SYNG_NOF_LINE_REG \ CSI_REG_PIXGEN_SYNG_NOF_LINE_REG_IDX(0) #define CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG \ CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG_IDX(0) #define CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG \ CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG_IDX(0) #define CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG \ CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG_IDX(0) #define CSI_REG_PIXGEN_COM_WCOUNT_REG CSI_REG_PIXGEN_COM_WCOUNT_REG_IDX(0) #define CSI_REG_PIXGEN_COM_DTYPE_REG CSI_REG_PIXGEN_COM_DTYPE_REG_IDX(0) #define CSI_REG_PIXGEN_COM_VTYPE_REG CSI_REG_PIXGEN_COM_VTYPE_REG_IDX(0) #define CSI_REG_PIXGEN_COM_VCHAN_REG CSI_REG_PIXGEN_COM_VCHAN_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG \ CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG_IDX(0) #define CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG \ CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG_IDX(0) /* CSI HUB General Purpose Registers */ #define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000) #define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004) #define CSI_REG_HUB_GPREG_PHY_CONTROL(id) \ (CSI_REG_BASE + 0x18008 + (id) * 0x8) #define CSI_REG_HUB_GPREG_PHY_CONTROL_RESET BIT(4) #define CSI_REG_HUB_GPREG_PHY_CONTROL_PWR_EN BIT(0) #define CSI_REG_HUB_GPREG_PHY_STATUS(id) \ (CSI_REG_BASE + 0x1800c + (id) * 0x8) #define CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK BIT(0) #define CSI_REG_HUB_GPREG_PHY_STATUS_PHY_READY BIT(4) #define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4) #define CSI_REG_HUB_FW_ACCESS_PORT(id) (CSI_REG_BASE + 0x17000 + (id) * 4) #define IPU6V6_CSI_REG_HUB_FW_ACCESS_PORT(id) \ (CSI_REG_BASE + 0x16000 + (id) * 4) enum CSI_PORT_CLK_GATING_SWITCH { CSI_PORT_CLK_GATING_OFF = 0, CSI_PORT_CLK_GATING_ON = 1, }; #define CSI_REG_BASE_HUB_IRQ 0x18200 #define IPU_NUM_OF_DLANE_REG_PORT0 2 #define IPU_NUM_OF_DLANE_REG_PORT1 4 #define IPU_NUM_OF_DLANE_REG_PORT2 4 #define IPU_NUM_OF_DLANE_REG_PORT3 2 #define IPU_NUM_OF_DLANE_REG_PORT4 2 #define IPU_NUM_OF_DLANE_REG_PORT5 4 #define IPU_NUM_OF_DLANE_REG_PORT6 4 #define IPU_NUM_OF_DLANE_REG_PORT7 2 /* ipu6se support 2 SIPs, 2 port per SIP, 4 ports 0..3 * sip0 - 0, 1 * sip1 - 2, 3 * 0 and 2 support 4 data lanes, 1 and 3 support 2 data lanes * all offset are base from isys base address */ #define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4) #define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4) #define CSI2_HUB_GPREG_DPHY_TIMER_INCR (0x238040) #define CSI2_HUB_GPREG_HPLL_FREQ (0x238044) #define CSI2_HUB_GPREG_IS_CLK_RATIO (0x238048) #define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE (0x23804c) #define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE (0x238058) #define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL (0x23805c) #define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL (0x238088) #define 
CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL (0x2380a4) #define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL (0x2380d0) #define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48) #define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48) #define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48) /* offset from port base */ #define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL (0x0) #define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE (0x4) #define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE (0x8) #define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8) #define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8) #endif /* IPU6_ISYS_CSI2_REG_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu-platform-isys.h000066400000000000000000000013021471702545500277700ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_ISYS_H #define IPU_PLATFORM_ISYS_H #define IPU_ISYS_ENTITY_PREFIX "Intel IPU6" /* * FW support max 16 streams */ #define IPU_ISYS_MAX_STREAMS 16 #define ISYS_UNISPART_IRQS (IPU_ISYS_UNISPART_IRQ_SW | \ IPU_ISYS_UNISPART_IRQ_CSI0 | \ IPU_ISYS_UNISPART_IRQ_CSI1) /* IPU6 ISYS compression alignment */ #define IPU_ISYS_COMPRESSION_LINE_ALIGN 512 #define IPU_ISYS_COMPRESSION_HEIGHT_ALIGN 1 #define IPU_ISYS_COMPRESSION_TILE_SIZE_BYTES 512 #define IPU_ISYS_COMPRESSION_PAGE_ALIGN 0x1000 #define IPU_ISYS_COMPRESSION_TILE_STATUS_BITS 4 #define IPU_ISYS_COMPRESSION_MAX 3 #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu-platform-regs.h000066400000000000000000000314331471702545500277510ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2018 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_REGS_H #define IPU_PLATFORM_REGS_H /* * IPU6 uses uniform address within IPU, therefore all subsystem registers * locates in one signle space starts from 0 but in different sctions with * different addresses, the subsystem offsets are defined to 0 as the * register definition will have the address offset to 0. 
*/ #define IPU_UNIFIED_OFFSET 0 #define IPU_ISYS_IOMMU0_OFFSET 0x2E0000 #define IPU_ISYS_IOMMU1_OFFSET 0x2E0500 #define IPU_ISYS_IOMMUI_OFFSET 0x2E0A00 #define IPU_PSYS_IOMMU0_OFFSET 0x1B0000 #define IPU_PSYS_IOMMU1_OFFSET 0x1B0700 #define IPU_PSYS_IOMMU1R_OFFSET 0x1B0E00 #define IPU_PSYS_IOMMUI_OFFSET 0x1B1500 /* the offset from IOMMU base register */ #define IPU_MMU_L1_STREAM_ID_REG_OFFSET 0x0c #define IPU_MMU_L2_STREAM_ID_REG_OFFSET 0x4c #define IPU_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c #define IPU_MMU_INFO_OFFSET 0x8 #define IPU_ISYS_SPC_OFFSET 0x210000 #define IPU6SE_PSYS_SPC_OFFSET 0x110000 #define IPU6_PSYS_SPC_OFFSET 0x118000 #define IPU_ISYS_DMEM_OFFSET 0x200000 #define IPU_PSYS_DMEM_OFFSET 0x100000 #define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200 #define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204 #define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208 #define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c #define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210 #define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214 #define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220 #define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224 #define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228 #define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c #define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230 #define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234 /* MTL IPU6V6 irq ctrl0 & ctrl1 */ #define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238700 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238704 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238708 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23870c #define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238710 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238714 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238720 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238724 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238728 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23872c #define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238730 #define IPU6V6_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238734 #define IPU_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000 #define IPU_REG_ISYS_UNISPART_IRQ_MASK 0x27c004 #define IPU_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008 #define IPU_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c #define IPU_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010 #define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014 #define IPU_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414 #define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418 #define IPU_ISYS_UNISPART_IRQ_CSI0 BIT(2) #define IPU_ISYS_UNISPART_IRQ_CSI1 BIT(3) #define IPU_ISYS_UNISPART_IRQ_SW BIT(22) #define IPU_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200 #define IPU_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204 #define IPU_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208 #define IPU_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c #define IPU_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210 #define IPU_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214 #define IPU_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100 #define IPU_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104 #define IPU_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108 #define IPU_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c #define IPU_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110 #define IPU_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114 /* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ #define IPU_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4)) #define IPU_ISYS_CSI_PHY_NUM 2 #define IPU_CSI_IRQ_NUM_PER_PIPE 4 #define IPU6SE_ISYS_CSI_PORT_NUM 4 #define IPU6_ISYS_CSI_PORT_NUM 8 #define 
IPU_ISYS_CSI_PORT_IRQ(irq_num) (1 << (irq_num)) /* PSYS Info bits*/ #define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2C + ((a) * 12)) #define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5C + ((a) * 12)) /* PKG DIR OFFSET in IMR in secure mode */ #define IPU_PKG_DIR_IMR_OFFSET 0x40 #define IPU_ISYS_REG_SPC_STATUS_CTRL 0x0 #define IPU_ISYS_SPC_STATUS_START BIT(1) #define IPU_ISYS_SPC_STATUS_RUN BIT(3) #define IPU_ISYS_SPC_STATUS_READY BIT(5) #define IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) #define IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) #define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 #define IPU_PSYS_REG_SPC_START_PC 0x4 #define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 #define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 #define IPU_PSYS_SPC_STATUS_START BIT(1) #define IPU_PSYS_SPC_STATUS_RUN BIT(3) #define IPU_PSYS_SPC_STATUS_READY BIT(5) #define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) #define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) #define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 #define IPU_INFO_ENABLE_SNOOP BIT(0) #define IPU_INFO_DEC_FORCE_FLUSH BIT(1) #define IPU_INFO_DEC_PASS_THRU BIT(2) #define IPU_INFO_ZLW BIT(3) #define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1F) << 4) #define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9) #define IPU_INFO_IMR_BASE BIT(10) #define IPU_INFO_IMR_DESTINED BIT(11) #define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF /* Trace unit related register definitions */ #define TRACE_REG_MAX_ISYS_OFFSET 0xfffff #define TRACE_REG_MAX_PSYS_OFFSET 0xfffff #define IPU_ISYS_OFFSET IPU_ISYS_DMEM_OFFSET #define IPU_PSYS_OFFSET IPU_PSYS_DMEM_OFFSET /* ISYS trace unit registers */ /* Trace unit base offset */ #define IPU_TRACE_REG_IS_TRACE_UNIT_BASE 0x27d000 /* Trace monitors */ #define IPU_TRACE_REG_IS_SP_EVQ_BASE 0x211000 /* GPC blocks */ #define IPU_TRACE_REG_IS_SP_GPC_BASE 0x210800 #define IPU_TRACE_REG_IS_ISL_GPC_BASE 0x2b0a00 #define IPU_TRACE_REG_IS_MMU_GPC_BASE 0x2e0f00 /* each CSI2 port has a dedicated trace monitor, index 0..7 */ #define IPU_TRACE_REG_CSI2_TM_BASE(port) (0x220400 + 0x1000 * (port)) /* Trace timers */ #define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x27c410 #define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) /* SIG2CIO */ #define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port) \ (0x220e00 + (port) * 0x1000) /* PSYS trace unit registers */ /* Trace unit base offset */ #define IPU_TRACE_REG_PS_TRACE_UNIT_BASE 0x1b4000 /* Trace monitors */ #define IPU_TRACE_REG_PS_SPC_EVQ_BASE 0x119000 #define IPU_TRACE_REG_PS_SPP0_EVQ_BASE 0x139000 /* GPC blocks */ #define IPU_TRACE_REG_PS_SPC_GPC_BASE 0x118800 #define IPU_TRACE_REG_PS_SPP0_GPC_BASE 0x138800 #define IPU_TRACE_REG_PS_MMU_GPC_BASE 0x1b1b00 /* Trace timers */ #define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x1aa714 /* * s2m_pixel_soc_pixel_remapping is dedicated for the enableing of the * pixel s2m remp ability.Remap here means that s2m rearange the order * of the pixels in each 4 pixels group. * For examle, mirroring remping means that if input's 4 first pixels * are 1 2 3 4 then in output we should see 4 3 2 1 in this 4 first pixels. * 0xE4 is from s2m MAS document. It means no remaping. */ #define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xE4 /* * csi_be_soc_pixel_remapping is for the enabling of the csi\mipi be pixel * remapping feature. This remapping is exactly like the stream2mmio remapping. 
*/ #define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xE4 #define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1AE000 #define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1AF000 #define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xC) #define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xC) #define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xC + (n) * 0xC) enum ipu_device_ab_group1_target_id { IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, }; enum nci_ab_access_mode { NCI_AB_ACCESS_MODE_RW, /* read & write */ NCI_AB_ACCESS_MODE_RO, /* read only */ NCI_AB_ACCESS_MODE_WO, /* write only */ NCI_AB_ACCESS_MODE_NA /* No access at all */ }; /* * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits) * [0] CSI_PORT.IRQ_CTRL0_csi * [1] CSI_PORT.IRQ_CTRL1_csi_sync * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7 * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15 */ #define IPU_ISYS_UNISPART_IRQ_CSI2(port) \ (0x3 << ((port) * IPU_CSI_IRQ_NUM_PER_PIPE)) /* IRQ-related registers in PSYS */ #define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 #define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 #define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 #define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c #define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 #define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 /* There are 8 FW interrupts, n = 0..7 */ #define IPU_PSYS_GPDEV_FWIRQ0 5 #define IPU_PSYS_GPDEV_FWIRQ1 6 #define IPU_PSYS_GPDEV_FWIRQ2 7 #define IPU_PSYS_GPDEV_FWIRQ3 8 #define IPU_PSYS_GPDEV_FWIRQ4 9 #define IPU_PSYS_GPDEV_FWIRQ5 10 #define IPU_PSYS_GPDEV_FWIRQ6 11 #define IPU_PSYS_GPDEV_FWIRQ7 12 #define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n)) #define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) /* * ISYS GPC (Gerneral Performance Counters) Registers */ #define IPU_ISYS_GPC_BASE 0x2E0000 #define IPU_ISYS_GPREG_TRACE_TIMER_RST 0x27C410 enum ipu_isf_cdc_mmu_gpc_registers { IPU_ISF_CDC_MMU_GPC_SOFT_RESET = 0x0F00, IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE = 0x0F04, IPU_ISF_CDC_MMU_GPC_ENABLE0 = 0x0F20, IPU_ISF_CDC_MMU_GPC_VALUE0 = 0x0F60, IPU_ISF_CDC_MMU_GPC_CNT_SEL0 = 0x0FA0, }; /* * GPC (Gerneral Performance Counters) Registers */ #define IPU_GPC_BASE 0x1B0000 #define IPU_GPREG_TRACE_TIMER_RST 0x1AA714 enum ipu_cdc_mmu_gpc_registers { IPU_CDC_MMU_GPC_SOFT_RESET = 0x1B00, IPU_CDC_MMU_GPC_OVERALL_ENABLE = 0x1B04, IPU_CDC_MMU_GPC_TRACE_HEADER = 0x1B08, IPU_CDC_MMU_GPC_TRACE_ADDR = 0x1B0C, IPU_CDC_MMU_GPC_TRACE_ENABLE_NPK = 0x1B10, IPU_CDC_MMU_GPC_TRACE_ENABLE_DDR = 0x1B14, IPU_CDC_MMU_GPC_LP_CLEAR = 0x1B18, IPU_CDC_MMU_GPC_LOST_PACKET = 0x1B1C, IPU_CDC_MMU_GPC_ENABLE0 = 0x1B20, IPU_CDC_MMU_GPC_VALUE0 = 0x1B60, IPU_CDC_MMU_GPC_CNT_SEL0 = 0x1BA0, IPU_CDC_MMU_GPC_START_SEL0 = 0x1BE0, IPU_CDC_MMU_GPC_STOP_SEL0 = 0x1C20, IPU_CDC_MMU_GPC_MSG_SEL0 = 0x1C60, 
IPU_CDC_MMU_GPC_PLOAD_SEL0 = 0x1CA0, IPU_CDC_MMU_GPC_IRQ_TRIGGER_VALUE0 = 0x1CE0, IPU_CDC_MMU_GPC_IRQ_TIMER_SEL0 = 0x1D20, IPU_CDC_MMU_GPC_IRQ_ENABLE0 = 0x1D60, IPU_CDC_MMU_GPC_END = 0x1D9C }; #define IPU_GPC_SENSE_OFFSET 7 #define IPU_GPC_ROUTE_OFFSET 5 #define IPU_GPC_SOURCE_OFFSET 0 /* * Signals monitored by GPC */ #define IPU_GPC_TRACE_TLB_MISS_MMU_LB_IDX 0 #define IPU_GPC_TRACE_FULL_WRITE_LB_IDX 1 #define IPU_GPC_TRACE_NOFULL_WRITE_LB_IDX 2 #define IPU_GPC_TRACE_FULL_READ_LB_IDX 3 #define IPU_GPC_TRACE_NOFULL_READ_LB_IDX 4 #define IPU_GPC_TRACE_STALL_LB_IDX 5 #define IPU_GPC_TRACE_ZLW_LB_IDX 6 #define IPU_GPC_TRACE_TLB_MISS_MMU_HBTX_IDX 7 #define IPU_GPC_TRACE_FULL_WRITE_HBTX_IDX 8 #define IPU_GPC_TRACE_NOFULL_WRITE_HBTX_IDX 9 #define IPU_GPC_TRACE_FULL_READ_HBTX_IDX 10 #define IPU_GPC_TRACE_STALL_HBTX_IDX 11 #define IPU_GPC_TRACE_ZLW_HBTX_IDX 12 #define IPU_GPC_TRACE_TLB_MISS_MMU_HBFRX_IDX 13 #define IPU_GPC_TRACE_FULL_READ_HBFRX_IDX 14 #define IPU_GPC_TRACE_NOFULL_READ_HBFRX_IDX 15 #define IPU_GPC_TRACE_STALL_HBFRX_IDX 16 #define IPU_GPC_TRACE_ZLW_HBFRX_IDX 17 #define IPU_GPC_TRACE_TLB_MISS_ICACHE_IDX 18 #define IPU_GPC_TRACE_FULL_READ_ICACHE_IDX 19 #define IPU_GPC_TRACE_STALL_ICACHE_IDX 20 /* * psys subdomains power request regs */ enum ipu_device_buttress_psys_domain_pos { IPU_PSYS_SUBDOMAIN_ISA = 0, IPU_PSYS_SUBDOMAIN_PSA = 1, IPU_PSYS_SUBDOMAIN_BB = 2, IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */ }; #define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \ BIT(IPU_PSYS_SUBDOMAIN_BB)) #define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0 #define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4 #endif /* IPU_PLATFORM_REGS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu-platform.h000066400000000000000000000034211471702545500270070ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_H #define IPU_PLATFORM_H #define IPU_NAME "intel-ipu6" #define IPU6SE_FIRMWARE_NAME "intel/ipu6se_fw.bin" #define IPU6EP_FIRMWARE_NAME "intel/ipu6ep_fw.bin" #define IPU6EPES_FIRMWARE_NAME "intel/ipu6epes_fw.bin" #define IPU6_FIRMWARE_NAME "intel/ipu6_fw.bin" #define IPU6EPMTL_FIRMWARE_NAME "intel/ipu6epmtl_fw.bin" #define IPU6EPADLN_FIRMWARE_NAME "intel/ipu6epadln_fw.bin" #define IPU6EPMTLES_FIRMWARE_NAME "intel/ipu6epmtles_fw.bin" #define IPU6SE_FIRMWARE_NAME_NEW "intel/ipu/ipu6se_fw.bin" #define IPU6EP_FIRMWARE_NAME_NEW "intel/ipu/ipu6ep_fw.bin" #define IPU6EPES_FIRMWARE_NAME_NEW "intel/ipu/ipu6epes_fw.bin" #define IPU6_FIRMWARE_NAME_NEW "intel/ipu/ipu6_fw.bin" #define IPU6EPMTL_FIRMWARE_NAME_NEW "intel/ipu/ipu6epmtl_fw.bin" #define IPU6EPADLN_FIRMWARE_NAME_NEW "intel/ipu/ipu6epadln_fw.bin" #define IPU6EPMTLES_FIRMWARE_NAME_NEW "intel/ipu/ipu6epmtles_fw.bin" /* * The following definitions are encoded to the media_device's model field so * that the software components which uses IPU driver can get the hw stepping * information. 
*/ #define IPU_MEDIA_DEV_MODEL_NAME "ipu6-downstream" #define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX #define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX /* declearations, definitions in ipu6.c */ extern struct ipu_isys_internal_pdata isys_ipdata; extern struct ipu_psys_internal_pdata psys_ipdata; extern const struct ipu_buttress_ctrl isys_buttress_ctrl; extern const struct ipu_buttress_ctrl psys_buttress_ctrl; /* definitions in ipu6-isys.c */ extern struct ipu_trace_block isys_trace_blocks[]; /* definitions in ipu6-psys.c */ extern struct ipu_trace_block psys_trace_blocks[]; #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c000066400000000000000000000425241471702545500271000ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include #include "ipu.h" #include "ipu-buttress.h" #include "ipu-isys.h" #include "ipu-platform-buttress-regs.h" #include "ipu-platform-regs.h" #include "ipu-platform-isys-csi2-reg.h" #include "ipu6-isys-csi2.h" #include "ipu6-isys-phy.h" #include "ipu6-isys-dwc-phy.h" #include "ipu-isys-csi2.h" struct ipu6_csi2_error { const char *error_string; bool is_info_only; }; struct ipu6_csi_irq_info_map { u32 irq_error_mask; u32 irq_num; unsigned int irq_base; unsigned int irq_base_ctrl2; struct ipu6_csi2_error *errors; }; /* * Strings corresponding to CSI-2 receiver errors are here. * Corresponding macros are defined in the header file. */ static struct ipu6_csi2_error dphy_rx_errors[] = { {"Single packet header error corrected", true}, {"Multiple packet header errors detected", true}, {"Payload checksum (CRC) error", true}, {"Transfer FIFO overflow", false}, {"Reserved short packet data type detected", true}, {"Reserved long packet data type detected", true}, {"Incomplete long packet detected", false}, {"Frame sync error", false}, {"Line sync error", false}, {"DPHY recoverable synchronization error", true}, {"DPHY fatal error", false}, {"DPHY elastic FIFO overflow", false}, {"Inter-frame short packet discarded", true}, {"Inter-frame long packet discarded", true}, {"MIPI pktgen overflow", false}, {"MIPI pktgen data loss", false}, {"FIFO overflow", false}, {"Lane deskew", false}, {"SOT sync error", false}, {"HSIDLE detected", false} }; static refcount_t phy_power_ref_count[IPU_ISYS_CSI_PHY_NUM]; static int ipu6_csi2_phy_power_set(struct ipu_isys *isys, struct ipu_isys_csi2_config *cfg, bool on) { int ret = 0; unsigned int port, phy_id; refcount_t *ref; void __iomem *isys_base = isys->pdata->base; unsigned int nr; port = cfg->port; phy_id = port / 4; ref = &phy_power_ref_count[phy_id]; dev_dbg(&isys->adev->dev, "for phy %d port %d, lanes: %d\n", phy_id, port, cfg->nlanes); nr = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? 
IPU6_ISYS_CSI_PORT_NUM : IPU6SE_ISYS_CSI_PORT_NUM; if (!isys_base || port >= nr) { dev_warn(&isys->adev->dev, "invalid port ID %d\n", port); return -EINVAL; } if (on) { if (refcount_read(ref)) { /* already up */ dev_warn(&isys->adev->dev, "for phy %d is already UP", phy_id); refcount_inc(ref); return 0; } ret = ipu6_isys_phy_powerup_ack(isys, phy_id); if (ret) return ret; ipu6_isys_phy_reset(isys, phy_id, 0); ipu6_isys_phy_common_init(isys); ret = ipu6_isys_phy_config(isys); if (ret) return ret; ipu6_isys_phy_reset(isys, phy_id, 1); ret = ipu6_isys_phy_ready(isys, phy_id); if (ret) return ret; refcount_set(ref, 1); return 0; } /* power off process */ if (refcount_dec_and_test(ref)) ret = ipu6_isys_phy_powerdown_ack(isys, phy_id); if (ret) dev_err(&isys->adev->dev, "phy poweroff failed!"); return ret; } static int ipu6_csi2_dwc_phy_power_set(struct ipu_isys *isys, struct ipu_isys_csi2_config *cfg, bool on) { int ret = 0; u32 port; u32 phy_id, primary, secondary; u32 nlanes; u32 mbps; void __iomem *isys_base = isys->pdata->base; u32 nr; s64 link_freq; port = cfg->port; nr = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? IPU6_ISYS_CSI_PORT_NUM : IPU6SE_ISYS_CSI_PORT_NUM; if (!isys_base || port >= nr) { dev_warn(&isys->adev->dev, "invalid port ID %d\n", port); return -EINVAL; } nlanes = cfg->nlanes; /* only port a/c/e support 4 lanes */ if (nlanes == 4 && port % 2) { dev_err(&isys->adev->dev, "invalid csi-port %u with %u lanes\n", port, nlanes); return -EINVAL; } ret = ipu_isys_csi2_get_link_freq(&isys->csi2[port], &link_freq); if (ret) { dev_err(&isys->adev->dev, "get link freq failed(%d).\n", ret); return ret; } do_div(link_freq, 1000000); mbps = link_freq * 2; phy_id = port; primary = port & ~1; secondary = primary + 1; if (on) { if (nlanes == 4) { dev_dbg(&isys->adev->dev, "config phy %u and %u in aggregation mode", primary, secondary); ipu6_isys_dwc_phy_reset(isys, primary); ipu6_isys_dwc_phy_reset(isys, secondary); ipu6_isys_dwc_phy_aggr_setup(isys, primary, secondary, mbps); ret = ipu6_isys_dwc_phy_config(isys, primary, mbps); if (ret) return ret; ret = ipu6_isys_dwc_phy_config(isys, secondary, mbps); if (ret) return ret; ret = ipu6_isys_dwc_phy_powerup_ack(isys, primary); if (ret) return ret; ret = ipu6_isys_dwc_phy_powerup_ack(isys, secondary); if (ret) return ret; return 0; } dev_dbg(&isys->adev->dev, "config phy %u with %u lanes in non-aggr mode", phy_id, nlanes); ipu6_isys_dwc_phy_reset(isys, phy_id); ret = ipu6_isys_dwc_phy_config(isys, phy_id, mbps); if (ret) return ret; ret = ipu6_isys_dwc_phy_powerup_ack(isys, phy_id); if (ret) return ret; return 0; } if (nlanes == 4) { dev_dbg(&isys->adev->dev, "Powerdown phy %u and phy %u for port %u", primary, secondary, port); ipu6_isys_dwc_phy_reset(isys, secondary); ipu6_isys_dwc_phy_reset(isys, primary); return 0; } dev_dbg(&isys->adev->dev, "Powerdown phy %u with %u lanes", phy_id, nlanes); ipu6_isys_dwc_phy_reset(isys, phy_id); return 0; } static void ipu6_isys_register_errors(struct ipu_isys_csi2 *csi2) { u32 mask = 0; u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); mask = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? 
IPU6_CSI_RX_ERROR_IRQ_MASK : IPU6SE_CSI_RX_ERROR_IRQ_MASK; writel(irq & mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); csi2->receiver_errors |= irq & mask; } void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2) { struct ipu6_csi2_error *errors; u32 status; unsigned int i; /* Register errors once more in case of error interrupts are disabled */ ipu6_isys_register_errors(csi2); status = csi2->receiver_errors; csi2->receiver_errors = 0; errors = dphy_rx_errors; for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) { if (status & BIT(i)) dev_err_ratelimited(&csi2->isys->adev->dev, "csi2-%i error: %s\n", csi2->index, errors[i].error_string); } } const unsigned int csi2_port_cfg[][3] = { {0, 0, 0x1f}, /* no link */ {4, 0, 0x10}, /* x4 + x4 config */ {2, 0, 0x12}, /* x2 + x2 config */ {1, 0, 0x13}, /* x1 + x1 config */ {2, 1, 0x15}, /* x2x1 + x2x1 config */ {1, 1, 0x16}, /* x1x1 + x1x1 config */ {2, 2, 0x18}, /* x2x2 + x2x2 config */ {1, 2, 0x19}, /* x1x2 + x1x2 config */ }; const unsigned int phy_port_cfg[][4] = { /* port, nlanes, bbindex, portcfg */ /* sip0 */ {0, 1, 0, 0x15}, {0, 2, 0, 0x15}, {0, 4, 0, 0x15}, {0, 4, 2, 0x22}, /* sip1 */ {2, 1, 4, 0x15}, {2, 2, 4, 0x15}, {2, 4, 4, 0x15}, {2, 4, 6, 0x22}, }; static int ipu_isys_csi2_phy_config_by_port(struct ipu_isys *isys, unsigned int port, unsigned int nlanes) { void __iomem *base = isys->adev->isp->base; u32 val, reg, i; unsigned int bbnum; dev_dbg(&isys->adev->dev, "%s port %u with %u lanes", __func__, port, nlanes); /* hard code for x2x2 + x2x2 with <1.5Gbps */ for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) { /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); val = readl(base + reg); val |= 13 << 1; /* val &= ~0x1; */ writel(val, base + reg); /* cphy_rx_control1.en_crc1 = 1 */ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i); val = readl(base + reg); val |= 0x1 << 31; writel(val, base + reg); /* dphy_cfg.reserved = 1 * dphy_cfg.lden_from_dll_ovrd_0 = 1 */ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i); val = readl(base + reg); val |= 0x1 << 25; val |= 0x1 << 26; writel(val, base + reg); /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i); val = readl(base + reg); val |= 1; writel(val, base + reg); } /* bb afe config, use minimal channel loss */ for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) { if (phy_port_cfg[i][0] == port && phy_port_cfg[i][1] == nlanes) { bbnum = phy_port_cfg[i][2] / 2; reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum); val = readl(base + reg); val |= phy_port_cfg[i][3]; writel(val, base + reg); } } return 0; } static void ipu_isys_csi2_rx_control(struct ipu_isys *isys) { void __iomem *base = isys->adev->isp->base; u32 val, reg; /* lp11 release */ reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL; val = readl(base + reg); val |= 0x1; writel(0x1, base + CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL); reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL; val = readl(base + reg); val |= 0x1; writel(0x1, base + CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL); reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL; val = readl(base + reg); val |= 0x1; writel(0x1, base + CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL); reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL; val = readl(base + reg); val |= 0x1; writel(0x1, base + CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL); } static int ipu_isys_csi2_set_port_cfg(struct v4l2_subdev *sd, unsigned int port, unsigned int nlanes) { struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); struct ipu_isys *isys = csi2->isys; unsigned 
int sip = port / 2; unsigned int index; switch (nlanes) { case 1: index = 5; break; case 2: index = 6; break; case 4: index = 1; break; default: dev_err(&isys->adev->dev, "lanes nr %u is unsupported\n", nlanes); return -EINVAL; } dev_dbg(&isys->adev->dev, "port config for port %u with %u lanes\n", port, nlanes); writel(csi2_port_cfg[index][2], isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip)); return 0; } static void ipu_isys_csi2_set_timing(struct v4l2_subdev *sd, struct ipu_isys_csi2_timing timing, unsigned int port, unsigned int nlanes) { u32 port_base; void __iomem *reg; struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); struct ipu_isys *isys = csi2->isys; unsigned int i; port_base = (port % 2) ? CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) : CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port); dev_dbg(&isys->adev->dev, "set timing for port %u base 0x%x with %u lanes\n", port, port_base, nlanes); reg = isys->pdata->base + port_base; reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE; writel(timing.ctermen, reg); reg = isys->pdata->base + port_base; reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE; writel(timing.csettle, reg); for (i = 0; i < nlanes; i++) { reg = isys->pdata->base + port_base; reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i); writel(timing.dtermen, reg); reg = isys->pdata->base + port_base; reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i); writel(timing.dsettle, reg); } } int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, struct ipu_isys_csi2_timing timing, unsigned int nlanes, int enable) { struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); struct ipu_isys *isys = csi2->isys; struct media_pipeline *mp = media_entity_pipeline(&sd->entity); struct ipu_isys_pipeline *ip = container_of(mp, struct ipu_isys_pipeline, pipe); struct ipu_isys_csi2_config *cfg = v4l2_get_subdev_hostdata(media_entity_to_v4l2_subdev (ip->external->entity)); unsigned int port, port_max; int ret = 0; u32 mask = 0; unsigned int i; port = cfg->port; dev_dbg(&isys->adev->dev, "for port %u with %u lanes\n", port, nlanes); mask = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? IPU6_CSI_RX_ERROR_IRQ_MASK : IPU6SE_CSI_RX_ERROR_IRQ_MASK; if (!enable) { writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE); writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE); /* Disable interrupts */ writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); writel(mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); /* power down phy */ if (ipu_ver == IPU_VER_6EP_MTL) ret = ipu6_csi2_dwc_phy_power_set(isys, cfg, false); if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ret = ipu6_csi2_phy_power_set(isys, cfg, false); /* Disable clock */ writel(0, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT(port)); writel(0, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(port)); return ret; } /* reset port reset */ writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST); usleep_range(100, 200); writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST); /* We need enable clock for all ports for MTL */ port_max = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? 
IPU6_ISYS_CSI_PORT_NUM : IPU6SE_ISYS_CSI_PORT_NUM; /* Enable port clock */ for (i = 0; i < port_max; i++) { writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(i)); if (ipu_ver == IPU_VER_6EP_MTL) writel(1, isys->pdata->base + IPU6V6_CSI_REG_HUB_FW_ACCESS_PORT(i)); else writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT(i)); } /* enable all error related irq */ writel(mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); writel(mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); writel(mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); writel(mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); writel(mask, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); /* To save CPU wakeups, disable CSI SOF/EOF irq */ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_MASK_OFFSET); writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET); writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET); /* Configure FE/PPI2CSI and enable FE/ PPI2CSI */ writel(0, csi2->base + CSI_REG_CSI_FE_MODE); writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL); writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID, csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL); writel(((nlanes - 1) << PPI_INTF_CONFIG_NOF_ENABLED_DATALANES_SHIFT) | (0 << PPI_INTF_CONFIG_RX_AUTO_CLKGATING_SHIFT), csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF); writel(0x06, csi2->base + CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE); writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE); writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE); if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) { /* Enable DPHY power */ ret = ipu6_csi2_phy_power_set(isys, cfg, true); if (ret) { dev_err(&isys->adev->dev, "CSI-%d PHY power up failed %d\n", cfg->port, ret); return ret; } } else if (ipu_ver == IPU_VER_6EP_MTL) { /* Enable DWC DPHY power */ ret = ipu6_csi2_dwc_phy_power_set(isys, cfg, true); if (ret) { dev_err(&isys->adev->dev, "CSI-%d DWC-PHY power up failed %d\n", cfg->port, ret); return ret; } } else if (ipu_ver == IPU_VER_6SE) { ipu_isys_csi2_phy_config_by_port(isys, port, nlanes); /* 9'b00010.1000 for 400Mhz isys freqency */ writel(0x28, isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR); /* set port cfg and rx timing */ ipu_isys_csi2_set_timing(sd, timing, port, nlanes); ret = ipu_isys_csi2_set_port_cfg(sd, port, nlanes); if (ret) return ret; ipu_isys_csi2_rx_control(isys); } return 0; } void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2) { u32 status; ipu6_isys_register_errors(csi2); status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET); writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC + CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET); if (status & IPU_CSI_RX_IRQ_FS_VC) ipu_isys_csi2_sof_event(csi2); if (status & IPU_CSI_RX_IRQ_FE_VC) ipu_isys_csi2_eof_event(csi2); } unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, unsigned int *timestamp) { struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); struct ipu_isys *isys = av->isys; unsigned int field = V4L2_FIELD_TOP; struct ipu_isys_buffer 
*short_packet_ib = list_last_entry(&ip->short_packet_active, struct ipu_isys_buffer, head); struct ipu_isys_private_buffer *pb = ipu_isys_buffer_to_private_buffer(short_packet_ib); struct ipu_isys_mipi_packet_header *ph = (struct ipu_isys_mipi_packet_header *) pb->buffer; /* Check if the first SOF packet is received. */ if ((ph->dtype & IPU_ISYS_SHORT_PACKET_DTYPE_MASK) != 0) dev_warn(&isys->adev->dev, "First short packet is not SOF.\n"); field = (ph->word_count % 2) ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; dev_dbg(&isys->adev->dev, "Interlaced field ready. frame_num = %d field = %d\n", ph->word_count, field); return field; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h000066400000000000000000000004741471702545500271030ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU6_ISYS_CSI2_H #define IPU6_ISYS_CSI2_H struct ipu_isys_csi2_timing; struct ipu_isys_csi2; struct ipu_isys_pipeline; struct v4l2_subdev; #define IPU_ISYS_SHORT_PACKET_DTYPE_MASK 0x3f #endif /* IPU6_ISYS_CSI2_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.c000066400000000000000000000332641471702545500276140ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include #include "ipu.h" #include "ipu-buttress.h" #include "ipu-isys.h" #include "ipu-isys-csi2.h" #include "ipu-platform-regs.h" #include "ipu-platform-isys-csi2-reg.h" #include "ipu6-isys-csi2.h" #include "ipu6-isys-dwc-phy.h" #define IPU_DWC_DPHY_MAX_NUM (6) #define IPU_DWC_DPHY_BASE(i) (0x238038 + 0x34 * (i)) #define IPU_DWC_DPHY_RSTZ (0x00) #define IPU_DWC_DPHY_SHUTDOWNZ (0x04) #define IPU_DWC_DPHY_HSFREQRANGE (0x08) #define IPU_DWC_DPHY_CFGCLKFREQRANGE (0x0c) #define IPU_DWC_DPHY_TEST_IFC_ACCESS_MODE (0x10) #define IPU_DWC_DPHY_TEST_IFC_REQ (0x14) #define IPU_DWC_DPHY_TEST_IFC_REQ_COMPLETION (0x18) #define IPU_DWC_DPHY_TEST_IFC_CTRL0 (0x1c) #define IPU_DWC_DPHY_TEST_IFC_CTRL1 (0x20) #define IPU_DWC_DPHY_TEST_IFC_CTRL1_RO (0x24) #define IPU_DWC_DPHY_DFT_CTRL0 (0x28) #define IPU_DWC_DPHY_DFT_CTRL1 (0x2c) #define IPU_DWC_DPHY_DFT_CTRL2 (0x30) #define PPI_DATAWIDTH_8BIT 0 #define PPI_DATAWIDTH_16BIT 1 /* * test IFC request definition: * - req: 0 for read, 1 for write * - 12 bits address * - 8bits data (will ignore for read) * --24----16------4-----0 * --|-data-|-addr-|-req-| */ #define IFC_REQ(req, addr, data) ((data) << 16 | (addr) << 4 | (req)) enum req_type { TEST_IFC_REQ_READ = 0, TEST_IFC_REQ_WRITE = 1, TEST_IFC_REQ_RESET = 2, }; enum access_mode { TEST_IFC_ACCESS_MODE_FSM = 0, /* backup mode for DFT/workaround etc */ TEST_IFC_ACCESS_MODE_IFC_CTL = 1, }; enum phy_fsm_state { PHY_FSM_STATE_POWERON = 0, PHY_FSM_STATE_BGPON = 1, PHY_FSM_STATE_CAL_TYPE = 2, PHY_FSM_STATE_BURNIN_CAL = 3, PHY_FSM_STATE_TERMCAL = 4, PHY_FSM_STATE_OFFSETCAL = 5, PHY_FSM_STATE_OFFSET_LANE = 6, PHY_FSM_STATE_IDLE = 7, PHY_FSM_STATE_ULP = 8, PHY_FSM_STATE_DDLTUNNING = 9, PHY_FSM_STATE_SKEW_BACKWARD = 10, PHY_FSM_STATE_INVALID, }; static void dwc_dphy_write(struct ipu_isys *isys, u32 phy_id, u32 addr, u32 data) { void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IPU_DWC_DPHY_BASE(phy_id); dev_dbg(&isys->adev->dev, "write: reg 0x%lx = data 0x%x", base + addr - isys_base, data); writel(data, base + addr); } static u32 dwc_dphy_read(struct ipu_isys *isys, u32 phy_id, u32 addr) { u32 data; void __iomem *isys_base = 
isys->pdata->base; void __iomem *base = isys_base + IPU_DWC_DPHY_BASE(phy_id); data = readl(base + addr); dev_dbg(&isys->adev->dev, "read: reg 0x%lx = data 0x%x", base + addr - isys_base, data); return data; } static void dwc_dphy_write_mask(struct ipu_isys *isys, u32 phy_id, u32 addr, u32 data, u8 shift, u8 width) { u32 temp; u32 mask; mask = (1 << width) - 1; temp = dwc_dphy_read(isys, phy_id, addr); temp &= ~(mask << shift); temp |= (data & mask) << shift; dwc_dphy_write(isys, phy_id, addr, temp); } static u32 __maybe_unused dwc_dphy_read_mask(struct ipu_isys *isys, u32 phy_id, u32 addr, u8 shift, u8 width) { return (dwc_dphy_read(isys, phy_id, addr) >> shift) & ((1 << width) - 1); } #define DWC_DPHY_TIMEOUT (5000000) static int dwc_dphy_ifc_read(struct ipu_isys *isys, u32 phy_id, u32 addr, u32 *val) { int rval; u32 completion; void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IPU_DWC_DPHY_BASE(phy_id); void __iomem *reg; u32 timeout = DWC_DPHY_TIMEOUT; dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_TEST_IFC_REQ, IFC_REQ(TEST_IFC_REQ_READ, addr, 0)); reg = base + IPU_DWC_DPHY_TEST_IFC_REQ_COMPLETION; rval = readl_poll_timeout(reg, completion, !(completion & BIT(0)), 10, timeout); if (rval) { dev_err(&isys->adev->dev, "%s: ifc request read timeout!", __func__); return rval; } *val = completion >> 8 & 0xff; dev_dbg(&isys->adev->dev, "ifc read 0x%x = 0x%x", addr, *val); return 0; } static int dwc_dphy_ifc_write(struct ipu_isys *isys, u32 phy_id, u32 addr, u32 data) { int rval; u32 completion; void __iomem *reg; void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IPU_DWC_DPHY_BASE(phy_id); u32 timeout = DWC_DPHY_TIMEOUT; dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_TEST_IFC_REQ, IFC_REQ(TEST_IFC_REQ_WRITE, addr, data)); completion = readl(base + IPU_DWC_DPHY_TEST_IFC_REQ_COMPLETION); reg = base + IPU_DWC_DPHY_TEST_IFC_REQ_COMPLETION; rval = readl_poll_timeout(reg, completion, !(completion & BIT(0)), 10, timeout); if (rval) { dev_err(&isys->adev->dev, "%s: ifc request write timeout", __func__); return rval; } return 0; } static void dwc_dphy_ifc_write_mask(struct ipu_isys *isys, u32 phy_id, u32 addr, u32 data, u8 shift, u8 width) { int rval; u32 temp, mask; rval = dwc_dphy_ifc_read(isys, phy_id, addr, &temp); if (rval) { dev_err(&isys->adev->dev, "dphy proxy read failed with %d", rval); return; } mask = (1 << width) - 1; temp &= ~(mask << shift); temp |= (data & mask) << shift; rval = dwc_dphy_ifc_write(isys, phy_id, addr, temp); if (rval) dev_err(&isys->adev->dev, "dphy proxy write failed(%d)", rval); } static u32 dwc_dphy_ifc_read_mask(struct ipu_isys *isys, u32 phy_id, u32 addr, u8 shift, u8 width) { int rval; u32 val; rval = dwc_dphy_ifc_read(isys, phy_id, addr, &val); if (rval) { dev_err(&isys->adev->dev, "dphy proxy read failed with %d", rval); return 0; } return ((val >> shift) & ((1 << width) - 1)); } static int dwc_dphy_pwr_up(struct ipu_isys *isys, u32 phy_id) { u32 fsm_state; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) ktime_t __timeout = ktime_add_us(ktime_get(), DWC_DPHY_TIMEOUT); #else int ret; u32 timeout = DWC_DPHY_TIMEOUT; #endif dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_RSTZ, 1); usleep_range(10, 20); dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_SHUTDOWNZ, 1); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) for (;;) { fsm_state = dwc_dphy_ifc_read_mask(isys, phy_id, 0x1e, 0, 4); if (fsm_state == PHY_FSM_STATE_IDLE || fsm_state == PHY_FSM_STATE_ULP) break; if (ktime_compare(ktime_get(), __timeout) > 0) { fsm_state = 
dwc_dphy_ifc_read_mask(isys, phy_id, 0x1e, 0, 4); break; } usleep_range(50, 100); } if (fsm_state != PHY_FSM_STATE_IDLE && fsm_state != PHY_FSM_STATE_ULP) { dev_err(&isys->adev->dev, "DPHY%d power up failed, state 0x%x", phy_id, fsm_state); return -ETIMEDOUT; } #else ret = read_poll_timeout(dwc_dphy_ifc_read_mask, fsm_state, (fsm_state == PHY_FSM_STATE_IDLE || fsm_state == PHY_FSM_STATE_ULP), 100, timeout, false, isys, phy_id, 0x1e, 0, 4); if (ret) { dev_err(&isys->adev->dev, "DPHY%d power up failed, state 0x%x", phy_id, fsm_state); return ret; } #endif return 0; } struct dwc_dphy_freq_range { u8 hsfreq; u32 min; u32 max; u32 default_mbps; u32 osc_freq_target; }; #define DPHY_FREQ_RANGE_NUM (63) #define DPHY_FREQ_RANGE_INVALID_INDEX (0xff) const struct dwc_dphy_freq_range freqranges[DPHY_FREQ_RANGE_NUM] = { {0x00, 80, 97, 80, 335}, {0x10, 80, 107, 90, 335}, {0x20, 84, 118, 100, 335}, {0x30, 93, 128, 110, 335}, {0x01, 103, 139, 120, 335}, {0x11, 112, 149, 130, 335}, {0x21, 122, 160, 140, 335}, {0x31, 131, 170, 150, 335}, {0x02, 141, 181, 160, 335}, {0x12, 150, 191, 170, 335}, {0x22, 160, 202, 180, 335}, {0x32, 169, 212, 190, 335}, {0x03, 183, 228, 205, 335}, {0x13, 198, 244, 220, 335}, {0x23, 212, 259, 235, 335}, {0x33, 226, 275, 250, 335}, {0x04, 250, 301, 275, 335}, {0x14, 274, 328, 300, 335}, {0x25, 297, 354, 325, 335}, {0x35, 321, 380, 350, 335}, {0x05, 369, 433, 400, 335}, {0x16, 416, 485, 450, 335}, {0x26, 464, 538, 500, 335}, {0x37, 511, 590, 550, 335}, {0x07, 559, 643, 600, 335}, {0x18, 606, 695, 650, 335}, {0x28, 654, 748, 700, 335}, {0x39, 701, 800, 750, 335}, {0x09, 749, 853, 800, 335}, {0x19, 796, 905, 850, 335}, {0x29, 844, 958, 900, 335}, {0x3a, 891, 1010, 950, 335}, {0x0a, 939, 1063, 1000, 335}, {0x1a, 986, 1115, 1050, 335}, {0x2a, 1034, 1168, 1100, 335}, {0x3b, 1081, 1220, 1150, 335}, {0x0b, 1129, 1273, 1200, 335}, {0x1b, 1176, 1325, 1250, 335}, {0x2b, 1224, 1378, 1300, 335}, {0x3c, 1271, 1430, 1350, 335}, {0x0c, 1319, 1483, 1400, 335}, {0x1c, 1366, 1535, 1450, 335}, {0x2c, 1414, 1588, 1500, 335}, {0x3d, 1461, 1640, 1550, 208}, {0x0d, 1509, 1693, 1600, 214}, {0x1d, 1556, 1745, 1650, 221}, {0x2e, 1604, 1798, 1700, 228}, {0x3e, 1651, 1850, 1750, 234}, {0x0e, 1699, 1903, 1800, 241}, {0x1e, 1746, 1955, 1850, 248}, {0x2f, 1794, 2008, 1900, 255}, {0x3f, 1841, 2060, 1950, 261}, {0x0f, 1889, 2113, 2000, 268}, {0x40, 1936, 2165, 2050, 275}, {0x41, 1984, 2218, 2100, 281}, {0x42, 2031, 2270, 2150, 288}, {0x43, 2079, 2323, 2200, 294}, {0x44, 2126, 2375, 2250, 302}, {0x45, 2174, 2428, 2300, 308}, {0x46, 2221, 2480, 2350, 315}, {0x47, 2269, 2500, 2400, 321}, {0x48, 2316, 2500, 2450, 328}, {0x49, 2364, 2500, 2500, 335}, }; static u32 get_hsfreq_by_mbps(u32 mbps) { int i; for (i = DPHY_FREQ_RANGE_NUM - 1; i >= 0; i--) { if (freqranges[i].default_mbps == mbps || (mbps >= freqranges[i].min && mbps <= freqranges[i].max)) return i; } return DPHY_FREQ_RANGE_INVALID_INDEX; } int ipu6_isys_dwc_phy_config(struct ipu_isys *isys, u32 phy_id, u32 mbps) { u32 index; u32 osc_freq_target; u32 cfg_clk_freqrange; struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev); struct ipu_device *isp = adev->isp; dev_dbg(&isys->adev->dev, "config phy %u with %u mbps", phy_id, mbps); index = get_hsfreq_by_mbps(mbps); if (index == DPHY_FREQ_RANGE_INVALID_INDEX) { dev_err(&isys->adev->dev, "link freq not found for mbps %u", mbps); return -EINVAL; } dwc_dphy_write_mask(isys, phy_id, IPU_DWC_DPHY_HSFREQRANGE, freqranges[index].hsfreq, 0, 7); /* Force termination Calibration */ if (isys->phy_termcal_val) { 
dwc_dphy_ifc_write_mask(isys, phy_id, 0x20a, 0x1, 0, 1); dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, 0x3, 0, 2); dwc_dphy_ifc_write_mask(isys, phy_id, 0x209, isys->phy_termcal_val, 4, 4); } /* * Enable override to configure the DDL target oscillation * frequency on bit 0 of register 0xe4 */ dwc_dphy_ifc_write_mask(isys, phy_id, 0xe4, 0x1, 0, 1); /* * configure registers 0xe2, 0xe3 with the * appropriate DDL target oscillation frequency * 0x1cc(460) */ osc_freq_target = freqranges[index].osc_freq_target; dwc_dphy_ifc_write_mask(isys, phy_id, 0xe2, osc_freq_target & 0xff, 0, 8); dwc_dphy_ifc_write_mask(isys, phy_id, 0xe3, (osc_freq_target >> 8) & 0xf, 0, 4); if (mbps < 1500) { /* deskew_polarity_rw, for < 1.5Gbps */ dwc_dphy_ifc_write_mask(isys, phy_id, 0x8, 0x1, 5, 1); } /* * Set cfgclkfreqrange[5:0] = round[(Fcfg_clk(MHz)-17)*4] * (38.4 - 17) * 4 = ~85 (0x55) */ cfg_clk_freqrange = (isp->buttress.ref_clk - 170) * 4 / 10; dev_dbg(&isys->adev->dev, "ref_clk = %u clf_freqrange = %u", isp->buttress.ref_clk, cfg_clk_freqrange); dwc_dphy_write_mask(isys, phy_id, IPU_DWC_DPHY_CFGCLKFREQRANGE, cfg_clk_freqrange, 0, 8); /* * run without external reference resistor for 2Gbps * dwc_dphy_ifc_write_mask(isys, phy_id, 0x4, 0x0, 4, 1); */ dwc_dphy_write_mask(isys, phy_id, IPU_DWC_DPHY_DFT_CTRL2, 0x1, 4, 1); dwc_dphy_write_mask(isys, phy_id, IPU_DWC_DPHY_DFT_CTRL2, 0x1, 8, 1); return 0; } void ipu6_isys_dwc_phy_aggr_setup(struct ipu_isys *isys, u32 master, u32 slave, u32 mbps) { /* Config mastermacro */ dwc_dphy_ifc_write_mask(isys, master, 0x133, 0x1, 0, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x133, 0x0, 0, 1); /* Config master PHY clk lane to drive long channel clk */ dwc_dphy_ifc_write_mask(isys, master, 0x307, 0x1, 2, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x0, 2, 1); /* Config both PHYs data lanes to get clk from long channel */ dwc_dphy_ifc_write_mask(isys, master, 0x508, 0x1, 5, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x508, 0x1, 5, 1); dwc_dphy_ifc_write_mask(isys, master, 0x708, 0x1, 5, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x708, 0x1, 5, 1); /* Config slave PHY clk lane to bypass long channel clk to DDR clk */ dwc_dphy_ifc_write_mask(isys, master, 0x308, 0x0, 3, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x308, 0x1, 3, 1); /* Override slave PHY clk lane enable (DPHYRXCLK_CLL_demux module) */ dwc_dphy_ifc_write_mask(isys, slave, 0xe0, 0x3, 0, 2); /* Override slave PHY DDR clk lane enable (DPHYHSRX_div124 module) */ dwc_dphy_ifc_write_mask(isys, slave, 0xe1, 0x1, 1, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x307, 0x1, 3, 1); /* Turn off slave PHY LP-RX clk lane */ dwc_dphy_ifc_write_mask(isys, slave, 0x304, 0x1, 7, 1); dwc_dphy_ifc_write_mask(isys, slave, 0x305, 0xa, 0, 5); } #define PHY_E (4) int ipu6_isys_dwc_phy_powerup_ack(struct ipu_isys *isys, u32 phy_id) { int rval; u32 rescal_done; rval = dwc_dphy_pwr_up(isys, phy_id); if (rval != 0) { dev_err(&isys->adev->dev, "dphy%u power up failed(%d)", phy_id, rval); return rval; } /* reset forcerxmode */ dwc_dphy_write_mask(isys, phy_id, IPU_DWC_DPHY_DFT_CTRL2, 0, 4, 1); dwc_dphy_write_mask(isys, phy_id, IPU_DWC_DPHY_DFT_CTRL2, 0, 8, 1); dev_dbg(&isys->adev->dev, "phy %u is ready!", phy_id); if (phy_id != PHY_E || isys->phy_termcal_val) return 0; usleep_range(100, 200); rescal_done = dwc_dphy_ifc_read_mask(isys, phy_id, 0x221, 7, 1); if (rescal_done) { isys->phy_termcal_val = dwc_dphy_ifc_read_mask(isys, phy_id, 0x220, 2, 4); dev_dbg(&isys->adev->dev, "termcal done with value = %u", isys->phy_termcal_val); } return 0; } void 
ipu6_isys_dwc_phy_reset(struct ipu_isys *isys, u32 phy_id) { dev_dbg(&isys->adev->dev, "Reset phy %u", phy_id); dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_SHUTDOWNZ, 0); dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_RSTZ, 0); dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_TEST_IFC_ACCESS_MODE, TEST_IFC_ACCESS_MODE_FSM); dwc_dphy_write(isys, phy_id, IPU_DWC_DPHY_TEST_IFC_REQ, TEST_IFC_REQ_RESET); } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-dwc-phy.h000066400000000000000000000010261471702545500276100ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU6_ISYS_DWC_PHY_H #define IPU6_ISYS_DWC_PHY_H int ipu6_isys_dwc_phy_powerup_ack(struct ipu_isys *isys, u32 phy_id); int ipu6_isys_dwc_phy_config(struct ipu_isys *isys, u32 phy_id, u32 mbps); int ipu6_isys_dwc_phy_termcal_rext(struct ipu_isys *isys, u32 mbps); void ipu6_isys_dwc_phy_reset(struct ipu_isys *isys, u32 phy_id); void ipu6_isys_dwc_phy_aggr_setup(struct ipu_isys *isys, u32 master, u32 slave, u32 mbps); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-gpc.c000066400000000000000000000114621471702545500270060ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #ifdef CONFIG_DEBUG_FS #include #include #include "ipu-isys.h" #include "ipu-platform-regs.h" #define IPU_ISYS_GPC_NUM 16 #ifndef CONFIG_PM #define pm_runtime_get_sync(d) 0 #define pm_runtime_put(d) 0 #endif struct ipu_isys_gpc { bool enable; unsigned int route; unsigned int source; unsigned int sense; unsigned int gpcindex; void *prit; }; struct ipu_isys_gpcs { bool gpc_enable; struct ipu_isys_gpc gpc[IPU_ISYS_GPC_NUM]; void *prit; }; static int ipu6_isys_gpc_global_enable_get(void *data, u64 *val) { struct ipu_isys_gpcs *isys_gpcs = data; struct ipu_isys *isys = isys_gpcs->prit; mutex_lock(&isys->mutex); *val = isys_gpcs->gpc_enable; mutex_unlock(&isys->mutex); return 0; } static int ipu6_isys_gpc_global_enable_set(void *data, u64 val) { struct ipu_isys_gpcs *isys_gpcs = data; struct ipu_isys *isys = isys_gpcs->prit; void __iomem *base; int i, ret; if (val != 0 && val != 1) return -EINVAL; if (!isys || !isys->pdata || !isys->pdata->base) return -EINVAL; mutex_lock(&isys->mutex); base = isys->pdata->base + IPU_ISYS_GPC_BASE; ret = pm_runtime_get_sync(&isys->adev->dev); if (ret < 0) { pm_runtime_put(&isys->adev->dev); mutex_unlock(&isys->mutex); return ret; } if (!val) { writel(0x0, base + IPU_ISYS_GPREG_TRACE_TIMER_RST); writel(0x0, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE); writel(0xffff, base + IPU_ISF_CDC_MMU_GPC_SOFT_RESET); isys_gpcs->gpc_enable = false; for (i = 0; i < IPU_ISYS_GPC_NUM; i++) { isys_gpcs->gpc[i].enable = 0; isys_gpcs->gpc[i].sense = 0; isys_gpcs->gpc[i].route = 0; isys_gpcs->gpc[i].source = 0; } pm_runtime_put(&isys->adev->dev); } else { /* * Set gpc reg and start all gpc here. * RST free running local timer. */ writel(0x0, base + IPU_ISYS_GPREG_TRACE_TIMER_RST); writel(0x1, base + IPU_ISYS_GPREG_TRACE_TIMER_RST); for (i = 0; i < IPU_ISYS_GPC_NUM; i++) { /* Enable */ writel(isys_gpcs->gpc[i].enable, base + IPU_ISF_CDC_MMU_GPC_ENABLE0 + 4 * i); /* Setting (route/source/sense) */ writel((isys_gpcs->gpc[i].sense << IPU_GPC_SENSE_OFFSET) + (isys_gpcs->gpc[i].route << IPU_GPC_ROUTE_OFFSET) + (isys_gpcs->gpc[i].source << IPU_GPC_SOURCE_OFFSET), base + IPU_ISF_CDC_MMU_GPC_CNT_SEL0 + 4 * i); } /* Soft reset and Overall Enable. 
*/ writel(0x0, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE); writel(0xffff, base + IPU_ISF_CDC_MMU_GPC_SOFT_RESET); writel(0x1, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE); isys_gpcs->gpc_enable = true; } mutex_unlock(&isys->mutex); return 0; } DEFINE_SIMPLE_ATTRIBUTE(isys_gpc_globe_enable_fops, ipu6_isys_gpc_global_enable_get, ipu6_isys_gpc_global_enable_set, "%llu\n"); static int ipu6_isys_gpc_count_get(void *data, u64 *val) { struct ipu_isys_gpc *isys_gpc = data; struct ipu_isys *isys = isys_gpc->prit; void __iomem *base; if (!isys || !isys->pdata || !isys->pdata->base) return -EINVAL; spin_lock(&isys->power_lock); if (isys->power) { base = isys->pdata->base + IPU_ISYS_GPC_BASE; *val = readl(base + IPU_ISF_CDC_MMU_GPC_VALUE0 + 4 * isys_gpc->gpcindex); } else { *val = 0; } spin_unlock(&isys->power_lock); return 0; } DEFINE_SIMPLE_ATTRIBUTE(isys_gpc_count_fops, ipu6_isys_gpc_count_get, NULL, "%llu\n"); int ipu_isys_gpc_init_debugfs(struct ipu_isys *isys) { struct dentry *gpcdir; struct dentry *dir; struct dentry *file; int i; char gpcname[10]; struct ipu_isys_gpcs *isys_gpcs; isys_gpcs = devm_kzalloc(&isys->adev->dev, sizeof(*isys_gpcs), GFP_KERNEL); if (!isys_gpcs) return -ENOMEM; gpcdir = debugfs_create_dir("gpcs", isys->debugfsdir); if (IS_ERR(gpcdir)) return -ENOMEM; isys_gpcs->prit = isys; file = debugfs_create_file("enable", 0600, gpcdir, isys_gpcs, &isys_gpc_globe_enable_fops); if (IS_ERR(file)) goto err; for (i = 0; i < IPU_ISYS_GPC_NUM; i++) { sprintf(gpcname, "gpc%d", i); dir = debugfs_create_dir(gpcname, gpcdir); if (IS_ERR(dir)) goto err; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) file = debugfs_create_bool("enable", 0600, dir, &isys_gpcs->gpc[i].enable); if (IS_ERR(file)) goto err; #else debugfs_create_bool("enable", 0600, dir, &isys_gpcs->gpc[i].enable); #endif debugfs_create_u32("source", 0600, dir, &isys_gpcs->gpc[i].source); debugfs_create_u32("route", 0600, dir, &isys_gpcs->gpc[i].route); debugfs_create_u32("sense", 0600, dir, &isys_gpcs->gpc[i].sense); isys_gpcs->gpc[i].gpcindex = i; isys_gpcs->gpc[i].prit = isys; file = debugfs_create_file("count", 0400, dir, &isys_gpcs->gpc[i], &isys_gpc_count_fops); if (IS_ERR(file)) goto err; } return 0; err: debugfs_remove_recursive(gpcdir); return -ENOMEM; } #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-phy.c000066400000000000000000000367431471702545500270460ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 - 2024 Intel Corporation */ #include #include #include #include "ipu.h" #include "ipu-buttress.h" #include "ipu-isys.h" #include "ipu-isys-csi2.h" #include "ipu-platform-regs.h" #include "ipu-platform-isys-csi2-reg.h" #include "ipu6-isys-csi2.h" #include "ipu6-isys-phy.h" #define LOOP (2000) #define PHY_REG_INIT_CTL 0x00000694 #define PHY_REG_INIT_CTL_PORT_OFFSET 0x00000600 struct phy_reg { u32 reg; u32 val; }; static const struct phy_reg common_init_regs[] = { /* for TGL-U, use 0x80000000 */ {0x00000040, 0x80000000}, {0x00000044, 0x00a80880}, {0x00000044, 0x00b80880}, {0x00000010, 0x0000078c}, {0x00000344, 0x2f4401e2}, {0x00000544, 0x924401e2}, {0x00000744, 0x594401e2}, {0x00000944, 0x624401e2}, {0x00000b44, 0xfc4401e2}, {0x00000d44, 0xc54401e2}, {0x00000f44, 0x034401e2}, {0x00001144, 0x8f4401e2}, {0x00001344, 0x754401e2}, {0x00001544, 0xe94401e2}, {0x00001744, 0xcb4401e2}, {0x00001944, 0xfa4401e2} }; static const struct phy_reg x1_port0_config_regs[] = { {0x00000694, 0xc80060fa}, {0x00000680, 0x3d4f78ea}, {0x00000690, 0x10a0140b}, {0x000006a8, 
0xdf04010a}, {0x00000700, 0x57050060}, {0x00000710, 0x0030001c}, {0x00000738, 0x5f004444}, {0x0000073c, 0x78464204}, {0x00000748, 0x7821f940}, {0x0000074c, 0xb2000433}, {0x00000494, 0xfe6030fa}, {0x00000480, 0x29ef5ed0}, {0x00000490, 0x10a0540b}, {0x000004a8, 0x7a01010a}, {0x00000500, 0xef053460}, {0x00000510, 0xe030101c}, {0x00000538, 0xdf808444}, {0x0000053c, 0xc8422204}, {0x00000540, 0x0180088c}, {0x00000574, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x1_port1_config_regs[] = { {0x00000c94, 0xc80060fa}, {0x00000c80, 0xcf47abea}, {0x00000c90, 0x10a0840b}, {0x00000ca8, 0xdf04010a}, {0x00000d00, 0x57050060}, {0x00000d10, 0x0030001c}, {0x00000d38, 0x5f004444}, {0x00000d3c, 0x78464204}, {0x00000d48, 0x7821f940}, {0x00000d4c, 0xb2000433}, {0x00000a94, 0xc91030fa}, {0x00000a80, 0x5a166ed0}, {0x00000a90, 0x10a0540b}, {0x00000aa8, 0x5d060100}, {0x00000b00, 0xef053460}, {0x00000b10, 0xa030101c}, {0x00000b38, 0xdf808444}, {0x00000b3c, 0xc8422204}, {0x00000b40, 0x0180088c}, {0x00000b74, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x1_port2_config_regs[] = { {0x00001294, 0x28f000fa}, {0x00001280, 0x08130cea}, {0x00001290, 0x10a0140b}, {0x000012a8, 0xd704010a}, {0x00001300, 0x8d050060}, {0x00001310, 0x0030001c}, {0x00001338, 0xdf008444}, {0x0000133c, 0x78422204}, {0x00001348, 0x7821f940}, {0x0000134c, 0x5a000433}, {0x00001094, 0x2d20b0fa}, {0x00001080, 0xade75dd0}, {0x00001090, 0x10a0540b}, {0x000010a8, 0xb101010a}, {0x00001100, 0x33053460}, {0x00001110, 0x0030101c}, {0x00001138, 0xdf808444}, {0x0000113c, 0xc8422204}, {0x00001140, 0x8180088c}, {0x00001174, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x1_port3_config_regs[] = { {0x00001894, 0xc80060fa}, {0x00001880, 0x0f90fd6a}, {0x00001890, 0x10a0840b}, {0x000018a8, 0xdf04010a}, {0x00001900, 0x57050060}, {0x00001910, 0x0030001c}, {0x00001938, 0x5f004444}, {0x0000193c, 0x78464204}, {0x00001948, 0x7821f940}, {0x0000194c, 0xb2000433}, {0x00001694, 0x3050d0fa}, {0x00001680, 0x0ef6d050}, {0x00001690, 0x10a0540b}, {0x000016a8, 0xe301010a}, {0x00001700, 0x69053460}, {0x00001710, 0xa030101c}, {0x00001738, 0xdf808444}, {0x0000173c, 0xc8422204}, {0x00001740, 0x0180088c}, {0x00001774, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x2_port0_config_regs[] = { {0x00000694, 0xc80060fa}, {0x00000680, 0x3d4f78ea}, {0x00000690, 0x10a0140b}, {0x000006a8, 0xdf04010a}, {0x00000700, 0x57050060}, {0x00000710, 0x0030001c}, {0x00000738, 0x5f004444}, {0x0000073c, 0x78464204}, {0x00000748, 0x7821f940}, {0x0000074c, 0xb2000433}, {0x00000494, 0xc80060fa}, {0x00000480, 0x29ef5ed8}, {0x00000490, 0x10a0540b}, {0x000004a8, 0x7a01010a}, {0x00000500, 0xef053460}, {0x00000510, 0xe030101c}, {0x00000538, 0xdf808444}, {0x0000053c, 0xc8422204}, {0x00000540, 0x0180088c}, {0x00000574, 0x00000000}, {0x00000294, 0xc80060fa}, {0x00000280, 0xcb45b950}, {0x00000290, 0x10a0540b}, {0x000002a8, 0x8c01010a}, {0x00000300, 0xef053460}, {0x00000310, 0x8030101c}, {0x00000338, 0x41808444}, {0x0000033c, 0x32422204}, {0x00000340, 0x0180088c}, {0x00000374, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x2_port1_config_regs[] = { {0x00000c94, 0xc80060fa}, {0x00000c80, 0xcf47abea}, {0x00000c90, 0x10a0840b}, {0x00000ca8, 0xdf04010a}, {0x00000d00, 0x57050060}, {0x00000d10, 0x0030001c}, {0x00000d38, 0x5f004444}, {0x00000d3c, 0x78464204}, {0x00000d48, 0x7821f940}, {0x00000d4c, 0xb2000433}, {0x00000a94, 0xc80060fa}, {0x00000a80, 0x5a166ed8}, {0x00000a90, 0x10a0540b}, {0x00000aa8, 0x7a01010a}, 
{0x00000b00, 0xef053460}, {0x00000b10, 0xa030101c}, {0x00000b38, 0xdf808444}, {0x00000b3c, 0xc8422204}, {0x00000b40, 0x0180088c}, {0x00000b74, 0x00000000}, {0x00000894, 0xc80060fa}, {0x00000880, 0x4d4f21d0}, {0x00000890, 0x10a0540b}, {0x000008a8, 0x5601010a}, {0x00000900, 0xef053460}, {0x00000910, 0x8030101c}, {0x00000938, 0xdf808444}, {0x0000093c, 0xc8422204}, {0x00000940, 0x0180088c}, {0x00000974, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x2_port2_config_regs[] = { {0x00001294, 0xc80060fa}, {0x00001280, 0x08130cea}, {0x00001290, 0x10a0140b}, {0x000012a8, 0xd704010a}, {0x00001300, 0x8d050060}, {0x00001310, 0x0030001c}, {0x00001338, 0xdf008444}, {0x0000133c, 0x78422204}, {0x00001348, 0x7821f940}, {0x0000134c, 0x5a000433}, {0x00001094, 0xc80060fa}, {0x00001080, 0xade75dd8}, {0x00001090, 0x10a0540b}, {0x000010a8, 0xb101010a}, {0x00001100, 0x33053460}, {0x00001110, 0x0030101c}, {0x00001138, 0xdf808444}, {0x0000113c, 0xc8422204}, {0x00001140, 0x8180088c}, {0x00001174, 0x00000000}, {0x00000e94, 0xc80060fa}, {0x00000e80, 0x0fbf16d0}, {0x00000e90, 0x10a0540b}, {0x00000ea8, 0x7a01010a}, {0x00000f00, 0xf5053460}, {0x00000f10, 0xc030101c}, {0x00000f38, 0xdf808444}, {0x00000f3c, 0xc8422204}, {0x00000f40, 0x8180088c}, {0x00000000, 0x00000000} }; static const struct phy_reg x2_port3_config_regs[] = { {0x00001894, 0xc80060fa}, {0x00001880, 0x0f90fd6a}, {0x00001890, 0x10a0840b}, {0x000018a8, 0xdf04010a}, {0x00001900, 0x57050060}, {0x00001910, 0x0030001c}, {0x00001938, 0x5f004444}, {0x0000193c, 0x78464204}, {0x00001948, 0x7821f940}, {0x0000194c, 0xb2000433}, {0x00001694, 0xc80060fa}, {0x00001680, 0x0ef6d058}, {0x00001690, 0x10a0540b}, {0x000016a8, 0x7a01010a}, {0x00001700, 0x69053460}, {0x00001710, 0xa030101c}, {0x00001738, 0xdf808444}, {0x0000173c, 0xc8422204}, {0x00001740, 0x0180088c}, {0x00001774, 0x00000000}, {0x00001494, 0xc80060fa}, {0x00001480, 0xf9d34bd0}, {0x00001490, 0x10a0540b}, {0x000014a8, 0x7a01010a}, {0x00001500, 0x1b053460}, {0x00001510, 0x0030101c}, {0x00001538, 0xdf808444}, {0x0000153c, 0xc8422204}, {0x00001540, 0x8180088c}, {0x00001574, 0x00000000}, {0x00000000, 0x00000000} }; static const struct phy_reg x4_port0_config_regs[] = { {0x00000694, 0xc80060fa}, {0x00000680, 0x3d4f78fa}, {0x00000690, 0x10a0140b}, {0x000006a8, 0xdf04010a}, {0x00000700, 0x57050060}, {0x00000710, 0x0030001c}, {0x00000738, 0x5f004444}, {0x0000073c, 0x78464204}, {0x00000748, 0x7821f940}, {0x0000074c, 0xb2000433}, {0x00000494, 0xfe6030fa}, {0x00000480, 0x29ef5ed8}, {0x00000490, 0x10a0540b}, {0x000004a8, 0x7a01010a}, {0x00000500, 0xef053460}, {0x00000510, 0xe030101c}, {0x00000538, 0xdf808444}, {0x0000053c, 0xc8422204}, {0x00000540, 0x0180088c}, {0x00000574, 0x00000004}, {0x00000294, 0x23e030fa}, {0x00000280, 0xcb45b950}, {0x00000290, 0x10a0540b}, {0x000002a8, 0x8c01010a}, {0x00000300, 0xef053460}, {0x00000310, 0x8030101c}, {0x00000338, 0x41808444}, {0x0000033c, 0x32422204}, {0x00000340, 0x0180088c}, {0x00000374, 0x00000004}, {0x00000894, 0x5620b0fa}, {0x00000880, 0x4d4f21dc}, {0x00000890, 0x10a0540b}, {0x000008a8, 0x5601010a}, {0x00000900, 0xef053460}, {0x00000910, 0x8030101c}, {0x00000938, 0xdf808444}, {0x0000093c, 0xc8422204}, {0x00000940, 0x0180088c}, {0x00000974, 0x00000004}, {0x00000a94, 0xc91030fa}, {0x00000a80, 0x5a166ecc}, {0x00000a90, 0x10a0540b}, {0x00000aa8, 0x5d01010a}, {0x00000b00, 0xef053460}, {0x00000b10, 0xa030101c}, {0x00000b38, 0xdf808444}, {0x00000b3c, 0xc8422204}, {0x00000b40, 0x0180088c}, {0x00000b74, 0x00000004}, {0x00000000, 0x00000000} }; static const struct phy_reg 
x4_port1_config_regs[] = { {0x00000000, 0x00000000} }; static const struct phy_reg x4_port2_config_regs[] = { {0x00001294, 0x28f000fa}, {0x00001280, 0x08130cfa}, {0x00001290, 0x10c0140b}, {0x000012a8, 0xd704010a}, {0x00001300, 0x8d050060}, {0x00001310, 0x0030001c}, {0x00001338, 0xdf008444}, {0x0000133c, 0x78422204}, {0x00001348, 0x7821f940}, {0x0000134c, 0x5a000433}, {0x00001094, 0x2d20b0fa}, {0x00001080, 0xade75dd8}, {0x00001090, 0x10a0540b}, {0x000010a8, 0xb101010a}, {0x00001100, 0x33053460}, {0x00001110, 0x0030101c}, {0x00001138, 0xdf808444}, {0x0000113c, 0xc8422204}, {0x00001140, 0x8180088c}, {0x00001174, 0x00000004}, {0x00000e94, 0xd308d0fa}, {0x00000e80, 0x0fbf16d0}, {0x00000e90, 0x10a0540b}, {0x00000ea8, 0x2c01010a}, {0x00000f00, 0xf5053460}, {0x00000f10, 0xc030101c}, {0x00000f38, 0xdf808444}, {0x00000f3c, 0xc8422204}, {0x00000f40, 0x8180088c}, {0x00000f74, 0x00000004}, {0x00001494, 0x136850fa}, {0x00001480, 0xf9d34bdc}, {0x00001490, 0x10a0540b}, {0x000014a8, 0x5a01010a}, {0x00001500, 0x1b053460}, {0x00001510, 0x0030101c}, {0x00001538, 0xdf808444}, {0x0000153c, 0xc8422204}, {0x00001540, 0x8180088c}, {0x00001574, 0x00000004}, {0x00001694, 0x3050d0fa}, {0x00001680, 0x0ef6d04c}, {0x00001690, 0x10a0540b}, {0x000016a8, 0xe301010a}, {0x00001700, 0x69053460}, {0x00001710, 0xa030101c}, {0x00001738, 0xdf808444}, {0x0000173c, 0xc8422204}, {0x00001740, 0x0180088c}, {0x00001774, 0x00000004}, {0x00000000, 0x00000000} }; static const struct phy_reg x4_port3_config_regs[] = { {0x00000000, 0x00000000} }; static const struct phy_reg *x1_config_regs[4] = { x1_port0_config_regs, x1_port1_config_regs, x1_port2_config_regs, x1_port3_config_regs }; static const struct phy_reg *x2_config_regs[4] = { x2_port0_config_regs, x2_port1_config_regs, x2_port2_config_regs, x2_port3_config_regs }; static const struct phy_reg *x4_config_regs[4] = { x4_port0_config_regs, x4_port1_config_regs, x4_port2_config_regs, x4_port3_config_regs }; static const struct phy_reg **config_regs[3] = { x1_config_regs, x2_config_regs, x4_config_regs, }; int ipu6_isys_phy_powerup_ack(struct ipu_isys *isys, unsigned int phy_id) { unsigned int i; u32 val; void __iomem *isys_base = isys->pdata->base; val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id)); val |= CSI_REG_HUB_GPREG_PHY_CONTROL_PWR_EN; writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id)); for (i = 0; i < LOOP; i++) { if (readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id)) & CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK) return 0; usleep_range(100, 200); } dev_warn(&isys->adev->dev, "PHY%d powerup ack timeout", phy_id); return -ETIMEDOUT; } int ipu6_isys_phy_powerdown_ack(struct ipu_isys *isys, unsigned int phy_id) { unsigned int i; u32 val; void __iomem *isys_base = isys->pdata->base; writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id)); for (i = 0; i < LOOP; i++) { usleep_range(10, 20); val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id)); if (!(val & CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK)) return 0; } dev_warn(&isys->adev->dev, "PHY %d poweroff ack timeout.\n", phy_id); return -ETIMEDOUT; } int ipu6_isys_phy_reset(struct ipu_isys *isys, unsigned int phy_id, bool assert) { void __iomem *isys_base = isys->pdata->base; u32 val; val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id)); if (assert) val |= CSI_REG_HUB_GPREG_PHY_CONTROL_RESET; else val &= ~(CSI_REG_HUB_GPREG_PHY_CONTROL_RESET); writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id)); return 0; } int ipu6_isys_phy_ready(struct ipu_isys *isys, unsigned int phy_id) { 
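	/*
	 * Descriptive note: poll CSI_REG_HUB_GPREG_PHY_STATUS(phy_id) for up
	 * to LOOP (2000) iterations, 10-20 us apart, until the PHY_READY bit
	 * is set; otherwise warn and return -ETIMEDOUT.
	 */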
unsigned int i; u32 val; void __iomem *isys_base = isys->pdata->base; for (i = 0; i < LOOP; i++) { val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id)); dev_dbg(&isys->adev->dev, "PHY%d ready status 0x%x\n", phy_id, val); if (val & CSI_REG_HUB_GPREG_PHY_STATUS_PHY_READY) return 0; usleep_range(10, 20); } dev_warn(&isys->adev->dev, "PHY%d ready timeout\n", phy_id); return -ETIMEDOUT; } int ipu6_isys_phy_common_init(struct ipu_isys *isys) { unsigned int phy_id; void __iomem *phy_base; struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev); struct ipu_device *isp = adev->isp; void __iomem *isp_base = isp->base; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) struct v4l2_async_subdev *asd; struct sensor_async_subdev *s_asd; unsigned int i; list_for_each_entry(asd, &isys->notifier.asd_list, asd_list) { s_asd = container_of(asd, struct sensor_async_subdev, asd); #else struct v4l2_async_connection *asc; struct sensor_async_sd *s_asd; unsigned int i; list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { s_asd = container_of(asc, struct sensor_async_sd, asc); #endif phy_id = s_asd->csi2.port / 4; phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id); for (i = 0 ; i < ARRAY_SIZE(common_init_regs); i++) { writel(common_init_regs[i].val, phy_base + common_init_regs[i].reg); } } return 0; } static int ipu6_isys_driver_port_to_phy_port(struct ipu_isys_csi2_config *cfg) { int phy_port; int ret; if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1)) return -EINVAL; /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C4 */ /* normalize driver port number */ phy_port = cfg->port % 4; /* swap port number only for A and B */ if (phy_port == 0) phy_port = 1; else if (phy_port == 1) phy_port = 0; ret = phy_port; /* check validity per lane configuration */ if ((cfg->nlanes == 4) && !(phy_port == 0 || phy_port == 2)) ret = -EINVAL; else if ((cfg->nlanes == 2 || cfg->nlanes == 1) && !(phy_port >= 0 && phy_port <= 3)) ret = -EINVAL; return ret; } int ipu6_isys_phy_config(struct ipu_isys *isys) { int phy_port; unsigned int phy_id; void __iomem *phy_base; struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev); struct ipu_device *isp = adev->isp; void __iomem *isp_base = isp->base; const struct phy_reg **phy_config_regs; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) struct v4l2_async_subdev *asd; struct sensor_async_subdev *s_asd; struct ipu_isys_csi2_config cfg; int i; list_for_each_entry(asd, &isys->notifier.asd_list, asd_list) { s_asd = container_of(asd, struct sensor_async_subdev, asd); #else struct v4l2_async_connection *asc; struct sensor_async_sd *s_asd; struct ipu_isys_csi2_config cfg; int i; list_for_each_entry(asc, &isys->notifier.done_list, asc_entry) { s_asd = container_of(asc, struct sensor_async_sd, asc); #endif cfg.port = s_asd->csi2.port; cfg.nlanes = s_asd->csi2.nlanes; phy_port = ipu6_isys_driver_port_to_phy_port(&cfg); if (phy_port < 0) { dev_err(&isys->adev->dev, "invalid port %d for lane %d", cfg.port, cfg.nlanes); return -ENXIO; } phy_id = cfg.port / 4; phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id); dev_dbg(&isys->adev->dev, "port%d PHY%u lanes %u\n", cfg.port, phy_id, cfg.nlanes); phy_config_regs = config_regs[cfg.nlanes/2]; cfg.port = phy_port; for (i = 0; phy_config_regs[cfg.port][i].reg; i++) { writel(phy_config_regs[cfg.port][i].val, phy_base + phy_config_regs[cfg.port][i].reg); } } return 0; } 
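/*
 * Illustrative sketch (not part of the driver sources above): the driver
 * port -> PHY port mapping implemented by ipu6_isys_driver_port_to_phy_port()
 * in ipu6-isys-phy.c, reproduced as a minimal standalone user-space program
 * so the normalization, the A/B swap and the lane-count check can be tried in
 * isolation.  The helper name driver_port_to_phy_port() and the sample ports
 * in main() are hypothetical; only the mapping rules come from the code
 * above, and -1 stands in for the driver's -EINVAL.
 */
#include <stdio.h>

/* Mirrors ipu6_isys_driver_port_to_phy_port(): driver ports A..H are 0..7. */
static int driver_port_to_phy_port(int port, int nlanes)
{
	int phy_port;

	if (!(nlanes == 4 || nlanes == 2 || nlanes == 1))
		return -1;

	phy_port = port % 4;		/* normalize driver port number */

	/* swap port number only for A and B */
	if (phy_port == 0)
		phy_port = 1;
	else if (phy_port == 1)
		phy_port = 0;

	/* a 4-lane configuration may only start on PHY port 0 or 2 */
	if (nlanes == 4 && !(phy_port == 0 || phy_port == 2))
		return -1;

	return phy_port;
}

int main(void)
{
	/* port B (1) with 4 lanes -> PHY port 0; port A (0) with 2 lanes -> PHY port 1 */
	printf("B x4 -> %d\n", driver_port_to_phy_port(1, 4));
	printf("A x2 -> %d\n", driver_port_to_phy_port(0, 2));
	/* port E (4) with 4 lanes normalizes and swaps to PHY port 1 -> rejected (-1) */
	printf("E x4 -> %d\n", driver_port_to_phy_port(4, 4));
	return 0;
}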
ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys-phy.h000066400000000000000000000130111471702545500270320ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU6_ISYS_PHY_H #define IPU6_ISYS_PHY_H /* bridge to phy in buttress reg map, each phy has 16 kbytes * for tgl u/y, only 2 phys */ #define IPU6_ISYS_PHY_0_BASE 0x10000 #define IPU6_ISYS_PHY_1_BASE 0x14000 #define IPU6_ISYS_PHY_2_BASE 0x18000 #define IPU6_ISYS_PHY_BASE(i) (0x10000 + (i) * 0x4000) /* ppi mapping per phy : * * x4x4: * port0 - PPI range {0, 1, 2, 3, 4} * port2 - PPI range {6, 7, 8, 9, 10} * * x4x2: * port0 - PPI range {0, 1, 2, 3, 4} * port2 - PPI range {6, 7, 8} * * x2x4: * port0 - PPI range {0, 1, 2} * port2 - PPI range {6, 7, 8, 9, 10} * * x2x2: * port0 - PPI range {0, 1, 2} * port1 - PPI range {3, 4, 5} * port2 - PPI range {6, 7, 8} * port3 - PPI range {9, 10, 11} */ /* cbbs config regs */ #define PHY_CBBS1_BASE 0x0 /* register offset */ #define PHY_CBBS1_DFX_VMISCCTL 0x0 #define PHY_CBBS1_DFX_VBYTESEL0 0x4 #define PHY_CBBS1_DFX_VBYTESEL1 0x8 #define PHY_CBBS1_VISA2OBS_CTRL_REG 0xc #define PHY_CBBS1_PGL_CTRL_REG 0x10 #define PHY_CBBS1_RCOMP_CTRL_REG_1 0x14 #define PHY_CBBS1_RCOMP_CTRL_REG_2 0x18 #define PHY_CBBS1_RCOMP_CTRL_REG_3 0x1c #define PHY_CBBS1_RCOMP_CTRL_REG_4 0x20 #define PHY_CBBS1_RCOMP_CTRL_REG_5 0x24 #define PHY_CBBS1_RCOMP_STATUS_REG_1 0x28 #define PHY_CBBS1_RCOMP_STATUS_REG_2 0x2c #define PHY_CBBS1_CLOCK_CTRL_REG 0x30 #define PHY_CBBS1_CBB_ISOLATION_REG 0x34 #define PHY_CBBS1_CBB_PLL_CONTROL 0x38 #define PHY_CBBS1_CBB_STATUS_REG 0x3c #define PHY_CBBS1_AFE_CONTROL_REG_1 0x40 #define PHY_CBBS1_AFE_CONTROL_REG_2 0x44 #define PHY_CBBS1_CBB_SPARE 0x48 #define PHY_CBBS1_CRI_CLK_CONTROL 0x4c /* dbbs shared, i = [0..11] */ #define PHY_DBBS_SHARED(ppi) ((ppi) * 0x200 + 0x200) /* register offset */ #define PHY_DBBDFE_DFX_V1MISCCTL 0x0 #define PHY_DBBDFE_DFX_V1BYTESEL0 0x4 #define PHY_DBBDFE_DFX_V1BYTESEL1 0x8 #define PHY_DBBDFE_DFX_V2MISCCTL 0xc #define PHY_DBBDFE_DFX_V2BYTESEL0 0x10 #define PHY_DBBDFE_DFX_V2BYTESEL1 0x14 #define PHY_DBBDFE_GBLCTL 0x18 #define PHY_DBBDFE_GBL_STATUS 0x1c /* dbbs udln, i = [0..11] */ #define IPU6_ISYS_PHY_DBBS_UDLN(ppi) ((ppi) * 0x200 + 0x280) /* register offset */ #define PHY_DBBUDLN_CTL 0x0 #define PHY_DBBUDLN_CLK_CTL 0x4 #define PHY_DBBUDLN_SOFT_RST_CTL 0x8 #define PHY_DBBUDLN_STRAP_VALUES 0xc #define PHY_DBBUDLN_TXRX_CTL 0x10 #define PHY_DBBUDLN_MST_SLV_INIT_CTL 0x14 #define PHY_DBBUDLN_TX_TIMING_CTL0 0x18 #define PHY_DBBUDLN_TX_TIMING_CTL1 0x1c #define PHY_DBBUDLN_TX_TIMING_CTL2 0x20 #define PHY_DBBUDLN_TX_TIMING_CTL3 0x24 #define PHY_DBBUDLN_RX_TIMING_CTL 0x28 #define PHY_DBBUDLN_PPI_STATUS_CTL 0x2c #define PHY_DBBUDLN_PPI_STATUS 0x30 #define PHY_DBBUDLN_ERR_CTL 0x34 #define PHY_DBBUDLN_ERR_STATUS 0x38 #define PHY_DBBUDLN_DFX_LPBK_CTL 0x3c #define PHY_DBBUDLN_DFX_PPI_CTL 0x40 #define PHY_DBBUDLN_DFX_TX_DPHY_CTL 0x44 #define PHY_DBBUDLN_DFX_TXRX_PRBSPAT_CTL 0x48 #define PHY_DBBUDLN_DFX_TXRX_PRBSPAT_SEED 0x4c #define PHY_DBBUDLN_DFX_PRBSPAT_MAX_WRD_CNT 0x50 #define PHY_DBBUDLN_DFX_PRBSPAT_STATUS 0x54 #define PHY_DBBUDLN_DFX_PRBSPAT_WRD_CNT0_STATUS 0x58 #define PHY_DBBUDLN_DFX_PRBSPAT_WRD_CNT1_STATUS 0x5c #define PHY_DBBUDLN_DFX_PRBSPAT_FF_ERR_STATUS 0x60 #define PHY_DBBUDLN_DFX_PRBSPAT_FF_REF_STATUS 0x64 #define PHY_DBBUDLN_DFX_PRBSPAT_FF_WRD_CNT0_STATUS 0x68 #define PHY_DBBUDLN_DFX_PRBSPAT_FF_WRD_CNT1_STATUS 0x6c #define PHY_DBBUDLN_RSVD_CTL 0x70 #define 
PHY_DBBUDLN_TINIT_DONE BIT(27) /* dbbs supar, i = [0..11] */ #define IPU6_ISYS_PHY_DBBS_SUPAR(ppi) ((ppi) * 0x200 + 0x300) /* register offset */ #define PHY_DBBSUPAR_TXRX_FUPAR_CTL 0x0 #define PHY_DBBSUPAR_TXHS_AFE_CTL 0x4 #define PHY_DBBSUPAR_TXHS_AFE_LEGDIS_CTL 0x8 #define PHY_DBBSUPAR_TXHS_AFE_EQ_CTL 0xc #define PHY_DBBSUPAR_RXHS_AFE_CTL1 0x10 #define PHY_DBBSUPAR_RXHS_AFE_PICTL1 0x14 #define PHY_DBBSUPAR_TXRXLP_AFE_CTL 0x18 #define PHY_DBBSUPAR_DFX_TXRX_STATUS 0x1c #define PHY_DBBSUPAR_DFX_TXRX_CTL 0x20 #define PHY_DBBSUPAR_DFX_DIGMON_CTL 0x24 #define PHY_DBBSUPAR_DFX_LOCMON_CTL 0x28 #define PHY_DBBSUPAR_DFX_RCOMP_CTL1 0x2c #define PHY_DBBSUPAR_DFX_RCOMP_CTL2 0x30 #define PHY_DBBSUPAR_CAL_TOP1 0x34 #define PHY_DBBSUPAR_CAL_SHARED1 0x38 #define PHY_DBBSUPAR_CAL_SHARED2 0x3c #define PHY_DBBSUPAR_CAL_CDR1 0x40 #define PHY_DBBSUPAR_CAL_OCAL1 0x44 #define PHY_DBBSUPAR_CAL_DCC_DLL1 0x48 #define PHY_DBBSUPAR_CAL_DLL2 0x4c #define PHY_DBBSUPAR_CAL_DFX1 0x50 #define PHY_DBBSUPAR_CAL_DFX2 0x54 #define PHY_DBBSUPAR_CAL_DFX3 0x58 #define PHY_BBSUPAR_CAL_DFX4 0x5c #define PHY_DBBSUPAR_CAL_DFX5 0x60 #define PHY_DBBSUPAR_CAL_DFX6 0x64 #define PHY_DBBSUPAR_CAL_DFX7 0x68 #define PHY_DBBSUPAR_DFX_AFE_SPARE_CTL1 0x6c #define PHY_DBBSUPAR_DFX_AFE_SPARE_CTL2 0x70 #define PHY_DBBSUPAR_SPARE 0x74 /* sai, i = [0..11] */ #define IPU6_ISYS_PHY_SAI 0xf800 /* register offset */ #define PHY_SAI_CTRL_REG0 0x40 #define PHY_SAI_CTRL_REG0_1 0x44 #define PHY_SAI_WR_REG0 0x48 #define PHY_SAI_WR_REG0_1 0x4c #define PHY_SAI_RD_REG0 0x50 #define PHY_SAI_RD_REG0_1 0x54 int ipu6_isys_phy_powerup_ack(struct ipu_isys *isys, unsigned int phy_id); int ipu6_isys_phy_powerdown_ack(struct ipu_isys *isys, unsigned int phy_id); int ipu6_isys_phy_reset(struct ipu_isys *isys, unsigned int phy_id, bool assert); int ipu6_isys_phy_ready(struct ipu_isys *isys, unsigned int phy_id); int ipu6_isys_phy_common_init(struct ipu_isys *isys); int ipu6_isys_phy_config(struct ipu_isys *isys); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6-isys.c000066400000000000000000000134011471702545500262320ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include "ipu.h" #include "ipu-platform-regs.h" #include "ipu-trace.h" #include "ipu-isys.h" #include "ipu-platform-isys-csi2-reg.h" const struct ipu_isys_pixelformat ipu_isys_pfmts[] = { {V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_FW_ISYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, IPU_FW_ISYS_FRAME_FORMAT_RAW8}, {} }; struct 
ipu_trace_block isys_trace_blocks[] = { { .offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE, .type = IPU_TRACE_BLOCK_TUN, }, { .offset = IPU_TRACE_REG_IS_SP_EVQ_BASE, .type = IPU_TRACE_BLOCK_TM, }, { .offset = IPU_TRACE_REG_IS_SP_GPC_BASE, .type = IPU_TRACE_BLOCK_GPC, }, { .offset = IPU_TRACE_REG_IS_ISL_GPC_BASE, .type = IPU_TRACE_BLOCK_GPC, }, { .offset = IPU_TRACE_REG_IS_MMU_GPC_BASE, .type = IPU_TRACE_BLOCK_GPC, }, { /* Note! this covers all 8 blocks */ .offset = IPU_TRACE_REG_CSI2_TM_BASE(0), .type = IPU_TRACE_CSI2, }, { /* Note! this covers all 11 blocks */ .offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0), .type = IPU_TRACE_SIG2CIOS, }, { .offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, .type = IPU_TRACE_TIMER_RST, }, { .type = IPU_TRACE_BLOCK_END, } }; void isys_setup_hw(struct ipu_isys *isys) { void __iomem *base = isys->pdata->base; const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; u32 irqs = 0; unsigned int i, nr; nr = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) ? IPU6_ISYS_CSI_PORT_NUM : IPU6SE_ISYS_CSI_PORT_NUM; /* Enable irqs for all MIPI ports */ for (i = 0; i < nr; i++) irqs |= IPU_ISYS_UNISPART_IRQ_CSI2(i); if (ipu_ver == IPU_VER_6EP_MTL) { writel(irqs, base + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE); writel(irqs, base + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE); writel(0xffffffff, base + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR); writel(irqs, base + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK); writel(irqs, base + IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE); } else { writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE); writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE); writel(0xffffffff, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR); writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK); writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE); } irqs = ISYS_UNISPART_IRQS; writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE); writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); writel(0xffffffff, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE); writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); /* Write CDC FIFO threshold values for isys */ for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) writel(thd[i], base + IPU_REG_ISYS_CDC_THRESHOLD(i)); } irqreturn_t isys_isr(struct ipu_bus_device *adev) { struct ipu_isys *isys = ipu_bus_get_drvdata(adev); void __iomem *base = isys->pdata->base; u32 status_sw, status_csi; u32 ctrl0_status, ctrl0_clear; spin_lock(&isys->power_lock); if (!isys->power) { spin_unlock(&isys->power_lock); return IRQ_NONE; } if (ipu_ver == IPU_VER_6EP_MTL) { ctrl0_status = IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; ctrl0_clear = IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; } else { ctrl0_status = IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS; ctrl0_clear = IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR; } status_csi = readl(isys->pdata->base + ctrl0_status); status_sw = readl(isys->pdata->base + IPU_REG_ISYS_UNISPART_IRQ_STATUS); writel(ISYS_UNISPART_IRQS & ~IPU_ISYS_UNISPART_IRQ_SW, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); do { writel(status_csi, isys->pdata->base + ctrl0_clear); writel(status_sw, isys->pdata->base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); if (isys->isr_csi2_bits & status_csi) { unsigned int i; for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { /* irq from not enabled port */ if (!isys->csi2[i].base) 
continue; if (status_csi & IPU_ISYS_UNISPART_IRQ_CSI2(i)) ipu_isys_csi2_isr(&isys->csi2[i]); } } writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); if (!isys_isr_one(adev)) status_sw = IPU_ISYS_UNISPART_IRQ_SW; else status_sw = 0; status_csi = readl(isys->pdata->base + ctrl0_status); status_sw |= readl(isys->pdata->base + IPU_REG_ISYS_UNISPART_IRQ_STATUS); } while (((status_csi & isys->isr_csi2_bits) || (status_sw & IPU_ISYS_UNISPART_IRQ_SW)) && !isys->adev->isp->flr_done); writel(ISYS_UNISPART_IRQS, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); spin_unlock(&isys->power_lock); return IRQ_HANDLED; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/ipu6.c000066400000000000000000000217631471702545500252570ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 - 2024 Intel Corporation #include #include #include #include #include #include "ipu.h" #include "ipu-cpd.h" #include "ipu-isys.h" #include "ipu-psys.h" #include "ipu-platform.h" #include "ipu-platform-regs.h" #include "ipu-platform-buttress-regs.h" #include "ipu-platform-isys-csi2-reg.h" struct ipu_cell_program_t { unsigned int magic_number; unsigned int blob_offset; unsigned int blob_size; unsigned int start[3]; unsigned int icache_source; unsigned int icache_target; unsigned int icache_size; unsigned int pmem_source; unsigned int pmem_target; unsigned int pmem_size; unsigned int data_source; unsigned int data_target; unsigned int data_size; unsigned int bss_target; unsigned int bss_size; unsigned int cell_id; unsigned int regs_addr; unsigned int cell_pmem_data_bus_address; unsigned int cell_dmem_data_bus_address; unsigned int cell_pmem_control_bus_address; unsigned int cell_dmem_control_bus_address; unsigned int next; unsigned int dummy[2]; }; static unsigned int ipu6se_csi_offsets[] = { IPU_CSI_PORT_A_ADDR_OFFSET, IPU_CSI_PORT_B_ADDR_OFFSET, IPU_CSI_PORT_C_ADDR_OFFSET, IPU_CSI_PORT_D_ADDR_OFFSET, }; static unsigned int ipu6_csi_offsets[] = { IPU_CSI_PORT_A_ADDR_OFFSET, IPU_CSI_PORT_B_ADDR_OFFSET, IPU_CSI_PORT_C_ADDR_OFFSET, IPU_CSI_PORT_D_ADDR_OFFSET, IPU_CSI_PORT_E_ADDR_OFFSET, IPU_CSI_PORT_F_ADDR_OFFSET, IPU_CSI_PORT_G_ADDR_OFFSET, IPU_CSI_PORT_H_ADDR_OFFSET }; struct ipu_isys_internal_pdata isys_ipdata = { .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = 3, .mmu_hw = { { .offset = IPU_ISYS_IOMMU0_OFFSET, .info_bits = IPU_INFO_REQUEST_DESTINATION_IOSF, .nr_l1streams = 16, .l1_block_sz = { 3, 8, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 }, .nr_l2streams = 16, .l2_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, .insert_read_before_invalidate = false, .l1_stream_id_reg_offset = IPU_MMU_L1_STREAM_ID_REG_OFFSET, .l2_stream_id_reg_offset = IPU_MMU_L2_STREAM_ID_REG_OFFSET, }, { .offset = IPU_ISYS_IOMMU1_OFFSET, .info_bits = IPU_INFO_STREAM_ID_SET(0), .nr_l1streams = 16, .l1_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 4 }, .nr_l2streams = 16, .l2_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, .insert_read_before_invalidate = false, .l1_stream_id_reg_offset = IPU_MMU_L1_STREAM_ID_REG_OFFSET, .l2_stream_id_reg_offset = IPU_MMU_L2_STREAM_ID_REG_OFFSET, }, { .offset = IPU_ISYS_IOMMUI_OFFSET, .info_bits = IPU_INFO_STREAM_ID_SET(0), .nr_l1streams = 0, .nr_l2streams = 0, .insert_read_before_invalidate = false, }, }, .cdc_fifos = 3, .cdc_fifo_threshold = {6, 8, 2}, .dmem_offset = IPU_ISYS_DMEM_OFFSET, .spc_offset = IPU_ISYS_SPC_OFFSET, }, .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, }; struct ipu_psys_internal_pdata psys_ipdata = { .hw_variant = { 
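	/*
	 * Descriptive note: processing system (PSYS) counterpart of
	 * isys_ipdata above; four MMUs, each described by its L1/L2 stream
	 * counts and per-stream block-size tables.
	 */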
.offset = IPU_UNIFIED_OFFSET, .nr_mmus = 4, .mmu_hw = { { .offset = IPU_PSYS_IOMMU0_OFFSET, .info_bits = IPU_INFO_REQUEST_DESTINATION_IOSF, .nr_l1streams = 16, .l1_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, .nr_l2streams = 16, .l2_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, .insert_read_before_invalidate = false, .l1_stream_id_reg_offset = IPU_MMU_L1_STREAM_ID_REG_OFFSET, .l2_stream_id_reg_offset = IPU_MMU_L2_STREAM_ID_REG_OFFSET, }, { .offset = IPU_PSYS_IOMMU1_OFFSET, .info_bits = IPU_INFO_STREAM_ID_SET(0), .nr_l1streams = 32, .l1_block_sz = { 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 10, 5, 4, 14, 6, 4, 14, 6, 4, 8, 4, 2, 1, 1, 1, 1, 14 }, .nr_l2streams = 32, .l2_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, .insert_read_before_invalidate = false, .l1_stream_id_reg_offset = IPU_MMU_L1_STREAM_ID_REG_OFFSET, .l2_stream_id_reg_offset = IPU_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET, }, { .offset = IPU_PSYS_IOMMU1R_OFFSET, .info_bits = IPU_INFO_STREAM_ID_SET(0), .nr_l1streams = 16, .l1_block_sz = { 1, 4, 4, 4, 4, 16, 8, 4, 32, 16, 16, 2, 2, 2, 1, 12 }, .nr_l2streams = 16, .l2_block_sz = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, .insert_read_before_invalidate = false, .l1_stream_id_reg_offset = IPU_MMU_L1_STREAM_ID_REG_OFFSET, .l2_stream_id_reg_offset = IPU_MMU_L2_STREAM_ID_REG_OFFSET, }, { .offset = IPU_PSYS_IOMMUI_OFFSET, .info_bits = IPU_INFO_STREAM_ID_SET(0), .nr_l1streams = 0, .nr_l2streams = 0, .insert_read_before_invalidate = false, }, }, .dmem_offset = IPU_PSYS_DMEM_OFFSET, }, }; const struct ipu_buttress_ctrl isys_buttress_ctrl = { .ratio = IPU_IS_FREQ_CTL_DEFAULT_RATIO, .qos_floor = IPU_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, .freq_ctl = IPU_BUTTRESS_REG_IS_FREQ_CTL, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, }; const struct ipu_buttress_ctrl psys_buttress_ctrl = { .ratio = IPU_PS_FREQ_CTL_DEFAULT_RATIO, .qos_floor = IPU_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO, .freq_ctl = IPU_BUTTRESS_REG_PS_FREQ_CTL, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, }; static void ipu6_pkg_dir_configure_spc(struct ipu_device *isp, const struct ipu_hw_variants *hw_variant, int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, dma_addr_t pkg_dir_vied_address) { struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys); struct ipu_isys *isys = ipu_bus_get_drvdata(isp->isys); unsigned int server_fw_virtaddr; struct ipu_cell_program_t *prog; void __iomem *spc_base; dma_addr_t dma_addr; if (!pkg_dir || !isp->cpd_fw) { dev_err(&isp->pdev->dev, "invalid addr\n"); return; } server_fw_virtaddr = *(pkg_dir + (pkg_dir_idx + 1) * 2); if (pkg_dir_idx == IPU_CPD_PKG_DIR_ISYS_SERVER_IDX) { dma_addr = sg_dma_address(isys->fw_sgt.sgl); prog = (struct ipu_cell_program_t *)((u64)isp->cpd_fw->data + (server_fw_virtaddr - dma_addr)); } else { dma_addr = sg_dma_address(psys->fw_sgt.sgl); prog = (struct ipu_cell_program_t *)((u64)isp->cpd_fw->data + (server_fw_virtaddr - dma_addr)); } spc_base = base + prog->regs_addr; if (spc_base != (base + hw_variant->spc_offset)) dev_warn(&isp->pdev->dev, "SPC reg addr 0x%p not matching value from CPD 0x%p\n", base + hw_variant->spc_offset, spc_base); writel(server_fw_virtaddr + 
prog->blob_offset + prog->icache_source, spc_base + IPU_PSYS_REG_SPC_ICACHE_BASE); writel(IPU_INFO_REQUEST_DESTINATION_IOSF, spc_base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); writel(prog->start[1], spc_base + IPU_PSYS_REG_SPC_START_PC); writel(pkg_dir_vied_address, base + hw_variant->dmem_offset); } void ipu_configure_spc(struct ipu_device *isp, const struct ipu_hw_variants *hw_variant, int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, dma_addr_t pkg_dir_dma_addr) { u32 val; void __iomem *dmem_base = base + hw_variant->dmem_offset; void __iomem *spc_regs_base = base + hw_variant->spc_offset; val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); val |= IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); if (isp->secure_mode) writel(IPU_PKG_DIR_IMR_OFFSET, dmem_base); else ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base, pkg_dir, pkg_dir_dma_addr); } EXPORT_SYMBOL(ipu_configure_spc); void ipu_internal_pdata_init(void) { if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) { isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6_csi_offsets); isys_ipdata.csi2.offsets = ipu6_csi_offsets; isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS; psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET; } else if (ipu_ver == IPU_VER_6SE) { isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6se_csi_offsets); isys_ipdata.csi2.offsets = ipu6se_csi_offsets; isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS; psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET; } } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/000077500000000000000000000000001471702545500252155ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/Makefile000066400000000000000000000013401471702545500266530ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ifeq ($(is_kernel_lt_6_10), 1) ifneq ($(EXTERNAL_BUILD), 1) src := $(srctree)/$(src) endif endif intel-ipu6-psys-objs += ipu-psys.o \ ipu6-psys.o \ ipu-resources.o \ ipu6-l-scheduler.o \ ipu6-ppg.o intel-ipu6-psys-objs += ipu-fw-resources.o \ ipu6-fw-resources.o \ ipu6se-fw-resources.o \ ipu6ep-fw-resources.o \ ipu-fw-psys.o obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-psys.o ifeq ($(is_kernel_lt_6_10), 1) ccflags-y += -I$(src)/../ipu6/ endif ccflags-y += -I$(src)/../ ccflags-y += -I$(src)/../../ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.c000066400000000000000000000370671471702545500276010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2016 - 2024 Intel Corporation #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #include "ipu-fw-com.h" #else #include "ipu6-fw-com.h" #endif #include "ipu-fw-psys.h" #include "ipu-psys.h" int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) { kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) { struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); if (!psys_cmd) { dev_err(&kcmd->fh->psys->adev->dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; psys_cmd->msg = 0; 
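	/* the command's context handle carries the process group's IPU virtual address */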
psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu_send_put_token(kcmd->fh->psys->fwcom, 0); out: return ret; } int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd) { struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */ psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 1); if (!psys_cmd) { dev_err(&kcmd->fh->psys->adev->dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu_send_put_token(kcmd->fh->psys->fwcom, 1); out: return ret; } int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd) { struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); if (!psys_cmd) { dev_err(&kcmd->fh->psys->adev->dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu_send_put_token(kcmd->fh->psys->fwcom, 0); out: return ret; } int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) { struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); if (!psys_cmd) { dev_err(&kcmd->fh->psys->adev->dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu_send_put_token(kcmd->fh->psys->fwcom, 0); out: return ret; } int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) { kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; return 0; } int ipu_fw_psys_rcv_event(struct ipu_psys *psys, struct ipu_fw_psys_event *event) { void *rcv; rcv = ipu_recv_get_token(psys->fwcom, 0); if (!rcv) return 0; memcpy(event, rcv, sizeof(*event)); ipu_recv_put_token(psys->fwcom, 0); return 1; } #else int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) { struct ipu_fw_psys_cmd *psys_cmd; struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; int ret = 0; psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); if (!psys_cmd) { dev_err(dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); out: return ret; } int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd) { struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */ psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 1); if (!psys_cmd) { dev_err(dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu6_send_put_token(kcmd->fh->psys->fwcom, 1); out: return ret; } int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd) { struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); if (!psys_cmd) { dev_err(dev, "%s failed to get token!\n", __func__); 
kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); out: return ret; } int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) { struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; struct ipu_fw_psys_cmd *psys_cmd; int ret = 0; psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, 0); if (!psys_cmd) { dev_err(dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; ret = -ENODATA; goto out; } psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; ipu6_send_put_token(kcmd->fh->psys->fwcom, 0); out: return ret; } int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) { kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; return 0; } int ipu_fw_psys_rcv_event(struct ipu_psys *psys, struct ipu_fw_psys_event *event) { void *rcv; rcv = ipu6_recv_get_token(psys->fwcom, 0); if (!rcv) return 0; memcpy(event, rcv, sizeof(*event)); ipu6_recv_put_token(psys->fwcom, 0); return 1; } #endif int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, int terminal_idx, struct ipu_psys_kcmd *kcmd, u32 buffer, unsigned int size) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; #endif u32 type; u32 buffer_state; type = terminal->terminal_type; switch (type) { case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; break; case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: buffer_state = IPU_FW_PSYS_BUFFER_FULL; break; case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; break; default: #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&kcmd->fh->psys->adev->dev, "unknown terminal type: 0x%x\n", type); #else dev_err(dev, "unknown terminal type: 0x%x\n", type); #endif return -EAGAIN; } if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { struct ipu_fw_psys_data_terminal *dterminal = (struct ipu_fw_psys_data_terminal *)terminal; dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY; dterminal->frame.data_bytes = size; if (!ipu_fw_psys_pg_get_protocol(kcmd)) dterminal->frame.data = buffer; else dterminal->frame.data_index = terminal_idx; dterminal->frame.buffer_state = buffer_state; } else { struct ipu_fw_psys_param_terminal *pterminal = (struct ipu_fw_psys_param_terminal *)terminal; if (!ipu_fw_psys_pg_get_protocol(kcmd)) pterminal->param_payload.buffer = buffer; else pterminal->param_payload.terminal_index = terminal_idx; } return 0; } void ipu_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note) { ipu6_fw_psys_pg_dump(psys, kcmd, note); } int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) { return kcmd->kpg->pg->ID; } int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) { return kcmd->kpg->pg->terminal_count; } int ipu_fw_psys_pg_get_size(struct 
ipu_psys_kcmd *kcmd) { return kcmd->kpg->pg->size; } int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, dma_addr_t vaddress) { kcmd->kpg->pg->ipu_virtual_address = vaddress; return 0; } struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd *kcmd, int index) { struct ipu_fw_psys_terminal *terminal; u16 *terminal_offset_table; terminal_offset_table = (uint16_t *)((char *)kcmd->kpg->pg + kcmd->kpg->pg->terminals_offset); terminal = (struct ipu_fw_psys_terminal *) ((char *)kcmd->kpg->pg + terminal_offset_table[index]); return terminal; } void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) { kcmd->kpg->pg->token = (u64)token; } u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) { return kcmd->kpg->pg->token; } int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) { return kcmd->kpg->pg->protocol_version; } int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, struct ipu_fw_psys_terminal *terminal, int terminal_idx, u32 buffer) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) u32 type; u32 buffer_state; u32 *buffer_ptr; struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set; #else struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set; u32 buffer_state; u32 *buffer_ptr; u32 type; #endif type = terminal->terminal_type; switch (type) { case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; break; case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: buffer_state = IPU_FW_PSYS_BUFFER_FULL; break; case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; break; default: #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&kcmd->fh->psys->adev->dev, "unknown terminal type: 0x%x\n", type); #else dev_err(dev, "unknown terminal type: 0x%x\n", type); #endif return -EAGAIN; } buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) + terminal_idx * sizeof(*buffer_ptr)); *buffer_ptr = buffer; if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { struct ipu_fw_psys_data_terminal *dterminal = (struct ipu_fw_psys_data_terminal *)terminal; dterminal->frame.buffer_state = buffer_state; } return 0; } size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd) { return (sizeof(struct ipu_fw_psys_buffer_set) + kcmd->kpg->pg->terminal_count * sizeof(u32)); } int ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, u32 vaddress) { buf_set->ipu_virtual_address = vaddress; return 0; } int ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap( struct ipu_fw_psys_buffer_set *buf_set, u32 *kernel_enable_bitmap) { memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap, sizeof(buf_set->kernel_enable_bitmap)); return 0; } struct ipu_fw_psys_buffer_set * ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, void *kaddr, u32 frame_counter) { struct ipu_fw_psys_buffer_set *buffer_set = NULL; unsigned int i; buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr; /* * Set base 
struct members */ buffer_set->ipu_virtual_address = 0; buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address; buffer_set->frame_counter = frame_counter; buffer_set->terminal_count = kcmd->kpg->pg->terminal_count; /* * Initialize adjacent buffer addresses */ for (i = 0; i < buffer_set->terminal_count; i++) { u32 *buffer = (u32 *)((char *)buffer_set + sizeof(*buffer_set) + sizeof(u32) * i); *buffer = 0; } return buffer_set; } int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &kcmd->fh->psys->adev->auxdev.dev; #endif struct ipu_fw_psys_cmd *psys_cmd; unsigned int queue_id; int ret = 0; unsigned int size; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (ipu_ver == IPU_VER_6SE) #else if (ipu_ver == IPU6_VER_6SE) #endif size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; else size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; queue_id = kcmd->kpg->pg->base_queue_id; if (queue_id >= size) return -EINVAL; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, queue_id); if (!psys_cmd) { dev_err(&kcmd->fh->psys->adev->dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; return -ENODATA; } #else psys_cmd = ipu6_send_get_token(kcmd->fh->psys->fwcom, queue_id); if (!psys_cmd) { dev_err(dev, "%s failed to get token!\n", __func__); kcmd->pg_user = NULL; return -ENODATA; } #endif psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN; psys_cmd->msg = 0; psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ipu_send_put_token(kcmd->fh->psys->fwcom, queue_id); #else ipu6_send_put_token(kcmd->fh->psys->fwcom, queue_id); #endif return ret; } u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd) { return kcmd->kpg->pg->base_queue_id; } void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id) { kcmd->kpg->pg->base_queue_id = queue_id; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) int ipu_fw_psys_open(struct ipu_psys *psys) { int retry = IPU_PSYS_OPEN_RETRY, retval; retval = ipu_fw_com_open(psys->fwcom); if (retval) { dev_err(&psys->adev->dev, "fw com open failed.\n"); return retval; } do { usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, IPU_PSYS_OPEN_TIMEOUT_US + 10); retval = ipu_fw_com_ready(psys->fwcom); if (!retval) { dev_dbg(&psys->adev->dev, "psys port open ready!\n"); break; } } while (retry-- > 0); if (!retry && retval) { dev_err(&psys->adev->dev, "psys port open ready failed %d\n", retval); ipu_fw_com_close(psys->fwcom); return retval; } return 0; } int ipu_fw_psys_close(struct ipu_psys *psys) { int retval; retval = ipu_fw_com_close(psys->fwcom); if (retval) { dev_err(&psys->adev->dev, "fw com close failed.\n"); return retval; } return retval; } #else int ipu_fw_psys_open(struct ipu_psys *psys) { struct device *dev = &psys->adev->auxdev.dev; int retry = IPU_PSYS_OPEN_RETRY, retval; retval = ipu6_fw_com_open(psys->fwcom); if (retval) { dev_err(dev, "fw com open failed.\n"); return retval; } do { usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, IPU_PSYS_OPEN_TIMEOUT_US + 10); retval = ipu6_fw_com_ready(psys->fwcom); if (retval) { dev_dbg(dev, "psys port open ready!\n"); break; } retry--; } while (retry > 0); if (!retry) { dev_err(dev, "psys port open ready failed\n"); ipu6_fw_com_close(psys->fwcom); return -EIO; } return 0; } int ipu_fw_psys_close(struct ipu_psys *psys) { struct device *dev = &psys->adev->auxdev.dev; int retval; retval = ipu6_fw_com_close(psys->fwcom); 
if (retval) { dev_err(dev, "fw com close failed.\n"); return retval; } return retval; } #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-fw-psys.h000066400000000000000000000266341471702545500276040ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2016 - 2024 Intel Corporation */ #ifndef IPU_FW_PSYS_H #define IPU_FW_PSYS_H #include "ipu6-platform-resources.h" #include "ipu6se-platform-resources.h" #include "ipu6ep-platform-resources.h" #define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20 #define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40 #define IPU_FW_PSYS_CMD_BITS 64 #define IPU_FW_PSYS_EVENT_BITS 128 enum { IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0, IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12, IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13 }; enum { IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID, IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID }; enum { IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0, IPU_FW_PSYS_PROCESS_GROUP_CREATED, IPU_FW_PSYS_PROCESS_GROUP_READY, IPU_FW_PSYS_PROCESS_GROUP_BLOCKED, IPU_FW_PSYS_PROCESS_GROUP_STARTED, IPU_FW_PSYS_PROCESS_GROUP_RUNNING, IPU_FW_PSYS_PROCESS_GROUP_STALLED, IPU_FW_PSYS_PROCESS_GROUP_STOPPED, IPU_FW_PSYS_N_PROCESS_GROUP_STATES }; enum { IPU_FW_PSYS_CONNECTION_MEMORY = 0, IPU_FW_PSYS_CONNECTION_MEMORY_STREAM, IPU_FW_PSYS_CONNECTION_STREAM, IPU_FW_PSYS_N_CONNECTION_TYPES }; enum { IPU_FW_PSYS_BUFFER_NULL = 0, IPU_FW_PSYS_BUFFER_UNDEFINED, IPU_FW_PSYS_BUFFER_EMPTY, IPU_FW_PSYS_BUFFER_NONEMPTY, IPU_FW_PSYS_BUFFER_FULL, IPU_FW_PSYS_N_BUFFER_STATES }; enum { IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0, IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN, IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT, IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN, IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT, IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM, IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, IPU_FW_PSYS_N_TERMINAL_TYPES }; enum { IPU_FW_PSYS_COL_DIMENSION = 0, IPU_FW_PSYS_ROW_DIMENSION = 1, IPU_FW_PSYS_N_DATA_DIMENSION = 2 }; enum { IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0, IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT, IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH, IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH, IPU_FW_PSYS_PROCESS_GROUP_CMD_START, IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN, IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN, IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, IPU_FW_PSYS_N_PROCESS_GROUP_CMDS }; enum { IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG, IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS }; struct __packed ipu_fw_psys_process_group { u64 token; u64 private_token; u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; u32 
kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS]; u32 size; u32 psys_server_init_cycles; u32 pg_load_start_ts; u32 pg_load_cycles; u32 pg_init_cycles; u32 pg_processing_cycles; u32 pg_next_frame_init_cycles; u32 pg_complete_cycles; u32 ID; u32 state; u32 ipu_virtual_address; u32 resource_bitmap; u16 fragment_count; u16 fragment_state; u16 fragment_limit; u16 processes_offset; u16 terminals_offset; u8 process_count; u8 terminal_count; u8 subgraph_count; u8 protocol_version; u8 base_queue_id; u8 num_queues; u8 mask_irq; u8 error_handling_enable; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; }; struct ipu_fw_psys_srv_init { void *host_ddr_pkg_dir; u32 ddr_pkg_dir_address; u32 pkg_dir_size; u32 icache_prefetch_sp; u32 icache_prefetch_isp; }; struct __packed ipu_fw_psys_cmd { u16 command; u16 msg; u32 context_handle; }; struct __packed ipu_fw_psys_event { u16 status; u16 command; u32 context_handle; u64 token; }; struct ipu_fw_psys_terminal { u32 terminal_type; s16 parent_offset; u16 size; u16 tm_index; u8 ID; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; }; struct ipu_fw_psys_param_payload { u64 host_buffer; u32 buffer; u32 terminal_index; }; struct ipu_fw_psys_param_terminal { struct ipu_fw_psys_terminal base; struct ipu_fw_psys_param_payload param_payload; u16 param_section_desc_offset; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; }; struct ipu_fw_psys_frame { u32 buffer_state; u32 access_type; u32 pointer_state; u32 access_scope; u32 data; u32 data_index; u32 data_bytes; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; }; struct ipu_fw_psys_frame_descriptor { u32 frame_format_type; u32 plane_count; u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; u32 stride[1]; u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; u16 dimension[2]; u16 size; u8 bpp; u8 bpe; u8 is_compressed; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; }; struct ipu_fw_psys_stream { u64 dummy; }; struct ipu_fw_psys_data_terminal { struct ipu_fw_psys_terminal base; struct ipu_fw_psys_frame_descriptor frame_descriptor; struct ipu_fw_psys_frame frame; struct ipu_fw_psys_stream stream; u32 reserved; u32 connection_type; u16 fragment_descriptor_offset; u8 kernel_id; u8 subgraph_id; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; }; struct ipu_fw_psys_buffer_set { u64 token; u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS]; u32 ipu_virtual_address; u32 process_group_handle; u16 terminal_count; u8 frame_counter; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; }; struct ipu_fw_psys_program_group_manifest { u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; u32 ID; u16 program_manifest_offset; u16 terminal_manifest_offset; u16 private_data_offset; u16 rbm_manifest_offset; u16 size; u8 alignment; u8 kernel_count; u8 program_count; u8 terminal_count; u8 subgraph_count; u8 reserved[5]; }; struct ipu_fw_generic_program_manifest { u16 *dev_chn_size; u16 *dev_chn_offset; u16 *ext_mem_size; u16 *ext_mem_offset; u8 cell_id; u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; u8 cell_type_id; u8 *is_dfm_relocatable; u32 *dfm_port_bitmap; u32 *dfm_active_port_bitmap; }; struct ipu_fw_resource_definitions { u32 num_cells; u32 num_cells_type; const u8 *cells; u32 num_dev_channels; const u16 *dev_channels; u32 num_ext_mem_types; u32 num_ext_mem_ids; const u16 *ext_mem_ids; u32 
num_dfm_ids; const u16 *dfms; u32 cell_mem_row; const u8 *cell_mem; }; struct ipu6_psys_hw_res_variant { unsigned int queue_num; unsigned int cell_num; int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset, u16 value); int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr, u16 id, u32 bitmap, u32 active_bitmap); int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr, u16 type_id, u16 mem_id, u16 offset); int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm, const struct ipu_fw_psys_program_group_manifest *pg_manifest, struct ipu_fw_psys_process *process); }; struct ipu_psys_kcmd; struct ipu_psys; int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_rcv_event(struct ipu_psys *psys, struct ipu_fw_psys_event *event); int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, int terminal_idx, struct ipu_psys_kcmd *kcmd, u32 buffer, unsigned int size); void ipu_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note); int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, dma_addr_t vaddress); struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd *kcmd, int index); void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd, struct ipu_fw_psys_terminal *terminal, int terminal_idx, u32 buffer); size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set, u32 vaddress); int ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap( struct ipu_fw_psys_buffer_set *buf_set, u32 *kernel_enable_bitmap); struct ipu_fw_psys_buffer_set * ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd, void *kaddr, u32 frame_counter); int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd); u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd); void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id); int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); int ipu_fw_psys_open(struct ipu_psys *psys); int ipu_fw_psys_close(struct ipu_psys *psys); /* common resource interface for both abi and api mode */ int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, u8 value); u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, u16 value); int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, u16 type_id, 
u16 mem_id, u16 offset); int ipu_fw_psys_get_program_manifest_by_process( struct ipu_fw_generic_program_manifest *gen_pm, const struct ipu_fw_psys_program_group_manifest *pg_manifest, struct ipu_fw_psys_process *process); int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, u16 value); int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, u16 id, u32 bitmap, u32 active_bitmap); int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, u16 type_id, u16 mem_id, u16 offset); int ipu6_fw_psys_get_program_manifest_by_process( struct ipu_fw_generic_program_manifest *gen_pm, const struct ipu_fw_psys_program_group_manifest *pg_manifest, struct ipu_fw_psys_process *process); void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note); void ipu6_psys_hw_res_variant_init(void); #endif /* IPU_FW_PSYS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-fw-resources.c000066400000000000000000000052011471702545500305760ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2015 - 2024 Intel Corporation #include #include #include "ipu-psys.h" #include "ipu-fw-psys.h" #include "ipu6-platform-resources.h" #include "ipu6se-platform-resources.h" /********** Generic resource handling **********/ /* * Extension library gives byte offsets to its internal structures. * use those offsets to update fields. Without extension lib access * structures directly. */ const struct ipu6_psys_hw_res_variant *var = &hw_var; int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, u8 value) { struct ipu_fw_psys_process_group *parent = (struct ipu_fw_psys_process_group *)((char *)ptr + ptr->parent_offset); ptr->cells[index] = value; parent->resource_bitmap |= 1 << value; return 0; } u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) { return ptr->cells[index]; } int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) { struct ipu_fw_psys_process_group *parent; u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); int retval = -1; u8 value; parent = (struct ipu_fw_psys_process_group *)((char *)ptr + ptr->parent_offset); value = var->cell_num; if ((1 << cell_id) != 0 && ((1 << cell_id) & parent->resource_bitmap)) { ipu_fw_psys_set_process_cell_id(ptr, 0, value); parent->resource_bitmap &= ~(1 << cell_id); retval = 0; } return retval; } int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, u16 value) { if (var->set_proc_dev_chn) return var->set_proc_dev_chn(ptr, offset, value); WARN(1, "ipu6 psys res var is not initialised correctly."); return 0; } int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, u16 id, u32 bitmap, u32 active_bitmap) { if (var->set_proc_dfm_bitmap) return var->set_proc_dfm_bitmap(ptr, id, bitmap, active_bitmap); WARN(1, "ipu6 psys res var is not initialised correctly."); return 0; } int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, u16 type_id, u16 mem_id, u16 offset) { if (var->set_proc_ext_mem) return var->set_proc_ext_mem(ptr, type_id, mem_id, offset); WARN(1, "ipu6 psys res var is not initialised correctly."); return 0; } int ipu_fw_psys_get_program_manifest_by_process( struct ipu_fw_generic_program_manifest *gen_pm, const struct ipu_fw_psys_program_group_manifest *pg_manifest, struct ipu_fw_psys_process *process) { if (var->get_pgm_by_proc) return var->get_pgm_by_proc(gen_pm, pg_manifest, process); WARN(1, "ipu6 psys res var is not initialised 
correctly."); return 0; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-platform-psys.h000066400000000000000000000036361471702545500310110ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_PSYS_H #define IPU_PLATFORM_PSYS_H #include "ipu-psys.h" #include #define IPU_PSYS_BUF_SET_POOL_SIZE 8 #define IPU_PSYS_BUF_SET_MAX_SIZE 1024 struct ipu_fw_psys_buffer_set; enum ipu_psys_cmd_state { KCMD_STATE_PPG_NEW, KCMD_STATE_PPG_START, KCMD_STATE_PPG_ENQUEUE, KCMD_STATE_PPG_STOP, KCMD_STATE_PPG_COMPLETE }; struct ipu_psys_scheduler { struct list_head ppgs; struct mutex bs_mutex; /* Protects buf_set field */ struct list_head buf_sets; }; enum ipu_psys_ppg_state { PPG_STATE_START = (1 << 0), PPG_STATE_STARTING = (1 << 1), PPG_STATE_STARTED = (1 << 2), PPG_STATE_RUNNING = (1 << 3), PPG_STATE_SUSPEND = (1 << 4), PPG_STATE_SUSPENDING = (1 << 5), PPG_STATE_SUSPENDED = (1 << 6), PPG_STATE_RESUME = (1 << 7), PPG_STATE_RESUMING = (1 << 8), PPG_STATE_RESUMED = (1 << 9), PPG_STATE_STOP = (1 << 10), PPG_STATE_STOPPING = (1 << 11), PPG_STATE_STOPPED = (1 << 12), }; struct ipu_psys_ppg { struct ipu_psys_pg *kpg; struct ipu_psys_fh *fh; struct list_head list; struct list_head sched_list; u64 token; void *manifest; struct mutex mutex; /* Protects kcmd and ppg state field */ struct list_head kcmds_new_list; struct list_head kcmds_processing_list; struct list_head kcmds_finished_list; enum ipu_psys_ppg_state state; u32 pri_base; int pri_dynamic; }; struct ipu_psys_buffer_set { struct list_head list; struct ipu_fw_psys_buffer_set *buf_set; size_t size; size_t buf_set_size; dma_addr_t dma_addr; void *kaddr; struct ipu_psys_kcmd *kcmd; }; int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd); void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, struct ipu_psys_kcmd *kcmd, int error); int ipu_psys_fh_init(struct ipu_psys_fh *fh); int ipu_psys_fh_deinit(struct ipu_psys_fh *fh); #endif /* IPU_PLATFORM_PSYS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-platform-resources.h000066400000000000000000000066101471702545500320200ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2018 - 2024 Intel Corporation */ #ifndef IPU_PLATFORM_RESOURCES_COMMON_H #define IPU_PLATFORM_RESOURCES_COMMON_H #define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 0 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 2 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT 2 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 #define IPU_FW_PSYS_N_FRAME_PLANES 6 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 #define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 1 #define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES 4 #define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES 4 #define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 #define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 4 #define IPU_FW_PSYS_RBM_NOF_ELEMS 5 #define IPU_FW_PSYS_KBM_NOF_ELEMS 4 struct ipu_fw_psys_process { s16 parent_offset; u8 size; u8 cell_dependencies_offset; u8 terminal_dependencies_offset; u8 process_extension_offset; u8 ID; u8 program_idx; u8 state; u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; u8 cell_dependency_count; u8 
terminal_dependency_count; }; struct ipu_fw_psys_program_manifest { u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; s16 parent_offset; u8 program_dependency_offset; u8 terminal_dependency_offset; u8 size; u8 program_extension_offset; u8 program_type; u8 ID; u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; u8 cell_type_id; u8 program_dependency_count; u8 terminal_dependency_count; }; /* platform specific resource interface */ struct ipu_psys_resource_pool; struct ipu_psys_resource_alloc; struct ipu_fw_psys_process_group; int ipu_psys_allocate_resources(const struct device *dev, struct ipu_fw_psys_process_group *pg, void *pg_manifest, struct ipu_psys_resource_alloc *alloc, struct ipu_psys_resource_pool *pool); int ipu_psys_move_resources(const struct device *dev, struct ipu_psys_resource_alloc *alloc, struct ipu_psys_resource_pool *source_pool, struct ipu_psys_resource_pool *target_pool); void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, struct ipu_psys_resource_pool *dest); int ipu_psys_try_allocate_resources(struct device *dev, struct ipu_fw_psys_process_group *pg, void *pg_manifest, struct ipu_psys_resource_pool *pool); void ipu_psys_reset_process_cell(const struct device *dev, struct ipu_fw_psys_process_group *pg, void *pg_manifest, int process_count); void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc, struct ipu_psys_resource_pool *pool); int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, u16 id, u32 bitmap, u32 active_bitmap); int ipu_psys_allocate_cmd_queue_resource(struct ipu_psys_resource_pool *pool); void ipu_psys_free_cmd_queue_resource(struct ipu_psys_resource_pool *pool, u8 queue_id); extern const struct ipu_fw_resource_definitions *ipu6_res_defs; extern const struct ipu_fw_resource_definitions *ipu6se_res_defs; extern const struct ipu_fw_resource_definitions *ipu6ep_res_defs; extern struct ipu6_psys_hw_res_variant hw_var; #endif /* IPU_PLATFORM_RESOURCES_COMMON_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-psys-compat32.c000066400000000000000000000141601471702545500306020ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include "ipu-psys.h" static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -ENOTTY; if (file->f_op->unlocked_ioctl) ret = file->f_op->unlocked_ioctl(file, cmd, arg); return ret; } struct ipu_psys_buffer32 { u64 len; union { int fd; compat_uptr_t userptr; u64 reserved; } base; u32 data_offset; u32 bytes_used; u32 flags; u32 reserved[2]; } __packed; struct ipu_psys_command32 { u64 issue_id; u64 user_token; u32 priority; compat_uptr_t pg_manifest; compat_uptr_t buffers; int pg; u32 pg_manifest_size; u32 bufcount; u32 min_psys_freq; u32 frame_counter; u32 reserved[2]; } __packed; struct ipu_psys_manifest32 { u32 index; u32 size; compat_uptr_t manifest; u32 reserved[5]; } __packed; static int get_ipu_psys_command32(struct ipu_psys_command *kp, struct ipu_psys_command32 __user *up) { compat_uptr_t pgm, bufs; bool access_ok; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) access_ok = access_ok(VERIFY_READ, up, sizeof(struct ipu_psys_command32)); #else access_ok = access_ok(up, sizeof(struct ipu_psys_command32)); #endif if (!access_ok || get_user(kp->issue_id, &up->issue_id) || get_user(kp->user_token, &up->user_token) || get_user(kp->priority, &up->priority) || get_user(pgm, &up->pg_manifest) || get_user(bufs, &up->buffers) || get_user(kp->pg, 
&up->pg) || get_user(kp->pg_manifest_size, &up->pg_manifest_size) || get_user(kp->bufcount, &up->bufcount) || get_user(kp->min_psys_freq, &up->min_psys_freq) || get_user(kp->frame_counter, &up->frame_counter) ) return -EFAULT; kp->pg_manifest = compat_ptr(pgm); kp->buffers = compat_ptr(bufs); return 0; } static int get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, struct ipu_psys_buffer32 __user *up) { compat_uptr_t ptr; bool access_ok; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) access_ok = access_ok(VERIFY_READ, up, sizeof(struct ipu_psys_buffer32)); #else access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); #endif if (!access_ok || get_user(kp->len, &up->len) || get_user(ptr, &up->base.userptr) || get_user(kp->data_offset, &up->data_offset) || get_user(kp->bytes_used, &up->bytes_used) || get_user(kp->flags, &up->flags)) return -EFAULT; kp->base.userptr = compat_ptr(ptr); return 0; } static int put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, struct ipu_psys_buffer32 __user *up) { bool access_ok; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) access_ok = access_ok(VERIFY_WRITE, up, sizeof(struct ipu_psys_buffer32)); #else access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32)); #endif if (!access_ok || put_user(kp->len, &up->len) || put_user(kp->base.fd, &up->base.fd) || put_user(kp->data_offset, &up->data_offset) || put_user(kp->bytes_used, &up->bytes_used) || put_user(kp->flags, &up->flags)) return -EFAULT; return 0; } static int get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, struct ipu_psys_manifest32 __user *up) { compat_uptr_t ptr; bool access_ok; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) access_ok = access_ok(VERIFY_READ, up, sizeof(struct ipu_psys_manifest32)); #else access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); #endif if (!access_ok || get_user(kp->index, &up->index) || get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) return -EFAULT; kp->manifest = compat_ptr(ptr); return 0; } static int put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, struct ipu_psys_manifest32 __user *up) { compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); bool access_ok; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) access_ok = access_ok(VERIFY_WRITE, up, sizeof(struct ipu_psys_manifest32)); #else access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32)); #endif if (!access_ok || put_user(kp->index, &up->index) || put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) return -EFAULT; return 0; } #define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) #define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) #define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) #define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) #define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) { union { struct ipu_psys_buffer buf; struct ipu_psys_command cmd; struct ipu_psys_event ev; struct ipu_psys_manifest m; } karg; int compatible_arg = 1; int err = 0; void __user *up = compat_ptr(arg); switch (cmd) { case IPU_IOC_GETBUF32: cmd = IPU_IOC_GETBUF; break; case IPU_IOC_PUTBUF32: cmd = IPU_IOC_PUTBUF; break; case IPU_IOC_QCMD32: cmd = IPU_IOC_QCMD; break; case IPU_IOC_GET_MANIFEST32: cmd = IPU_IOC_GET_MANIFEST; break; } switch (cmd) { case IPU_IOC_GETBUF: case IPU_IOC_PUTBUF: err = get_ipu_psys_buffer32(&karg.buf, up); compatible_arg = 0; break; case IPU_IOC_QCMD: err = get_ipu_psys_command32(&karg.cmd, 
up); compatible_arg = 0; break; case IPU_IOC_GET_MANIFEST: err = get_ipu_psys_manifest32(&karg.m, up); compatible_arg = 0; break; } if (err) return err; if (compatible_arg) { err = native_ioctl(file, cmd, (unsigned long)up); } else { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); err = native_ioctl(file, cmd, (unsigned long)&karg); set_fs(old_fs); #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0) mm_segment_t old_fs = force_uaccess_begin(); err = native_ioctl(file, cmd, (unsigned long)&karg); force_uaccess_end(old_fs); #else err = native_ioctl(file, cmd, (unsigned long)&karg); #endif } if (err) return err; switch (cmd) { case IPU_IOC_GETBUF: err = put_ipu_psys_buffer32(&karg.buf, up); break; case IPU_IOC_GET_MANIFEST: err = put_ipu_psys_manifest32(&karg.m, up); break; } return err; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-psys.c000066400000000000000000002003621471702545500271550ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) #include #else #include #endif #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) #include #else #include #endif #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #include "ipu.h" #include "ipu-mmu.h" #include "ipu-bus.h" #include "ipu-platform.h" #include "ipu-buttress.h" #include "ipu-cpd.h" #include "ipu-fw-psys.h" #include "ipu-psys.h" #include "ipu-platform-psys.h" #include "ipu-platform-regs.h" #include "ipu-fw-com.h" #else #include "ipu6.h" #include "ipu6-mmu.h" #include "ipu6-bus.h" #include "ipu6-buttress.h" #include "ipu6-cpd.h" #include "ipu-fw-psys.h" #include "ipu-psys.h" #include "ipu6-platform-regs.h" #include "ipu6-fw-com.h" #endif static bool async_fw_init; module_param(async_fw_init, bool, 0664); MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) #define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7 #endif #define IPU_PSYS_NUM_DEVICES 4 #define IPU_PSYS_MAX_NUM_DESCS 1024 #define IPU_PSYS_MAX_NUM_BUFS 1024 #define IPU_PSYS_MAX_NUM_BUFS_LRU 12 static int psys_runtime_pm_resume(struct device *dev); static int psys_runtime_pm_suspend(struct device *dev); static dev_t ipu_psys_dev_t; static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); static DEFINE_MUTEX(ipu_psys_mutex); static struct fw_init_task { struct delayed_work work; struct ipu_psys *psys; } fw_init_task; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static void ipu_psys_remove(struct ipu_bus_device *adev); #else static void ipu6_psys_remove(struct auxiliary_device *auxdev); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static struct bus_type ipu_psys_bus = { .name = IPU_PSYS_NAME, }; #else static const struct bus_type ipu6_psys_bus = { .name = "intel-ipu6-psys", }; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) #define PKG_DIR_ENT_LEN_FOR_PSYS 2 #define PKG_DIR_SIZE_MASK_FOR_PSYS GENMASK(23, 0) enum ipu6_version ipu_ver; static u32 ipu6_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx) { return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS]; } static u32 ipu6_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) { return pkg_dir[1]; } static u32 ipu6_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) { return 
pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] & PKG_DIR_SIZE_MASK_FOR_PSYS; } #define PKG_DIR_ID_SHIFT 48 #define PKG_DIR_ID_MASK 0x7f static u32 ipu6_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) { return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN_FOR_PSYS + 1] >> PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; } #endif /* * These are some trivial wrappers that save us from open-coding some * common patterns and also that's were we have some checking (for the * time being) */ static void ipu_desc_add(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) { fh->num_descs++; WARN_ON_ONCE(fh->num_descs >= IPU_PSYS_MAX_NUM_DESCS); list_add(&desc->list, &fh->descs_list); } static void ipu_desc_del(struct ipu_psys_fh *fh, struct ipu_psys_desc *desc) { fh->num_descs--; list_del_init(&desc->list); } static void ipu_buffer_add(struct ipu_psys_fh *fh, struct ipu_psys_kbuffer *kbuf) { fh->num_bufs++; WARN_ON_ONCE(fh->num_bufs >= IPU_PSYS_MAX_NUM_BUFS); list_add(&kbuf->list, &fh->bufs_list); } static void ipu_buffer_del(struct ipu_psys_fh *fh, struct ipu_psys_kbuffer *kbuf) { fh->num_bufs--; list_del_init(&kbuf->list); } static void ipu_buffer_lru_add(struct ipu_psys_fh *fh, struct ipu_psys_kbuffer *kbuf) { fh->num_bufs_lru++; list_add_tail(&kbuf->list, &fh->bufs_lru); } static void ipu_buffer_lru_del(struct ipu_psys_fh *fh, struct ipu_psys_kbuffer *kbuf) { fh->num_bufs_lru--; list_del_init(&kbuf->list); } static struct ipu_psys_kbuffer *ipu_psys_kbuffer_alloc(void) { struct ipu_psys_kbuffer *kbuf; kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) return NULL; atomic_set(&kbuf->map_count, 0); INIT_LIST_HEAD(&kbuf->list); return kbuf; } static struct ipu_psys_desc *ipu_psys_desc_alloc(int fd) { struct ipu_psys_desc *desc; desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return NULL; desc->fd = fd; INIT_LIST_HEAD(&desc->list); return desc; } struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_pg *kpg; unsigned long flags; spin_lock_irqsave(&psys->pgs_lock, flags); list_for_each_entry(kpg, &psys->pgs, list) { if (!kpg->pg_size && kpg->size >= pg_size) { kpg->pg_size = pg_size; spin_unlock_irqrestore(&psys->pgs_lock, flags); return kpg; } } spin_unlock_irqrestore(&psys->pgs_lock, flags); /* no big enough buffer available, allocate new one */ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); if (!kpg) return NULL; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size, &kpg->pg_dma_addr, GFP_KERNEL, 0); #else kpg->pg = dma_alloc_attrs(dev, pg_size, &kpg->pg_dma_addr, GFP_KERNEL, 0); #endif if (!kpg->pg) { kfree(kpg); return NULL; } kpg->pg_size = pg_size; kpg->size = pg_size; spin_lock_irqsave(&psys->pgs_lock, flags); list_add(&kpg->list, &psys->pgs); spin_unlock_irqrestore(&psys->pgs_lock, flags); return kpg; } static struct ipu_psys_desc *psys_desc_lookup(struct ipu_psys_fh *fh, int fd) { struct ipu_psys_desc *desc; list_for_each_entry(desc, &fh->descs_list, list) { if (desc->fd == fd) return desc; } return NULL; } static bool dmabuf_cmp(struct dma_buf *lb, struct dma_buf *rb) { return lb == rb && lb->size == rb->size; } static struct ipu_psys_kbuffer *psys_buf_lookup(struct ipu_psys_fh *fh, int fd) { struct ipu_psys_kbuffer *kbuf; struct dma_buf *dma_buf; dma_buf = dma_buf_get(fd); if (IS_ERR(dma_buf)) return NULL; /* * First lookup so-called `active` list, that is the list of * referenced buffers */ 
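/* A hit requires both the same struct dma_buf pointer and the same size, see dmabuf_cmp() above; the temporary reference taken by dma_buf_get() is dropped again on every exit path. */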
list_for_each_entry(kbuf, &fh->bufs_list, list) { if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { dma_buf_put(dma_buf); return kbuf; } } /* * We didn't find anything on the `active` list, try the LRU list * (list of unreferenced buffers) and possibly resurrect a buffer */ list_for_each_entry(kbuf, &fh->bufs_lru, list) { if (dmabuf_cmp(kbuf->dbuf, dma_buf)) { dma_buf_put(dma_buf); ipu_buffer_lru_del(fh, kbuf); ipu_buffer_add(fh, kbuf); return kbuf; } } dma_buf_put(dma_buf); return NULL; } struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) { struct ipu_psys_desc *desc; desc = psys_desc_lookup(fh, fd); if (!desc) return NULL; return desc->kbuf; } struct ipu_psys_kbuffer * ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) { struct ipu_psys_kbuffer *kbuffer; list_for_each_entry(kbuffer, &fh->bufs_list, list) { if (kbuffer->kaddr == kaddr) return kbuffer; } return NULL; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) { struct vm_area_struct *vma; unsigned long start, end; int npages, array_size; struct page **pages; struct sg_table *sgt; int nr = 0; int ret = -ENOMEM; start = (unsigned long)attach->userptr; end = PAGE_ALIGN(start + attach->len); npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; array_size = npages * sizeof(struct page *); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return -ENOMEM; if (attach->npages != 0) { pages = attach->pages; npages = attach->npages; attach->vma_is_io = 1; goto skip_pages; } pages = kvzalloc(array_size, GFP_KERNEL); if (!pages) goto free_sgt; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) down_read(¤t->mm->mmap_sem); #else mmap_read_lock(current->mm); #endif vma = find_vma(current->mm, start); if (!vma) { ret = -EFAULT; goto error_up_read; } /* * For buffers from Gralloc, VM_PFNMAP is expected, * but VM_IO is set. Possibly bug in Gralloc. 
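 * Either flag marks the VMA as a pfn-based mapping, so the pages are resolved below via follow_pfn() rather than get_user_pages().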
*/ attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP); if (attach->vma_is_io) { unsigned long io_start = start; if (vma->vm_end < start + attach->len) { dev_err(attach->dev, "vma at %lu is too small for %llu bytes\n", start, attach->len); ret = -EFAULT; goto error_up_read; } for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) { unsigned long pfn; ret = follow_pfn(vma, io_start, &pfn); if (ret) goto error_up_read; pages[nr] = pfn_to_page(pfn); } } else { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) nr = get_user_pages(current, current->mm, start & PAGE_MASK, npages, 1, 0, pages, NULL); #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) nr = get_user_pages(start & PAGE_MASK, npages, 1, 0, pages, NULL); #elif LINUX_VERSION_CODE < KERNEL_VERSION(6, 5, 0) nr = get_user_pages(start & PAGE_MASK, npages, FOLL_WRITE, pages, NULL); #else nr = get_user_pages(start & PAGE_MASK, npages, FOLL_WRITE, pages); #endif if (nr < npages) goto error_up_read; } #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) up_read(¤t->mm->mmap_sem); #else mmap_read_unlock(current->mm); #endif attach->pages = pages; attach->npages = npages; skip_pages: ret = sg_alloc_table_from_pages(sgt, pages, npages, start & ~PAGE_MASK, attach->len, GFP_KERNEL); if (ret < 0) goto error; attach->sgt = sgt; return 0; error_up_read: #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) up_read(¤t->mm->mmap_sem); #else mmap_read_unlock(current->mm); #endif error: if (!attach->vma_is_io) while (nr > 0) put_page(pages[--nr]); if (array_size <= PAGE_SIZE) kfree(pages); else vfree(pages); free_sgt: kfree(sgt); dev_err(attach->dev, "failed to get userpages:%d\n", ret); return ret; } static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) { if (!attach || !attach->userptr || !attach->sgt) return; if (!attach->vma_is_io) { int i = attach->npages; while (--i >= 0) { set_page_dirty_lock(attach->pages[i]); put_page(attach->pages[i]); } } kvfree(attach->pages); sg_free_table(attach->sgt); kfree(attach->sgt); attach->sgt = NULL; } #else static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) { struct vm_area_struct *vma; unsigned long start, end; int npages, array_size; struct page **pages; struct sg_table *sgt; int ret = -ENOMEM; int nr = 0; u32 flags; start = (unsigned long)attach->userptr; end = PAGE_ALIGN(start + attach->len); npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; array_size = npages * sizeof(struct page *); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return -ENOMEM; WARN_ON_ONCE(attach->npages); pages = kvzalloc(array_size, GFP_KERNEL); if (!pages) goto free_sgt; mmap_read_lock(current->mm); vma = vma_lookup(current->mm, start); if (unlikely(!vma)) { ret = -EFAULT; goto error_up_read; } mmap_read_unlock(current->mm); flags = FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM; nr = pin_user_pages_fast(start & PAGE_MASK, npages, flags, pages); if (nr < npages) goto error; attach->pages = pages; attach->npages = npages; ret = sg_alloc_table_from_pages(sgt, pages, npages, start & ~PAGE_MASK, attach->len, GFP_KERNEL); if (ret < 0) goto error; attach->sgt = sgt; return 0; error_up_read: mmap_read_unlock(current->mm); error: if (nr) unpin_user_pages(pages, nr); kvfree(pages); free_sgt: kfree(sgt); pr_err("failed to get userpages:%d\n", ret); return ret; } static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) { if (!attach || !attach->userptr || !attach->sgt) return; unpin_user_pages(attach->pages, attach->npages); kvfree(attach->pages); sg_free_table(attach->sgt); kfree(attach->sgt); attach->sgt = 
NULL; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) static int ipu_dma_buf_attach(struct dma_buf *dbuf, struct dma_buf_attachment *attach) #else static int ipu_dma_buf_attach(struct dma_buf *dbuf, struct device *dev, struct dma_buf_attachment *attach) #endif { struct ipu_psys_kbuffer *kbuf = dbuf->priv; struct ipu_dma_buf_attach *ipu_attach; int ret; ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); if (!ipu_attach) return -ENOMEM; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) ipu_attach->dev = dev; #endif ipu_attach->len = kbuf->len; ipu_attach->userptr = kbuf->userptr; ret = ipu_psys_get_userpages(ipu_attach); if (ret) { kfree(ipu_attach); return ret; } attach->priv = ipu_attach; return 0; } static void ipu_dma_buf_detach(struct dma_buf *dbuf, struct dma_buf_attachment *attach) { struct ipu_dma_buf_attach *ipu_attach = attach->priv; ipu_psys_put_userpages(ipu_attach); kfree(ipu_attach); attach->priv = NULL; } static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct ipu_dma_buf_attach *ipu_attach = attach->priv; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) DEFINE_DMA_ATTRS(attrs); #else unsigned long attrs; #endif int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl, ipu_attach->sgt->orig_nents, dir, &attrs); if (!ret) { dev_dbg(attach->dev, "buf map failed\n"); return ERR_PTR(-EIO); } ipu_attach->sgt->nents = ret; #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) attrs = DMA_ATTR_SKIP_CPU_SYNC; ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl, ipu_attach->sgt->orig_nents, dir, attrs); if (!ret) { dev_dbg(attach->dev, "buf map failed\n"); return ERR_PTR(-EIO); } ipu_attach->sgt->nents = ret; #else attrs = DMA_ATTR_SKIP_CPU_SYNC; ret = dma_map_sgtable(attach->dev, ipu_attach->sgt, dir, attrs); if (ret < 0) { dev_dbg(attach->dev, "buf map failed\n"); return ERR_PTR(-EIO); } #endif /* * Initial cache flush to avoid writing dirty pages for buffers which * are later marked as IPU_BUFFER_FLAG_NO_FLUSH. 
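 * The DMA_BIDIRECTIONAL sync below therefore happens once at map time, so later per-frame flushes can safely be skipped for those buffers.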
*/ dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl, ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL); return ipu_attach->sgt; } static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); #else dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); #endif } static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) { return -ENOTTY; } #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) static void *ipu_dma_buf_kmap_atomic(struct dma_buf *dbuf, unsigned long pgnum) { return NULL; } #endif static void ipu_dma_buf_release(struct dma_buf *buf) { struct ipu_psys_kbuffer *kbuf = buf->priv; if (!kbuf) return; if (kbuf->db_attach) ipu_psys_put_userpages(kbuf->db_attach->priv); kfree(kbuf); } static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) size_t start, size_t len, #endif enum dma_data_direction dir) { return -ENOTTY; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) \ || LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct dma_buf_attachment *attach; struct ipu_dma_buf_attach *ipu_attach; if (list_empty(&dmabuf->attachments)) return -EINVAL; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu_attach = attach->priv; if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) return -EINVAL; map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); map->is_iomem = false; if (!map->vaddr) return -EINVAL; return 0; } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) { struct dma_buf_attachment *attach; struct ipu_dma_buf_attach *ipu_attach; if (list_empty(&dmabuf->attachments)) return -EINVAL; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu_attach = attach->priv; if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) return -EINVAL; map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); map->is_iomem = false; if (!map->vaddr) return -EINVAL; return 0; } #else static void *ipu_dma_buf_vmap(struct dma_buf *dmabuf) { struct dma_buf_attachment *attach; struct ipu_dma_buf_attach *ipu_attach; if (list_empty(&dmabuf->attachments)) return NULL; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu_attach = attach->priv; if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) return NULL; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) return vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0, PAGE_KERNEL); #else return vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0); #endif } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) \ || LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct dma_buf_attachment *attach; struct ipu_dma_buf_attach *ipu_attach; if (WARN_ON(list_empty(&dmabuf->attachments))) return; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu_attach = attach->priv; if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) return; 
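/* Release the mapping created by vm_map_ram() in ipu_dma_buf_vmap(); the page count comes from the attachment that backed that mapping. */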
vm_unmap_ram(map->vaddr, ipu_attach->npages); } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) { struct dma_buf_attachment *attach; struct ipu_dma_buf_attach *ipu_attach; if (WARN_ON(list_empty(&dmabuf->attachments))) return; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu_attach = attach->priv; if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) return; vm_unmap_ram(map->vaddr, ipu_attach->npages); } #else static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) { struct dma_buf_attachment *attach; struct ipu_dma_buf_attach *ipu_attach; if (WARN_ON(list_empty(&dmabuf->attachments))) return; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu_attach = attach->priv; if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) return; vm_unmap_ram(vaddr, ipu_attach->npages); } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct dma_buf_ops ipu_dma_buf_ops = { #else static const struct dma_buf_ops ipu_dma_buf_ops = { #endif .attach = ipu_dma_buf_attach, .detach = ipu_dma_buf_detach, .map_dma_buf = ipu_dma_buf_map, .unmap_dma_buf = ipu_dma_buf_unmap, .release = ipu_dma_buf_release, .begin_cpu_access = ipu_dma_buf_begin_cpu_access, #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) .kmap = ipu_dma_buf_kmap, .kmap_atomic = ipu_dma_buf_kmap_atomic, #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) .map_atomic = ipu_dma_buf_kmap_atomic, #endif .mmap = ipu_dma_buf_mmap, .vmap = ipu_dma_buf_vmap, .vunmap = ipu_dma_buf_vunmap, }; static int ipu_psys_open(struct inode *inode, struct file *file) { struct ipu_psys *psys = inode_to_ipu_psys(inode); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_device *isp = psys->adev->isp; #endif struct ipu_psys_fh *fh; int rval; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (isp->flr_done) return -EIO; #endif fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (!fh) return -ENOMEM; fh->psys = psys; file->private_data = fh; mutex_init(&fh->mutex); INIT_LIST_HEAD(&fh->bufs_list); INIT_LIST_HEAD(&fh->descs_list); INIT_LIST_HEAD(&fh->bufs_lru); init_waitqueue_head(&fh->wait); rval = ipu_psys_fh_init(fh); if (rval) goto open_failed; mutex_lock(&psys->mutex); list_add_tail(&fh->list, &psys->fhs); mutex_unlock(&psys->mutex); return 0; open_failed: mutex_destroy(&fh->mutex); kfree(fh); return rval; } static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer *kbuf) { if (!kbuf) return; kbuf->valid = false; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) \ || LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) if (kbuf->kaddr) { struct iosys_map dmap; iosys_map_set_vaddr(&dmap, kbuf->kaddr); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 255) dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); #else dma_buf_vunmap(kbuf->dbuf, &dmap); #endif } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) if (kbuf->kaddr) { struct dma_buf_map dmap; dma_buf_map_set_vaddr(&dmap, kbuf->kaddr); dma_buf_vunmap(kbuf->dbuf, &dmap); } #else if (kbuf->kaddr) dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr); #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 255) if (!IS_ERR_OR_NULL(kbuf->sgt)) dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); #else if (kbuf->sgt) dma_buf_unmap_attachment(kbuf->db_attach, 
kbuf->sgt, DMA_BIDIRECTIONAL); #endif if (!IS_ERR_OR_NULL(kbuf->db_attach)) dma_buf_detach(kbuf->dbuf, kbuf->db_attach); dma_buf_put(kbuf->dbuf); kbuf->db_attach = NULL; kbuf->dbuf = NULL; kbuf->sgt = NULL; } static void __ipu_psys_unmapbuf(struct ipu_psys_fh *fh, struct ipu_psys_kbuffer *kbuf) { /* From now on it is not safe to use this kbuffer */ ipu_psys_kbuf_unmap(kbuf); ipu_buffer_del(fh, kbuf); if (!kbuf->userptr) kfree(kbuf); } static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_kbuffer *kbuf; struct ipu_psys_desc *desc; desc = psys_desc_lookup(fh, fd); if (WARN_ON_ONCE(!desc)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "descriptor not found: %d\n", fd); #else dev_err(dev, "descriptor not found: %d\n", fd); #endif return -EINVAL; } kbuf = desc->kbuf; /* descriptor is gone now */ ipu_desc_del(fh, desc); kfree(desc); if (WARN_ON_ONCE(!kbuf || !kbuf->dbuf)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, #else dev_err(dev, #endif "descriptor with no buffer: %d\n", fd); return -EINVAL; } /* Wait for final UNMAP */ if (!atomic_dec_and_test(&kbuf->map_count)) return 0; __ipu_psys_unmapbuf(fh, kbuf); return 0; } static int ipu_psys_release(struct inode *inode, struct file *file) { struct ipu_psys *psys = inode_to_ipu_psys(inode); struct ipu_psys_fh *fh = file->private_data; mutex_lock(&fh->mutex); while (!list_empty(&fh->descs_list)) { struct ipu_psys_desc *desc; desc = list_first_entry(&fh->descs_list, struct ipu_psys_desc, list); ipu_desc_del(fh, desc); kfree(desc); } while (!list_empty(&fh->bufs_lru)) { struct ipu_psys_kbuffer *kbuf; kbuf = list_first_entry(&fh->bufs_lru, struct ipu_psys_kbuffer, list); ipu_buffer_lru_del(fh, kbuf); __ipu_psys_unmapbuf(fh, kbuf); } while (!list_empty(&fh->bufs_list)) { struct dma_buf_attachment *db_attach; struct ipu_psys_kbuffer *kbuf; kbuf = list_first_entry(&fh->bufs_list, struct ipu_psys_kbuffer, list); ipu_buffer_del(fh, kbuf); db_attach = kbuf->db_attach; /* Unmap and release buffers */ if (kbuf->dbuf && db_attach) { ipu_psys_kbuf_unmap(kbuf); } else { if (db_attach) ipu_psys_put_userpages(db_attach->priv); kfree(kbuf); } } mutex_unlock(&fh->mutex); mutex_lock(&psys->mutex); list_del(&fh->list); mutex_unlock(&psys->mutex); ipu_psys_fh_deinit(fh); mutex_lock(&psys->mutex); if (list_empty(&psys->fhs)) psys->power_gating = 0; mutex_unlock(&psys->mutex); mutex_destroy(&fh->mutex); kfree(fh); return 0; } static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) { struct ipu_psys_kbuffer *kbuf; struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_desc *desc; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct dma_buf *dbuf; int ret; if (!buf->base.userptr) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "Buffer allocation not supported\n"); #else dev_err(dev, "Buffer allocation not supported\n"); #endif return -EINVAL; } kbuf = ipu_psys_kbuffer_alloc(); if (!kbuf) return -ENOMEM; kbuf->len = buf->len; kbuf->userptr = buf->base.userptr; kbuf->flags = buf->flags; exp_info.ops = &ipu_dma_buf_ops; exp_info.size = kbuf->len; exp_info.flags = O_RDWR; exp_info.priv = kbuf; dbuf = dma_buf_export(&exp_info); if (IS_ERR(dbuf)) { kfree(kbuf); return PTR_ERR(dbuf); } ret = dma_buf_fd(dbuf, 
0); if (ret < 0) { dma_buf_put(dbuf); return ret; } buf->base.fd = ret; buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; kbuf->flags = buf->flags; desc = ipu_psys_desc_alloc(ret); if (!desc) { dma_buf_put(dbuf); return -ENOMEM; } kbuf->dbuf = dbuf; mutex_lock(&fh->mutex); ipu_desc_add(fh, desc); ipu_buffer_add(fh, kbuf); mutex_unlock(&fh->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p size %llu to fd %d", #else dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", #endif buf->base.userptr, buf->len, buf->base.fd); return 0; } static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) { return 0; } static void ipu_psys_kbuffer_lru(struct ipu_psys_fh *fh, struct ipu_psys_kbuffer *kbuf) { ipu_buffer_del(fh, kbuf); ipu_buffer_lru_add(fh, kbuf); while (fh->num_bufs_lru > IPU_PSYS_MAX_NUM_BUFS_LRU) { kbuf = list_first_entry(&fh->bufs_lru, struct ipu_psys_kbuffer, list); ipu_buffer_lru_del(fh, kbuf); __ipu_psys_unmapbuf(fh, kbuf); } } struct ipu_psys_kbuffer *ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_kbuffer *kbuf; struct ipu_psys_desc *desc; struct dma_buf *dbuf; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 255) || \ LINUX_VERSION_CODE == KERNEL_VERSION(5, 15, 71) struct iosys_map dmap = { .is_iomem = false, }; #elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) struct dma_buf_map dmap; #endif dbuf = dma_buf_get(fd); if (IS_ERR(dbuf)) return NULL; desc = psys_desc_lookup(fh, fd); if (!desc) { desc = ipu_psys_desc_alloc(fd); if (!desc) goto desc_alloc_fail; ipu_desc_add(fh, desc); } kbuf = psys_buf_lookup(fh, fd); if (!kbuf) { kbuf = ipu_psys_kbuffer_alloc(); if (!kbuf) goto buf_alloc_fail; ipu_buffer_add(fh, kbuf); } /* If this descriptor references no buffer or new buffer */ if (desc->kbuf != kbuf) { if (desc->kbuf) { /* * Un-reference old buffer and possibly put it on * the LRU list */ if (atomic_dec_and_test(&desc->kbuf->map_count)) ipu_psys_kbuffer_lru(fh, desc->kbuf); } /* Grab reference of the new buffer */ atomic_inc(&kbuf->map_count); } desc->kbuf = kbuf; if (kbuf->sgt) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "fd %d has been mapped!\n", fd); #else dev_dbg(dev, "fd %d has been mapped!\n", fd); #endif dma_buf_put(dbuf); goto mapbuf_end; } kbuf->dbuf = dbuf; if (kbuf->len == 0) kbuf->len = kbuf->dbuf->size; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev); if (IS_ERR(kbuf->db_attach)) { dev_dbg(&psys->adev->dev, "dma buf attach failed\n"); goto kbuf_map_fail; } #else kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); if (IS_ERR(kbuf->db_attach)) { dev_dbg(dev, "dma buf attach failed\n"); goto kbuf_map_fail; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 255) kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, DMA_BIDIRECTIONAL); #else kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL); #endif if (IS_ERR_OR_NULL(kbuf->sgt)) { kbuf->sgt = NULL; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "dma buf map attachment failed\n"); #else dev_dbg(dev, "dma buf map attachment failed\n"); #endif goto kbuf_map_fail; } kbuf->dma_addr = 
sg_dma_address(kbuf->sgt->sgl); /* no need vmap for imported dmabufs */ if (!kbuf->userptr) goto mapbuf_end; #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE != KERNEL_VERSION(5, 10, 46) #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 255) if (dma_buf_vmap_unlocked(kbuf->dbuf, &dmap)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "dma buf vmap failed\n"); #else dev_dbg(dev, "dma buf vmap failed\n"); #endif goto kbuf_map_fail; } #else if (dma_buf_vmap(kbuf->dbuf, &dmap)) { dev_dbg(&psys->adev->dev, "dma buf vmap failed\n"); goto kbuf_map_fail; } #endif kbuf->kaddr = dmap.vaddr; #else kbuf->kaddr = dma_buf_vmap(kbuf->dbuf); if (!kbuf->kaddr) { dev_dbg(&psys->adev->dev, "dma buf vmap failed\n"); goto kbuf_map_fail; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "%s kbuf %p fd %d with len %llu mapped\n", #else dev_dbg(dev, "%s kbuf %p fd %d with len %llu mapped\n", #endif __func__, kbuf, fd, kbuf->len); mapbuf_end: kbuf->valid = true; return kbuf; kbuf_map_fail: ipu_buffer_del(fh, kbuf); ipu_psys_kbuf_unmap(kbuf); dbuf = ERR_PTR(-EINVAL); if (!kbuf->userptr) kfree(kbuf); buf_alloc_fail: ipu_desc_del(fh, desc); kfree(desc); desc_alloc_fail: if (!IS_ERR(dbuf)) dma_buf_put(dbuf); return NULL; } static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) { struct ipu_psys_kbuffer *kbuf; mutex_lock(&fh->mutex); kbuf = ipu_psys_mapbuf_locked(fd, fh); mutex_unlock(&fh->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&fh->psys->adev->dev, "IOC_MAPBUF\n"); #else dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF\n"); #endif return kbuf ? 0 : -EINVAL; } static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) { long ret; mutex_lock(&fh->mutex); ret = ipu_psys_unmapbuf_locked(fd, fh); mutex_unlock(&fh->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&fh->psys->adev->dev, "IOC_UNMAPBUF\n"); #else dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_UNMAPBUF\n"); #endif return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static unsigned int ipu_psys_poll(struct file *file, struct poll_table_struct *wait) { struct ipu_psys_fh *fh = file->private_data; struct ipu_psys *psys = fh->psys; unsigned int res = 0; dev_dbg(&psys->adev->dev, "ipu psys poll\n"); poll_wait(file, &fh->wait, wait); if (ipu_get_completed_kcmd(fh)) res = POLLIN; dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res); return res; } #else static __poll_t ipu_psys_poll(struct file *file, struct poll_table_struct *wait) { struct ipu_psys_fh *fh = file->private_data; struct ipu_psys *psys = fh->psys; struct device *dev = &psys->adev->auxdev.dev; __poll_t ret = 0; dev_dbg(dev, "ipu psys poll\n"); poll_wait(file, &fh->wait, wait); if (ipu_get_completed_kcmd(fh)) ret = POLLIN; dev_dbg(dev, "ipu psys poll ret %u\n", ret); return ret; } #endif static long ipu_get_manifest(struct ipu_psys_manifest *manifest, struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_device *isp = psys->adev->isp; struct ipu_cpd_client_pkg_hdr *client_pkg; #else struct device *dev = &psys->adev->auxdev.dev; struct ipu6_bus_device *adev = psys->adev; struct ipu6_device *isp = adev->isp; struct ipu6_cpd_client_pkg_hdr *client_pkg; #endif u32 entries; void *host_fw_data; dma_addr_t dma_fw_data; u32 client_pkg_offset; host_fw_data = (void *)isp->cpd_fw->data; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dma_fw_data = sg_dma_address(psys->fw_sgt.sgl); #else dma_fw_data = 
sg_dma_address(adev->fw_sgt.sgl); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); if (!manifest || manifest->index > entries - 1) { dev_err(&psys->adev->dev, "invalid argument\n"); return -EINVAL; } if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) || ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) < IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) { dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n"); return -ENOENT; } client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir, manifest->index); #else entries = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); if (!manifest || manifest->index > entries - 1) { dev_err(dev, "invalid argument\n"); return -EINVAL; } if (!ipu6_cpd_pkg_dir_get_size(adev->pkg_dir, manifest->index) || ipu6_cpd_pkg_dir_get_type(adev->pkg_dir, manifest->index) < IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE) { dev_dbg(dev, "invalid pkg dir entry\n"); return -ENOENT; } client_pkg_offset = ipu6_cpd_pkg_dir_get_address(adev->pkg_dir, manifest->index); #endif client_pkg_offset -= dma_fw_data; client_pkg = host_fw_data + client_pkg_offset; manifest->size = client_pkg->pg_manifest_size; if (!manifest->manifest) return 0; if (copy_to_user(manifest->manifest, (uint8_t *)client_pkg + client_pkg->pg_manifest_offs, manifest->size)) { return -EFAULT; } return 0; } static long ipu_psys_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { union { struct ipu_psys_buffer buf; struct ipu_psys_command cmd; struct ipu_psys_event ev; struct ipu_psys_capability caps; struct ipu_psys_manifest m; } karg; struct ipu_psys_fh *fh = file->private_data; long err = 0; void __user *up = (void __user *)arg; bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); if (copy) { if (_IOC_SIZE(cmd) > sizeof(karg)) return -ENOTTY; if (_IOC_DIR(cmd) & _IOC_WRITE) { err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); if (err) return -EFAULT; } } switch (cmd) { case IPU_IOC_MAPBUF: err = ipu_psys_mapbuf(arg, fh); break; case IPU_IOC_UNMAPBUF: err = ipu_psys_unmapbuf(arg, fh); break; case IPU_IOC_QUERYCAP: karg.caps = fh->psys->caps; break; case IPU_IOC_GETBUF: err = ipu_psys_getbuf(&karg.buf, fh); break; case IPU_IOC_PUTBUF: err = ipu_psys_putbuf(&karg.buf, fh); break; case IPU_IOC_QCMD: err = ipu_psys_kcmd_new(&karg.cmd, fh); break; case IPU_IOC_DQEVENT: err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); break; case IPU_IOC_GET_MANIFEST: err = ipu_get_manifest(&karg.m, fh); break; default: err = -ENOTTY; break; } if (err) return err; if (copy && _IOC_DIR(cmd) & _IOC_READ) if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) return -EFAULT; return 0; } static const struct file_operations ipu_psys_fops = { .open = ipu_psys_open, .release = ipu_psys_release, .unlocked_ioctl = ipu_psys_ioctl, .poll = ipu_psys_poll, .owner = THIS_MODULE, }; static void ipu_psys_dev_release(struct device *dev) { } #ifdef CONFIG_PM static int psys_runtime_pm_resume(struct device *dev) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_psys *psys = ipu_bus_get_drvdata(adev); #else struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); #endif unsigned long flags; int retval; if (!psys) return 0; spin_lock_irqsave(&psys->ready_lock, flags); if (psys->ready) { spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } spin_unlock_irqrestore(&psys->ready_lock, flags); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) retval = 
ipu_mmu_hw_init(adev->mmu); #else retval = ipu6_mmu_hw_init(adev->mmu); #endif if (retval) return retval; if (async_fw_init && !psys->fwcom) { dev_err(dev, "%s: asynchronous firmware init not finished, skipping\n", __func__); return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (!ipu_buttress_auth_done(adev->isp)) { #else if (!ipu6_buttress_auth_done(adev->isp)) { #endif dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__); return 0; } ipu_psys_setup_hw(psys); ipu_psys_subdomains_power(psys, 1); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ipu_trace_restore(&psys->adev->dev); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ipu_configure_spc(adev->isp, &psys->pdata->ipdata->hw_variant, IPU_CPD_PKG_DIR_PSYS_SERVER_IDX, psys->pdata->base, psys->pkg_dir, psys->pkg_dir_dma_addr); #else ipu6_configure_spc(adev->isp, &psys->pdata->ipdata->hw_variant, IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX, psys->pdata->base, adev->pkg_dir, adev->pkg_dir_dma_addr); #endif retval = ipu_fw_psys_open(psys); if (retval) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "Failed to open abi.\n"); #else dev_err(dev, "Failed to open abi.\n"); #endif return retval; } spin_lock_irqsave(&psys->ready_lock, flags); psys->ready = 1; spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } static int psys_runtime_pm_suspend(struct device *dev) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_bus_device *adev = to_ipu_bus_device(dev); struct ipu_psys *psys = ipu_bus_get_drvdata(adev); #else struct ipu6_bus_device *adev = to_ipu6_bus_device(dev); struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); #endif unsigned long flags; int rval; if (!psys) return 0; if (!psys->ready) return 0; spin_lock_irqsave(&psys->ready_lock, flags); psys->ready = 0; spin_unlock_irqrestore(&psys->ready_lock, flags); /* * We can trace failure but better to not return an error. * At suspend we are progressing towards psys power gated state. * Any hang / failure inside psys will be forgotten soon. */ rval = ipu_fw_psys_close(psys); if (rval) dev_err(dev, "Device close failure: %d\n", rval); ipu_psys_subdomains_power(psys, 0); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ipu_mmu_hw_cleanup(adev->mmu); #else ipu6_mmu_hw_cleanup(adev->mmu); #endif return 0; } /* The following PM callbacks are needed to enable runtime PM in IPU PCI * device resume, otherwise, runtime PM can't work in PCI resume from * S3 state. 
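 * The system sleep callbacks below therefore only report success; the actual power sequencing is handled by the runtime PM handlers above.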
*/ static int psys_resume(struct device *dev) { return 0; } static int psys_suspend(struct device *dev) { return 0; } static const struct dev_pm_ops psys_pm_ops = { .runtime_suspend = psys_runtime_pm_suspend, .runtime_resume = psys_runtime_pm_resume, .suspend = psys_suspend, .resume = psys_resume, }; #define PSYS_PM_OPS (&psys_pm_ops) #else #define PSYS_PM_OPS NULL #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static int cpd_fw_reload(struct ipu_device *isp) { struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys); int rval; if (!isp->secure_mode) { dev_warn(&isp->pdev->dev, "CPD firmware reload was only supported for secure mode.\n"); return -EINVAL; } if (isp->cpd_fw) { ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir, psys->pkg_dir_dma_addr, psys->pkg_dir_size); ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt); release_firmware(isp->cpd_fw); isp->cpd_fw = NULL; dev_info(&isp->pdev->dev, "Old FW removed\n"); } rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, &isp->pdev->dev); if (rval) { dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n", isp->cpd_fw_name); return rval; } rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data, isp->cpd_fw->size); if (rval) { dev_err(&isp->pdev->dev, "Failed to validate cpd file\n"); goto out_release_firmware; } rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt); if (rval) goto out_release_firmware; psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data, sg_dma_address(psys->fw_sgt.sgl), &psys->pkg_dir_dma_addr, &psys->pkg_dir_size); if (!psys->pkg_dir) { rval = -EINVAL; goto out_unmap_fw_image; } isp->pkg_dir = psys->pkg_dir; isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr; isp->pkg_dir_size = psys->pkg_dir_size; if (!isp->secure_mode) return 0; rval = ipu_fw_authenticate(isp, 1); if (rval) goto out_free_pkg_dir; return 0; out_free_pkg_dir: ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir, psys->pkg_dir_dma_addr, psys->pkg_dir_size); out_unmap_fw_image: ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt); out_release_firmware: release_firmware(isp->cpd_fw); isp->cpd_fw = NULL; return rval; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #ifdef CONFIG_DEBUG_FS static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val) { struct ipu_psys *psys = data; *val = psys->icache_prefetch_sp; return 0; } static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val) { struct ipu_psys *psys = data; if (val != !!val) return -EINVAL; psys->icache_prefetch_sp = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops, ipu_psys_icache_prefetch_sp_get, ipu_psys_icache_prefetch_sp_set, "%llu\n"); static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val) { struct ipu_psys *psys = data; *val = psys->icache_prefetch_isp; return 0; } static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val) { struct ipu_psys *psys = data; if (val != !!val) return -EINVAL; psys->icache_prefetch_isp = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops, ipu_psys_icache_prefetch_isp_get, ipu_psys_icache_prefetch_isp_set, "%llu\n"); static int ipu_psys_init_debugfs(struct ipu_psys *psys) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir); if (IS_ERR(dir)) return -ENOMEM; file = debugfs_create_file("icache_prefetch_sp", 0600, dir, psys, &psys_icache_prefetch_sp_fops); if (IS_ERR(file)) goto err; file = debugfs_create_file("icache_prefetch_isp", 0600, dir, psys, &psys_icache_prefetch_isp_fops); if (IS_ERR(file)) goto err; 
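/* Both entries were created; remember the directory dentry for later cleanup (the error path below uses debugfs_remove_recursive()). */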
psys->debugfsdir = dir; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif #endif static int ipu_psys_sched_cmd(void *ptr) { struct ipu_psys *psys = ptr; size_t pending = 0; while (1) { wait_event_interruptible(psys->sched_cmd_wq, (kthread_should_stop() || (pending = atomic_read(&psys->wakeup_count)))); if (kthread_should_stop()) break; if (pending == 0) continue; mutex_lock(&psys->mutex); atomic_set(&psys->wakeup_count, 0); ipu_psys_run_next(psys); mutex_unlock(&psys->mutex); } return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static void start_sp(struct ipu_bus_device *adev) { struct ipu_psys *psys = ipu_bus_get_drvdata(adev); void __iomem *spc_regs_base = psys->pdata->base + psys->pdata->ipdata->hw_variant.spc_offset; u32 val = 0; val |= IPU_PSYS_SPC_STATUS_START | IPU_PSYS_SPC_STATUS_RUN | IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; val |= psys->icache_prefetch_sp ? IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); } static int query_sp(struct ipu_bus_device *adev) { struct ipu_psys *psys = ipu_bus_get_drvdata(adev); void __iomem *spc_regs_base = psys->pdata->base + psys->pdata->ipdata->hw_variant.spc_offset; u32 val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); /* return true when READY == 1, START == 0 */ val &= IPU_PSYS_SPC_STATUS_READY | IPU_PSYS_SPC_STATUS_START; return val == IPU_PSYS_SPC_STATUS_READY; } #else static void start_sp(struct ipu6_bus_device *adev) { struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); void __iomem *spc_regs_base = psys->pdata->base + psys->pdata->ipdata->hw_variant.spc_offset; u32 val = 0; val |= IPU6_PSYS_SPC_STATUS_START | IPU6_PSYS_SPC_STATUS_RUN | IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; val |= psys->icache_prefetch_sp ? 
IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0; writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); } static int query_sp(struct ipu6_bus_device *adev) { struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); void __iomem *spc_regs_base = psys->pdata->base + psys->pdata->ipdata->hw_variant.spc_offset; u32 val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL); /* return true when READY == 1, START == 0 */ val &= IPU6_PSYS_SPC_STATUS_READY | IPU6_PSYS_SPC_STATUS_START; return val == IPU6_PSYS_SPC_STATUS_READY; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static int ipu_psys_fw_init(struct ipu_psys *psys) { unsigned int size; struct ipu_fw_syscom_queue_config *queue_cfg; struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { { IPU_FW_PSYS_EVENT_QUEUE_SIZE, sizeof(struct ipu_fw_psys_event) } }; struct ipu_fw_psys_srv_init server_init = { .ddr_pkg_dir_address = 0, .host_ddr_pkg_dir = NULL, .pkg_dir_size = 0, .icache_prefetch_sp = psys->icache_prefetch_sp, .icache_prefetch_isp = psys->icache_prefetch_isp, }; struct ipu_fw_com_cfg fwcom = { .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, .output = fw_psys_event_queue_cfg, .specific_addr = &server_init, .specific_size = sizeof(server_init), .cell_start = start_sp, .cell_ready = query_sp, .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET, }; int i; size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; queue_cfg = devm_kzalloc(&psys->adev->dev, sizeof(*queue_cfg) * size, GFP_KERNEL); if (!queue_cfg) return -ENOMEM; for (i = 0; i < size; i++) { queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE; queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd); } fwcom.input = queue_cfg; fwcom.num_input_queues = size; fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base); if (!psys->fwcom) { dev_err(&psys->adev->dev, "psys fw com prepare failed\n"); return -EIO; } return 0; } static void run_fw_init_work(struct work_struct *work) { struct fw_init_task *task = (struct fw_init_task *)work; struct ipu_psys *psys = task->psys; int rval; rval = ipu_psys_fw_init(psys); if (rval) { dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval); ipu_psys_remove(psys->adev); } else { dev_info(&psys->adev->dev, "FW init done\n"); } } #else static int ipu_psys_fw_init(struct ipu_psys *psys) { struct ipu6_fw_syscom_queue_config *queue_cfg; struct device *dev = &psys->adev->auxdev.dev; unsigned int size; struct ipu6_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { { IPU_FW_PSYS_EVENT_QUEUE_SIZE, sizeof(struct ipu_fw_psys_event) } }; struct ipu_fw_psys_srv_init server_init = { .ddr_pkg_dir_address = 0, .host_ddr_pkg_dir = NULL, .pkg_dir_size = 0, .icache_prefetch_sp = psys->icache_prefetch_sp, .icache_prefetch_isp = psys->icache_prefetch_isp, }; struct ipu6_fw_com_cfg fwcom = { .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, .output = fw_psys_event_queue_cfg, .specific_addr = &server_init, .specific_size = sizeof(server_init), .cell_start = start_sp, .cell_ready = query_sp, .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET, }; int i; size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; queue_cfg = devm_kzalloc(dev, sizeof(*queue_cfg) * size, GFP_KERNEL); if (!queue_cfg) return -ENOMEM; for (i = 0; i < 
size; i++) { queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE; queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd); } fwcom.input = queue_cfg; fwcom.num_input_queues = size; fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; psys->fwcom = ipu6_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base); if (!psys->fwcom) { dev_err(dev, "psys fw com prepare failed\n"); return -EIO; } return 0; } static void run_fw_init_work(struct work_struct *work) { struct fw_init_task *task = (struct fw_init_task *)work; struct ipu_psys *psys = task->psys; struct device *dev = &psys->adev->auxdev.dev; int rval; rval = ipu_psys_fw_init(psys); if (rval) { dev_err(dev, "FW init failed(%d)\n", rval); ipu6_psys_remove(&psys->adev->auxdev); } else { dev_info(dev, "FW init done\n"); } } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static int ipu_psys_probe(struct ipu_bus_device *adev) { struct ipu_device *isp = adev->isp; struct ipu_psys_pg *kpg, *kpg0; struct ipu_psys *psys; unsigned int minor; int i, rval = -E2BIG; /* firmware is not ready, so defer the probe */ if (!isp->pkg_dir) return -EPROBE_DEFER; rval = ipu_mmu_hw_init(adev->mmu); if (rval) return rval; mutex_lock(&ipu_psys_mutex); minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); if (minor == IPU_PSYS_NUM_DEVICES) { dev_err(&adev->dev, "too many devices\n"); goto out_unlock; } psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL); if (!psys) { rval = -ENOMEM; goto out_unlock; } psys->adev = adev; psys->pdata = adev->pdata; psys->icache_prefetch_sp = 0; psys->power_gating = 0; ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev, psys_trace_blocks); cdev_init(&psys->cdev, &ipu_psys_fops); psys->cdev.owner = ipu_psys_fops.owner; rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1); if (rval) { dev_err(&adev->dev, "cdev_add failed (%d)\n", rval); goto out_unlock; } set_bit(minor, ipu_psys_devices); spin_lock_init(&psys->ready_lock); spin_lock_init(&psys->pgs_lock); psys->ready = 0; psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; mutex_init(&psys->mutex); INIT_LIST_HEAD(&psys->fhs); INIT_LIST_HEAD(&psys->pgs); INIT_LIST_HEAD(&psys->started_kcmds_list); init_waitqueue_head(&psys->sched_cmd_wq); atomic_set(&psys->wakeup_count, 0); /* * Create a thread to schedule commands sent to IPU firmware. * The thread reduces the coupling between the command scheduler * and queueing commands from the user to driver. 
*/ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, "psys_sched_cmd"); if (IS_ERR(psys->sched_cmd_thread)) { psys->sched_cmd_thread = NULL; mutex_destroy(&psys->mutex); goto out_unlock; } ipu_bus_set_drvdata(adev, psys); rval = ipu_psys_resource_pool_init(&psys->resource_pool_running); if (rval < 0) { dev_err(&psys->dev, "unable to alloc process group resources\n"); goto out_mutex_destroy; } ipu6_psys_hw_res_variant_init(); psys->pkg_dir = isp->pkg_dir; psys->pkg_dir_dma_addr = isp->pkg_dir_dma_addr; psys->pkg_dir_size = isp->pkg_dir_size; psys->fw_sgt = isp->fw_sgt; /* allocate and map memory for process groups */ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); if (!kpg) goto out_free_pgs; kpg->pg = dma_alloc_attrs(&adev->dev, IPU_PSYS_PG_MAX_SIZE, &kpg->pg_dma_addr, GFP_KERNEL, 0); if (!kpg->pg) { kfree(kpg); goto out_free_pgs; } kpg->size = IPU_PSYS_PG_MAX_SIZE; list_add(&kpg->list, &psys->pgs); } psys->caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); dev_info(&adev->dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); if (async_fw_init) { INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, run_fw_init_work); fw_init_task.psys = psys; schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); } else { rval = ipu_psys_fw_init(psys); } if (rval) { dev_err(&adev->dev, "FW init failed(%d)\n", rval); goto out_free_pgs; } psys->dev.parent = &adev->dev; psys->dev.bus = &ipu_psys_bus; psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); psys->dev.release = ipu_psys_dev_release; dev_set_name(&psys->dev, "ipu-psys%d", minor); rval = device_register(&psys->dev); if (rval < 0) { dev_err(&psys->dev, "psys device_register failed\n"); goto out_release_fw_com; } /* Add the hw stepping information to caps */ strscpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME, sizeof(psys->caps.dev_model)); mutex_unlock(&ipu_psys_mutex); #ifdef CONFIG_DEBUG_FS /* Debug fs failure is not fatal. 
*/ ipu_psys_init_debugfs(psys); #endif adev->isp->cpd_fw_reload = &cpd_fw_reload; dev_info(&adev->dev, "psys probe minor: %d\n", minor); ipu_mmu_hw_cleanup(adev->mmu); return 0; out_release_fw_com: ipu_fw_com_release(psys->fwcom, 1); out_free_pgs: list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { dma_free_attrs(&adev->dev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); kfree(kpg); } ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); out_mutex_destroy: mutex_destroy(&psys->mutex); cdev_del(&psys->cdev); if (psys->sched_cmd_thread) { kthread_stop(psys->sched_cmd_thread); psys->sched_cmd_thread = NULL; } out_unlock: /* Safe to call even if the init is not called */ ipu_trace_uninit(&adev->dev); mutex_unlock(&ipu_psys_mutex); ipu_mmu_hw_cleanup(adev->mmu); return rval; } #else static int ipu6_psys_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); struct device *dev = &auxdev->dev; struct ipu_psys_pg *kpg, *kpg0; struct ipu_psys *psys; unsigned int minor; int i, rval = -E2BIG; if (!adev->isp->bus_ready_to_probe) return -EPROBE_DEFER; if (!adev->pkg_dir) return -EPROBE_DEFER; ipu_ver = adev->isp->hw_ver; rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, IPU_PSYS_NUM_DEVICES, IPU6_PSYS_NAME); if (rval) { dev_err(dev, "can't alloc psys chrdev region (%d)\n", rval); return rval; } rval = ipu6_mmu_hw_init(adev->mmu); if (rval) goto out_unregister_chr_region; mutex_lock(&ipu_psys_mutex); minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); if (minor == IPU_PSYS_NUM_DEVICES) { dev_err(dev, "too many devices\n"); goto out_unlock; } psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); if (!psys) { rval = -ENOMEM; goto out_unlock; } adev->auxdrv_data = (const struct ipu6_auxdrv_data *)auxdev_id->driver_data; adev->auxdrv = to_auxiliary_drv(dev->driver); psys->adev = adev; psys->pdata = adev->pdata; psys->icache_prefetch_sp = 0; psys->power_gating = 0; cdev_init(&psys->cdev, &ipu_psys_fops); psys->cdev.owner = ipu_psys_fops.owner; rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1); if (rval) { dev_err(dev, "cdev_add failed (%d)\n", rval); goto out_unlock; } set_bit(minor, ipu_psys_devices); spin_lock_init(&psys->ready_lock); spin_lock_init(&psys->pgs_lock); psys->ready = 0; psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; mutex_init(&psys->mutex); INIT_LIST_HEAD(&psys->fhs); INIT_LIST_HEAD(&psys->pgs); INIT_LIST_HEAD(&psys->started_kcmds_list); init_waitqueue_head(&psys->sched_cmd_wq); atomic_set(&psys->wakeup_count, 0); /* * Create a thread to schedule commands sent to IPU firmware. * The thread reduces the coupling between the command scheduler * and queueing commands from the user to driver. 
*/ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, "psys_sched_cmd"); if (IS_ERR(psys->sched_cmd_thread)) { psys->sched_cmd_thread = NULL; mutex_destroy(&psys->mutex); goto out_unlock; } dev_set_drvdata(dev, psys); rval = ipu_psys_resource_pool_init(&psys->resource_pool_running); if (rval < 0) { dev_err(&psys->dev, "unable to alloc process group resources\n"); goto out_mutex_destroy; } ipu6_psys_hw_res_variant_init(); /* allocate and map memory for process groups */ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); if (!kpg) goto out_free_pgs; kpg->pg = dma_alloc_attrs(dev, IPU_PSYS_PG_MAX_SIZE, &kpg->pg_dma_addr, GFP_KERNEL, 0); if (!kpg->pg) { kfree(kpg); goto out_free_pgs; } kpg->size = IPU_PSYS_PG_MAX_SIZE; list_add(&kpg->list, &psys->pgs); } psys->caps.pg_count = ipu6_cpd_pkg_dir_get_num_entries(adev->pkg_dir); dev_info(dev, "pkg_dir entry count:%d\n", psys->caps.pg_count); if (async_fw_init) { INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, run_fw_init_work); fw_init_task.psys = psys; schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); } else { rval = ipu_psys_fw_init(psys); } if (rval) { dev_err(dev, "FW init failed(%d)\n", rval); goto out_free_pgs; } psys->dev.bus = &ipu6_psys_bus; psys->dev.parent = dev; psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); psys->dev.release = ipu_psys_dev_release; dev_set_name(&psys->dev, "ipu-psys%d", minor); rval = device_register(&psys->dev); if (rval < 0) { dev_err(dev, "psys device_register failed\n"); goto out_release_fw_com; } /* Add the hw stepping information to caps */ strscpy(psys->caps.dev_model, IPU6_MEDIA_DEV_MODEL_NAME, sizeof(psys->caps.dev_model)); mutex_unlock(&ipu_psys_mutex); dev_info(dev, "psys probe minor: %d\n", minor); ipu6_mmu_hw_cleanup(adev->mmu); return 0; out_release_fw_com: ipu6_fw_com_release(psys->fwcom, 1); out_free_pgs: list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { dma_free_attrs(dev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); kfree(kpg); } ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); out_mutex_destroy: mutex_destroy(&psys->mutex); cdev_del(&psys->cdev); if (psys->sched_cmd_thread) { kthread_stop(psys->sched_cmd_thread); psys->sched_cmd_thread = NULL; } out_unlock: /* Safe to call even if the init is not called */ mutex_unlock(&ipu_psys_mutex); ipu6_mmu_hw_cleanup(adev->mmu); out_unregister_chr_region: unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); return rval; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static void ipu_psys_remove(struct ipu_bus_device *adev) { struct ipu_device *isp = adev->isp; struct ipu_psys *psys = ipu_bus_get_drvdata(adev); #else static void ipu6_psys_remove(struct auxiliary_device *auxdev) { struct device *dev = &auxdev->dev; struct ipu_psys *psys = dev_get_drvdata(&auxdev->dev); #endif struct ipu_psys_pg *kpg, *kpg0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #ifdef CONFIG_DEBUG_FS if (isp->ipu_dir) debugfs_remove_recursive(psys->debugfsdir); #endif #endif if (psys->sched_cmd_thread) { kthread_stop(psys->sched_cmd_thread); psys->sched_cmd_thread = NULL; } mutex_lock(&ipu_psys_mutex); list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dma_free_attrs(&adev->dev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); #else dma_free_attrs(dev, kpg->size, kpg->pg, kpg->pg_dma_addr, 0); #endif 
kfree(kpg); } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1)) dev_err(&adev->dev, "fw com release failed.\n"); #else if (psys->fwcom && ipu6_fw_com_release(psys->fwcom, 1)) dev_err(dev, "fw com release failed.\n"); #endif kfree(psys->server_init); kfree(psys->syscom_config); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ipu_trace_uninit(&adev->dev); #endif ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); device_unregister(&psys->dev); clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); cdev_del(&psys->cdev); mutex_unlock(&ipu_psys_mutex); mutex_destroy(&psys->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_info(&adev->dev, "removed\n"); #else dev_info(dev, "removed\n"); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static irqreturn_t psys_isr_threaded(struct ipu_bus_device *adev) { struct ipu_psys *psys = ipu_bus_get_drvdata(adev); void __iomem *base = psys->pdata->base; u32 status; int r; mutex_lock(&psys->mutex); #ifdef CONFIG_PM r = pm_runtime_get_if_in_use(&psys->adev->dev); if (!r || WARN_ON_ONCE(r < 0)) { mutex_unlock(&psys->mutex); return IRQ_NONE; } #endif status = readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS); writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) { writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); ipu_psys_handle_events(psys); } pm_runtime_put(&psys->adev->dev); mutex_unlock(&psys->mutex); return status ? IRQ_HANDLED : IRQ_NONE; } #else static irqreturn_t psys_isr_threaded(struct ipu6_bus_device *adev) { struct ipu_psys *psys = ipu6_bus_get_drvdata(adev); struct device *dev = &psys->adev->auxdev.dev; void __iomem *base = psys->pdata->base; u32 status; int r; mutex_lock(&psys->mutex); r = pm_runtime_get_if_in_use(dev); if (!r || WARN_ON_ONCE(r < 0)) { mutex_unlock(&psys->mutex); return IRQ_NONE; } status = readl(base + IPU6_REG_PSYS_GPDEV_IRQ_STATUS); writel(status, base + IPU6_REG_PSYS_GPDEV_IRQ_CLEAR); if (status & IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU6_PSYS_GPDEV_FWIRQ0)) { writel(0, base + IPU6_REG_PSYS_GPDEV_FWIRQ(0)); ipu_psys_handle_events(psys); } pm_runtime_put(dev); mutex_unlock(&psys->mutex); return status ? 
IRQ_HANDLED : IRQ_NONE; } #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static struct ipu_bus_driver ipu_psys_driver = { .probe = ipu_psys_probe, .remove = ipu_psys_remove, .isr_threaded = psys_isr_threaded, .wanted = IPU_PSYS_NAME, .drv = { .name = IPU_PSYS_NAME, .owner = THIS_MODULE, .pm = PSYS_PM_OPS, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; static int __init ipu_psys_init(void) { int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME); if (rval) { pr_err("can't alloc psys chrdev region (%d)\n", rval); return rval; } rval = bus_register(&ipu_psys_bus); if (rval) { pr_warn("can't register psys bus (%d)\n", rval); goto out_bus_register; } ipu_bus_register_driver(&ipu_psys_driver); return rval; out_bus_register: unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); return rval; } static void __exit ipu_psys_exit(void) { ipu_bus_unregister_driver(&ipu_psys_driver); bus_unregister(&ipu_psys_bus); unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); } static const struct pci_device_id ipu_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_P_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_N_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_RPL_P_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_MTL_PCI_ID)}, {0,} }; MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); module_init(ipu_psys_init); module_exit(ipu_psys_exit); #else static const struct ipu6_auxdrv_data ipu6_psys_auxdrv_data = { .isr_threaded = psys_isr_threaded, .wake_isr_thread = true, }; static const struct auxiliary_device_id ipu6_psys_id_table[] = { { .name = "intel_ipu6.psys", .driver_data = (kernel_ulong_t)&ipu6_psys_auxdrv_data, }, { } }; MODULE_DEVICE_TABLE(auxiliary, ipu6_psys_id_table); static struct auxiliary_driver ipu6_psys_aux_driver = { .name = IPU6_PSYS_NAME, .probe = ipu6_psys_probe, .remove = ipu6_psys_remove, .id_table = ipu6_psys_id_table, .driver = { .pm = &psys_pm_ops, }, }; module_auxiliary_driver(ipu6_psys_aux_driver); #endif MODULE_AUTHOR("Antti Laakso "); MODULE_AUTHOR("Bin Han "); MODULE_AUTHOR("Renwei Wu "); MODULE_AUTHOR("Jianxu Zheng "); MODULE_AUTHOR("Xia Wu "); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Zaikuo Wang "); MODULE_AUTHOR("Yunliang Ding "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu processing system driver"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) || IS_ENABLED(CONFIG_DRM_I915_HAS_SRIOV) MODULE_IMPORT_NS(DMA_BUF); #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) MODULE_IMPORT_NS(INTEL_IPU6); #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-psys.h000066400000000000000000000247561471702545500271750ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU_PSYS_H #define IPU_PSYS_H #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #include "ipu.h" #include "ipu-pdata.h" #else #include "ipu6.h" #include "ipu6-bus.h" #endif #include "ipu-fw-psys.h" #include "ipu-platform-psys.h" #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) /* PSYS Info bits*/ #define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2c + ((a) * 12)) #define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5c + ((a) * 12)) #define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 #define IPU_PSYS_REG_SPC_START_PC 0x4 #define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 #define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 #define 
IPU_PSYS_SPC_STATUS_START BIT(1) #define IPU_PSYS_SPC_STATUS_RUN BIT(3) #define IPU_PSYS_SPC_STATUS_READY BIT(5) #define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) #define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) #define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 #define IPU_INFO_ENABLE_SNOOP BIT(0) #define IPU_INFO_DEC_FORCE_FLUSH BIT(1) #define IPU_INFO_DEC_PASS_THRU BIT(2) #define IPU_INFO_ZLW BIT(3) #define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1f) << 4) #define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9) #define IPU_INFO_IMR_BASE BIT(10) #define IPU_INFO_IMR_DESTINED BIT(11) #define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF #define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000 #define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000 #define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc) #define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc) #define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc) enum ipu_device_ab_group1_target_id { IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM, IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM, IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM, IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG, IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR, IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL, IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ, IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED, IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED, IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0, IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1, IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1, IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ, IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2, IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA, IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA, IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP, IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER, IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB, }; /* IRQ-related registers in PSYS */ #define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200 #define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204 #define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208 #define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c #define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210 #define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214 /* There are 8 FW interrupts, n = 0..7 */ #define IPU_PSYS_GPDEV_FWIRQ0 5 #define IPU_PSYS_GPDEV_FWIRQ1 6 #define IPU_PSYS_GPDEV_FWIRQ2 7 #define IPU_PSYS_GPDEV_FWIRQ3 8 #define IPU_PSYS_GPDEV_FWIRQ4 9 #define IPU_PSYS_GPDEV_FWIRQ5 10 #define IPU_PSYS_GPDEV_FWIRQ6 11 #define IPU_PSYS_GPDEV_FWIRQ7 12 #define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n)) #define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100) /* * psys subdomains power request regs */ enum ipu_device_buttress_psys_domain_pos { IPU_PSYS_SUBDOMAIN_ISA = 0, IPU_PSYS_SUBDOMAIN_PSA = 1, IPU_PSYS_SUBDOMAIN_BB = 2, IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */ }; #define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \ BIT(IPU_PSYS_SUBDOMAIN_BB)) #define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0 #define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4 #define IPU_PSYS_CMD_TIMEOUT_MS 2000 #define IPU_PSYS_OPEN_TIMEOUT_US 50 #define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US) #endif #define IPU_PSYS_PG_POOL_SIZE 16 #define IPU_PSYS_PG_MAX_SIZE 8192 #define IPU_MAX_PSYS_CMD_BUFFERS 32 #define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS #define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS #define IPU_PSYS_CLOSE_TIMEOUT_US 50 #define IPU_PSYS_CLOSE_TIMEOUT (100000 / 
IPU_PSYS_CLOSE_TIMEOUT_US) #define IPU_MAX_RESOURCES 128 #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) extern enum ipu6_version ipu_ver; #endif /* Opaque structure. Do not access fields. */ struct ipu_resource { u32 id; int elements; /* Number of elements available to allocation */ unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ }; enum ipu_resource_type { IPU_RESOURCE_DEV_CHN = 0, IPU_RESOURCE_EXT_MEM, IPU_RESOURCE_DFM }; /* Allocation of resource(s) */ /* Opaque structure. Do not access fields. */ struct ipu_resource_alloc { enum ipu_resource_type type; struct ipu_resource *resource; int elements; int pos; }; /* * This struct represents all of the currently allocated * resources from IPU model. It is used also for allocating * resources for the next set of PGs to be run on IPU * (ie. those PGs which are not yet being run and which don't * yet reserve real IPU resources). * Use larger array to cover existing resource quantity */ /* resource size may need expand for new resource model */ struct ipu_psys_resource_pool { u32 cells; /* Bitmask of cells allocated */ struct ipu_resource dev_channels[16]; struct ipu_resource ext_memory[32]; struct ipu_resource dfms[16]; DECLARE_BITMAP(cmd_queues, 32); /* Protects cmd_queues bitmap */ spinlock_t queues_lock; }; /* * This struct keeps book of the resources allocated for a specific PG. * It is used for freeing up resources from struct ipu_psys_resources * when the PG is released from IPU (or model of IPU). */ struct ipu_psys_resource_alloc { u32 cells; /* Bitmask of cells needed */ struct ipu_resource_alloc resource_alloc[IPU_MAX_RESOURCES]; int resources; }; struct task_struct; struct ipu_psys { struct ipu_psys_capability caps; struct cdev cdev; struct device dev; struct mutex mutex; /* Psys various */ int ready; /* psys fw status */ bool icache_prefetch_sp; bool icache_prefetch_isp; spinlock_t ready_lock; /* protect psys firmware state */ spinlock_t pgs_lock; /* Protect pgs list access */ struct list_head fhs; struct list_head pgs; struct list_head started_kcmds_list; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_psys_pdata *pdata; struct ipu_bus_device *adev; #else struct ipu6_psys_pdata *pdata; struct ipu6_bus_device *adev; #endif struct ia_css_syscom_context *dev_ctx; struct ia_css_syscom_config *syscom_config; struct ia_css_psys_server_init *server_init; struct task_struct *sched_cmd_thread; wait_queue_head_t sched_cmd_wq; atomic_t wakeup_count; /* Psys schedule thread wakeup count */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #ifdef CONFIG_DEBUG_FS struct dentry *debugfsdir; #endif #endif /* Resources needed to be managed for process groups */ struct ipu_psys_resource_pool resource_pool_running; const struct firmware *fw; struct sg_table fw_sgt; u64 *pkg_dir; dma_addr_t pkg_dir_dma_addr; unsigned int pkg_dir_size; unsigned long timeout; int active_kcmds, started_kcmds; void *fwcom; int power_gating; }; struct ipu_psys_fh { struct ipu_psys *psys; struct mutex mutex; /* Protects bufs_list & kcmds fields */ struct list_head list; /* Holds all buffers that this fh owns */ struct list_head bufs_list; /* Holds all descriptors (fd:kbuffer associations) */ struct list_head descs_list; struct list_head bufs_lru; wait_queue_head_t wait; struct ipu_psys_scheduler sched; u32 num_bufs; u32 num_descs; u32 num_bufs_lru; }; struct ipu_psys_pg { struct ipu_fw_psys_process_group *pg; size_t size; size_t pg_size; dma_addr_t pg_dma_addr; struct list_head list; struct ipu_psys_resource_alloc resource_alloc; }; 
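/*
 * Illustrative sketch only (not part of the driver build): how the resource
 * pool helpers implemented in ipu-resources.c are typically strung together
 * for one process group. The dev, pg and pg_manifest variables stand for
 * whatever the caller (e.g. the psys command scheduler) already holds; they
 * are placeholders here, not real driver symbols.
 *
 *	struct ipu_psys_resource_pool pool;
 *	struct ipu_psys_resource_alloc alloc = {};
 *	int ret;
 *
 *	ret = ipu_psys_resource_pool_init(&pool);
 *	if (ret)
 *		return ret;
 *
 *	ret = ipu_psys_allocate_resources(dev, pg, pg_manifest, &alloc, &pool);
 *	if (!ret) {
 *		... run the process group ...
 *		ipu_psys_free_resources(&alloc, &pool);
 *	}
 *
 *	ipu_psys_resource_pool_cleanup(&pool);
 */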
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct ipu6_psys_constraint { struct list_head list; unsigned int min_freq; }; #endif struct ipu_psys_kcmd { struct ipu_psys_fh *fh; struct list_head list; struct ipu_psys_buffer_set *kbuf_set; enum ipu_psys_cmd_state state; void *pg_manifest; size_t pg_manifest_size; struct ipu_psys_kbuffer **kbufs; struct ipu_psys_buffer *buffers; size_t nbuffers; struct ipu_fw_psys_process_group *pg_user; struct ipu_psys_pg *kpg; u64 user_token; u64 issue_id; u32 priority; u32 kernel_enable_bitmap[4]; u32 terminal_enable_bitmap[4]; u32 routing_enable_bitmap[4]; u32 rbm[5]; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_buttress_constraint constraint; #else struct ipu6_psys_constraint constraint; #endif struct ipu_psys_event ev; struct timer_list watchdog; }; struct ipu_dma_buf_attach { struct device *dev; u64 len; void *userptr; struct sg_table *sgt; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) bool vma_is_io; #endif struct page **pages; size_t npages; }; struct ipu_psys_kbuffer { u64 len; void *userptr; void *kaddr; struct list_head list; dma_addr_t dma_addr; struct sg_table *sgt; struct dma_buf_attachment *db_attach; struct dma_buf *dbuf; u32 flags; /* The number of times this buffer is mapped */ atomic_t map_count; bool valid; /* True when buffer is usable */ }; struct ipu_psys_desc { struct ipu_psys_kbuffer *kbuf; struct list_head list; u32 fd; }; #define inode_to_ipu_psys(inode) \ container_of((inode)->i_cdev, struct ipu_psys, cdev) void ipu_psys_setup_hw(struct ipu_psys *psys); void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on); void ipu_psys_handle_events(struct ipu_psys *psys); int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); void ipu_psys_run_next(struct ipu_psys *psys); struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); struct ipu_psys_kbuffer * ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); struct ipu_psys_kbuffer * ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh); struct ipu_psys_kbuffer * ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool); void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool *pool); struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh); long ipu_ioctl_dqevent(struct ipu_psys_event *event, struct ipu_psys_fh *fh, unsigned int f_flags); #endif /* IPU_PSYS_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu-resources.c000066400000000000000000000552541471702545500302010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2015 - 2024 Intel Corporation #include #include #include #include #include #include #include "ipu-fw-psys.h" #include "ipu-psys.h" struct ipu6_psys_hw_res_variant hw_var; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) void ipu6_psys_hw_res_variant_init(void) { if (ipu_ver == IPU_VER_6SE) { hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID; } else if (ipu_ver == IPU_VER_6) { hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID; } else if (ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) { hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID; } else { WARN(1, "ipu6 psys res var is not initialised correctly."); } hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn; hw_var.set_proc_dfm_bitmap = 
ipu6_fw_psys_set_proc_dfm_bitmap; hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem; hw_var.get_pgm_by_proc = ipu6_fw_psys_get_program_manifest_by_process; } static const struct ipu_fw_resource_definitions *get_res(void) { if (ipu_ver == IPU_VER_6SE) return ipu6se_res_defs; if (ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) return ipu6ep_res_defs; return ipu6_res_defs; } #else void ipu6_psys_hw_res_variant_init(void) { if (ipu_ver == IPU6_VER_6SE) { hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID; } else if (ipu_ver == IPU6_VER_6) { hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID; } else if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) { hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID; } else { WARN(1, "ipu6 psys res var is not initialised correctly."); } hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn; hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap; hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem; hw_var.get_pgm_by_proc = ipu6_fw_psys_get_program_manifest_by_process; } static const struct ipu_fw_resource_definitions *get_res(void) { if (ipu_ver == IPU6_VER_6SE) return ipu6se_res_defs; if (ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) return ipu6ep_res_defs; return ipu6_res_defs; } #endif static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) { if (elements <= 0) { res->bitmap = NULL; return 0; } res->bitmap = bitmap_zalloc(elements, GFP_KERNEL); if (!res->bitmap) return -ENOMEM; res->elements = elements; res->id = id; return 0; } static unsigned long ipu_resource_alloc_with_pos(struct ipu_resource *res, int n, int pos, struct ipu_resource_alloc *alloc, enum ipu_resource_type type) { unsigned long p; if (n <= 0) { alloc->elements = 0; return 0; } if (!res->bitmap || pos >= res->elements) return (unsigned long)(-ENOSPC); p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0); alloc->resource = NULL; if (p != pos) return (unsigned long)(-ENOSPC); bitmap_set(res->bitmap, p, n); alloc->resource = res; alloc->elements = n; alloc->pos = p; alloc->type = type; return pos; } static unsigned long ipu_resource_alloc(struct ipu_resource *res, int n, struct ipu_resource_alloc *alloc, enum ipu_resource_type type) { unsigned long p; if (n <= 0) { alloc->elements = 0; return 0; } if (!res->bitmap) return (unsigned long)(-ENOSPC); p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); alloc->resource = NULL; if (p >= res->elements) return (unsigned long)(-ENOSPC); bitmap_set(res->bitmap, p, n); alloc->resource = res; alloc->elements = n; alloc->pos = p; alloc->type = type; return p; } static void ipu_resource_free(struct ipu_resource_alloc *alloc) { if (alloc->elements <= 0) return; if (alloc->type == IPU_RESOURCE_DFM) *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); else bitmap_clear(alloc->resource->bitmap, alloc->pos, alloc->elements); alloc->resource = NULL; } static void ipu_resource_cleanup(struct ipu_resource *res) { bitmap_free(res->bitmap); res->bitmap = NULL; } /********** IPU PSYS-specific resource handling **********/ int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool) { int i, j, k, ret; const struct ipu_fw_resource_definitions *res_defs; res_defs = get_res(); spin_lock_init(&pool->queues_lock); pool->cells = 0; for (i = 0; i < res_defs->num_dev_channels; i++) { ret = 
ipu_resource_init(&pool->dev_channels[i], i, res_defs->dev_channels[i]); if (ret) goto error; } for (j = 0; j < res_defs->num_ext_mem_ids; j++) { ret = ipu_resource_init(&pool->ext_memory[j], j, res_defs->ext_mem_ids[j]); if (ret) goto memory_error; } for (k = 0; k < res_defs->num_dfm_ids; k++) { ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); if (ret) goto dfm_error; } spin_lock(&pool->queues_lock); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (ipu_ver == IPU_VER_6SE) #else if (ipu_ver == IPU6_VER_6SE) #endif bitmap_zero(pool->cmd_queues, IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID); else bitmap_zero(pool->cmd_queues, IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID); spin_unlock(&pool->queues_lock); return 0; dfm_error: for (k--; k >= 0; k--) ipu_resource_cleanup(&pool->dfms[k]); memory_error: for (j--; j >= 0; j--) ipu_resource_cleanup(&pool->ext_memory[j]); error: for (i--; i >= 0; i--) ipu_resource_cleanup(&pool->dev_channels[i]); return ret; } void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src, struct ipu_psys_resource_pool *dest) { int i; const struct ipu_fw_resource_definitions *res_defs; res_defs = get_res(); dest->cells = src->cells; for (i = 0; i < res_defs->num_dev_channels; i++) *dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap; for (i = 0; i < res_defs->num_ext_mem_ids; i++) *dest->ext_memory[i].bitmap = *src->ext_memory[i].bitmap; for (i = 0; i < res_defs->num_dfm_ids; i++) *dest->dfms[i].bitmap = *src->dfms[i].bitmap; } void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool *pool) { u32 i; const struct ipu_fw_resource_definitions *res_defs; res_defs = get_res(); for (i = 0; i < res_defs->num_dev_channels; i++) ipu_resource_cleanup(&pool->dev_channels[i]); for (i = 0; i < res_defs->num_ext_mem_ids; i++) ipu_resource_cleanup(&pool->ext_memory[i]); for (i = 0; i < res_defs->num_dfm_ids; i++) ipu_resource_cleanup(&pool->dfms[i]); } static int __alloc_one_resrc(const struct device *dev, struct ipu_fw_psys_process *process, struct ipu_resource *resource, struct ipu_fw_generic_program_manifest *pm, u32 resource_id, struct ipu_psys_resource_alloc *alloc) { const u16 resource_req = pm->dev_chn_size[resource_id]; const u16 resource_offset_req = pm->dev_chn_offset[resource_id]; unsigned long retl; if (!resource_req) return -ENXIO; if (alloc->resources >= IPU_MAX_RESOURCES) { dev_err(dev, "out of resource handles\n"); return -ENOSPC; } if (resource_offset_req != (u16)(-1)) retl = ipu_resource_alloc_with_pos (resource, resource_req, resource_offset_req, &alloc->resource_alloc[alloc->resources], IPU_RESOURCE_DEV_CHN); else retl = ipu_resource_alloc (resource, resource_req, &alloc->resource_alloc[alloc->resources], IPU_RESOURCE_DEV_CHN); if (IS_ERR_VALUE(retl)) { dev_dbg(dev, "out of device channel resources\n"); return (int)retl; } alloc->resources++; return 0; } static int ipu_psys_allocate_one_dfm(const struct device *dev, struct ipu_fw_psys_process *process, struct ipu_resource *resource, struct ipu_fw_generic_program_manifest *pm, u32 resource_id, struct ipu_psys_resource_alloc *alloc) { u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id]; u32 active_dfm_bitmap_req = pm->dfm_active_port_bitmap[resource_id]; const u8 is_relocatable = pm->is_dfm_relocatable[resource_id]; struct ipu_resource_alloc *alloc_resource; unsigned long p = 0; if (!dfm_bitmap_req) return -ENXIO; if (alloc->resources >= IPU_MAX_RESOURCES) { dev_err(dev, "out of resource handles\n"); return -ENOSPC; } if (!resource->bitmap) return -ENOSPC; if (!is_relocatable) { if 
(*resource->bitmap & dfm_bitmap_req) { dev_warn(dev, "out of dfm resources, req 0x%x, get 0x%lx\n", dfm_bitmap_req, *resource->bitmap); return -ENOSPC; } *resource->bitmap |= dfm_bitmap_req; } else { unsigned int n = hweight32(dfm_bitmap_req); p = bitmap_find_next_zero_area(resource->bitmap, resource->elements, 0, n, 0); if (p >= resource->elements) return -ENOSPC; bitmap_set(resource->bitmap, p, n); dfm_bitmap_req = dfm_bitmap_req << p; active_dfm_bitmap_req = active_dfm_bitmap_req << p; } alloc_resource = &alloc->resource_alloc[alloc->resources]; alloc_resource->resource = resource; /* Using elements to indicate the bitmap */ alloc_resource->elements = dfm_bitmap_req; alloc_resource->pos = p; alloc_resource->type = IPU_RESOURCE_DFM; alloc->resources++; return 0; } /* * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM) * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.) */ static int __alloc_mem_resrc(const struct device *dev, struct ipu_fw_psys_process *process, struct ipu_resource *resource, struct ipu_fw_generic_program_manifest *pm, u32 ext_mem_type_id, u32 ext_mem_bank_id, struct ipu_psys_resource_alloc *alloc) { const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id]; const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id]; unsigned long retl; if (!memory_resource_req) return -ENXIO; if (alloc->resources >= IPU_MAX_RESOURCES) { dev_err(dev, "out of resource handles\n"); return -ENOSPC; } if (memory_offset_req != (u16)(-1)) retl = ipu_resource_alloc_with_pos (resource, memory_resource_req, memory_offset_req, &alloc->resource_alloc[alloc->resources], IPU_RESOURCE_EXT_MEM); else retl = ipu_resource_alloc (resource, memory_resource_req, &alloc->resource_alloc[alloc->resources], IPU_RESOURCE_EXT_MEM); if (IS_ERR_VALUE(retl)) { dev_dbg(dev, "out of memory resources\n"); return (int)retl; } alloc->resources++; return 0; } int ipu_psys_allocate_cmd_queue_resource(struct ipu_psys_resource_pool *pool) { unsigned long p; int size, start; size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID; start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (ipu_ver == IPU_VER_6SE) { #else if (ipu_ver == IPU6_VER_6SE) { #endif size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID; start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID; } spin_lock(&pool->queues_lock); /* find available cmd queue from ppg0_cmd_id */ p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0); if (p >= size) { spin_unlock(&pool->queues_lock); return -ENOSPC; } bitmap_set(pool->cmd_queues, p, 1); spin_unlock(&pool->queues_lock); return p; } void ipu_psys_free_cmd_queue_resource(struct ipu_psys_resource_pool *pool, u8 queue_id) { spin_lock(&pool->queues_lock); bitmap_clear(pool->cmd_queues, queue_id, 1); spin_unlock(&pool->queues_lock); } int ipu_psys_try_allocate_resources(struct device *dev, struct ipu_fw_psys_process_group *pg, void *pg_manifest, struct ipu_psys_resource_pool *pool) { u32 id, idx; u32 mem_type_id; int ret, i; u16 *process_offset_table; u8 processes; u32 cells = 0; struct ipu_psys_resource_alloc *alloc; const struct ipu_fw_resource_definitions *res_defs; if (!pg) return -EINVAL; process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); processes = pg->process_count; alloc = kzalloc(sizeof(*alloc), GFP_KERNEL); if (!alloc) return -ENOMEM; res_defs = get_res(); for (i = 0; i < processes; i++) { u32 cell; struct ipu_fw_psys_process *process = (struct ipu_fw_psys_process *) ((char *)pg + process_offset_table[i]); struct 
ipu_fw_generic_program_manifest pm; memset(&pm, 0, sizeof(pm)); if (!process) { dev_err(dev, "can not get process\n"); ret = -ENOENT; goto free_out; } ret = ipu_fw_psys_get_program_manifest_by_process (&pm, pg_manifest, process); if (ret < 0) { dev_err(dev, "can not get manifest\n"); goto free_out; } if (pm.cell_id == res_defs->num_cells && pm.cell_type_id == res_defs->num_cells_type) { cell = res_defs->num_cells; } else if ((pm.cell_id != res_defs->num_cells && pm.cell_type_id == res_defs->num_cells_type)) { cell = pm.cell_id; } else { /* Find a free cell of desired type */ u32 type = pm.cell_type_id; for (cell = 0; cell < res_defs->num_cells; cell++) if (res_defs->cells[cell] == type && ((pool->cells | cells) & (1 << cell)) == 0) break; if (cell >= res_defs->num_cells) { dev_dbg(dev, "no free cells of right type\n"); ret = -ENOSPC; goto free_out; } } if (cell < res_defs->num_cells) cells |= 1 << cell; if (pool->cells & cells) { dev_dbg(dev, "out of cell resources\n"); ret = -ENOSPC; goto free_out; } if (pm.dev_chn_size) { for (id = 0; id < res_defs->num_dev_channels; id++) { ret = __alloc_one_resrc(dev, process, &pool->dev_channels[id], &pm, id, alloc); if (ret == -ENXIO) continue; if (ret) goto free_out; } } if (pm.dfm_port_bitmap) { for (id = 0; id < res_defs->num_dfm_ids; id++) { ret = ipu_psys_allocate_one_dfm (dev, process, &pool->dfms[id], &pm, id, alloc); if (ret == -ENXIO) continue; if (ret) goto free_out; } } if (pm.ext_mem_size) { for (mem_type_id = 0; mem_type_id < res_defs->num_ext_mem_types; mem_type_id++) { u32 bank = res_defs->num_ext_mem_ids; if (cell != res_defs->num_cells) { idx = res_defs->cell_mem_row * cell + mem_type_id; bank = res_defs->cell_mem[idx]; } if (bank == res_defs->num_ext_mem_ids) continue; ret = __alloc_mem_resrc(dev, process, &pool->ext_memory[bank], &pm, mem_type_id, bank, alloc); if (ret == -ENXIO) continue; if (ret) goto free_out; } } } pool->cells |= cells; kfree(alloc); return 0; free_out: dev_dbg(dev, "failed to try_allocate resource\n"); kfree(alloc); return ret; } /* * Allocate resources for pg from `pool'. Record the allocated * resources in `alloc'. Returns 0 on success, -ENOSPC * if there are not enough resources, in which case resources * are not allocated at all, or some other error code for other failures.
*/ int ipu_psys_allocate_resources(const struct device *dev, struct ipu_fw_psys_process_group *pg, void *pg_manifest, struct ipu_psys_resource_alloc *alloc, struct ipu_psys_resource_pool *pool) { u32 id; u32 mem_type_id; int ret, i; u16 *process_offset_table; u8 processes; u32 cells = 0; int p, idx; u32 bmp, a_bmp; const struct ipu_fw_resource_definitions *res_defs; if (!pg) return -EINVAL; res_defs = get_res(); process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); processes = pg->process_count; for (i = 0; i < processes; i++) { u32 cell; struct ipu_fw_psys_process *process = (struct ipu_fw_psys_process *) ((char *)pg + process_offset_table[i]); struct ipu_fw_generic_program_manifest pm; memset(&pm, 0, sizeof(pm)); if (!process) { dev_err(dev, "can not get process\n"); ret = -ENOENT; goto free_out; } ret = ipu_fw_psys_get_program_manifest_by_process (&pm, pg_manifest, process); if (ret < 0) { dev_err(dev, "can not get manifest\n"); goto free_out; } if (pm.cell_id == res_defs->num_cells && pm.cell_type_id == res_defs->num_cells_type) { cell = res_defs->num_cells; } else if ((pm.cell_id != res_defs->num_cells && pm.cell_type_id == res_defs->num_cells_type)) { cell = pm.cell_id; } else { /* Find a free cell of desired type */ u32 type = pm.cell_type_id; for (cell = 0; cell < res_defs->num_cells; cell++) if (res_defs->cells[cell] == type && ((pool->cells | cells) & (1 << cell)) == 0) break; if (cell >= res_defs->num_cells) { dev_dbg(dev, "no free cells of right type\n"); ret = -ENOSPC; goto free_out; } ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); if (ret) goto free_out; } if (cell < res_defs->num_cells) cells |= 1 << cell; if (pool->cells & cells) { dev_dbg(dev, "out of cell resources\n"); ret = -ENOSPC; goto free_out; } if (pm.dev_chn_size) { for (id = 0; id < res_defs->num_dev_channels; id++) { ret = __alloc_one_resrc(dev, process, &pool->dev_channels[id], &pm, id, alloc); if (ret == -ENXIO) continue; if (ret) goto free_out; idx = alloc->resources - 1; p = alloc->resource_alloc[idx].pos; ret = ipu_fw_psys_set_proc_dev_chn(process, id, p); if (ret) goto free_out; } } if (pm.dfm_port_bitmap) { for (id = 0; id < res_defs->num_dfm_ids; id++) { ret = ipu_psys_allocate_one_dfm(dev, process, &pool->dfms[id], &pm, id, alloc); if (ret == -ENXIO) continue; if (ret) goto free_out; idx = alloc->resources - 1; p = alloc->resource_alloc[idx].pos; bmp = pm.dfm_port_bitmap[id]; bmp = bmp << p; a_bmp = pm.dfm_active_port_bitmap[id]; a_bmp = a_bmp << p; ret = ipu_fw_psys_set_proc_dfm_bitmap(process, id, bmp, a_bmp); if (ret) goto free_out; } } if (pm.ext_mem_size) { for (mem_type_id = 0; mem_type_id < res_defs->num_ext_mem_types; mem_type_id++) { u32 bank = res_defs->num_ext_mem_ids; if (cell != res_defs->num_cells) { idx = res_defs->cell_mem_row * cell + mem_type_id; bank = res_defs->cell_mem[idx]; } if (bank == res_defs->num_ext_mem_ids) continue; ret = __alloc_mem_resrc(dev, process, &pool->ext_memory[bank], &pm, mem_type_id, bank, alloc); if (ret == -ENXIO) continue; if (ret) goto free_out; /* no return value check here because the fw api * does some checks of its own and would return * non-zero except when mem_type_id == 0. That may * be caused by the above flow allocating an * improper mem_bank_id.
*/ idx = alloc->resources - 1; p = alloc->resource_alloc[idx].pos; ipu_fw_psys_set_process_ext_mem(process, mem_type_id, bank, p); } } } alloc->cells |= cells; pool->cells |= cells; return 0; free_out: dev_err(dev, "failed to allocate resources, ret %d\n", ret); ipu_psys_reset_process_cell(dev, pg, pg_manifest, i + 1); ipu_psys_free_resources(alloc, pool); return ret; } int ipu_psys_move_resources(const struct device *dev, struct ipu_psys_resource_alloc *alloc, struct ipu_psys_resource_pool *source_pool, struct ipu_psys_resource_pool *target_pool) { int i; if (target_pool->cells & alloc->cells) { dev_dbg(dev, "out of cell resources\n"); return -ENOSPC; } for (i = 0; i < alloc->resources; i++) { unsigned long bitmap = 0; unsigned int id = alloc->resource_alloc[i].resource->id; unsigned long fbit, end; switch (alloc->resource_alloc[i].type) { case IPU_RESOURCE_DEV_CHN: bitmap_set(&bitmap, alloc->resource_alloc[i].pos, alloc->resource_alloc[i].elements); if (*target_pool->dev_channels[id].bitmap & bitmap) return -ENOSPC; break; case IPU_RESOURCE_EXT_MEM: end = alloc->resource_alloc[i].elements + alloc->resource_alloc[i].pos; fbit = find_next_bit(target_pool->ext_memory[id].bitmap, end, alloc->resource_alloc[i].pos); /* if find_next_bit returns "end", no bit was set in the range */ if (end != fbit) return -ENOSPC; break; case IPU_RESOURCE_DFM: bitmap = alloc->resource_alloc[i].elements; if (*target_pool->dfms[id].bitmap & bitmap) return -ENOSPC; break; default: dev_err(dev, "Illegal resource type\n"); return -EINVAL; } } for (i = 0; i < alloc->resources; i++) { u32 id = alloc->resource_alloc[i].resource->id; switch (alloc->resource_alloc[i].type) { case IPU_RESOURCE_DEV_CHN: bitmap_set(target_pool->dev_channels[id].bitmap, alloc->resource_alloc[i].pos, alloc->resource_alloc[i].elements); ipu_resource_free(&alloc->resource_alloc[i]); alloc->resource_alloc[i].resource = &target_pool->dev_channels[id]; break; case IPU_RESOURCE_EXT_MEM: bitmap_set(target_pool->ext_memory[id].bitmap, alloc->resource_alloc[i].pos, alloc->resource_alloc[i].elements); ipu_resource_free(&alloc->resource_alloc[i]); alloc->resource_alloc[i].resource = &target_pool->ext_memory[id]; break; case IPU_RESOURCE_DFM: *target_pool->dfms[id].bitmap |= alloc->resource_alloc[i].elements; *alloc->resource_alloc[i].resource->bitmap &= ~(alloc->resource_alloc[i].elements); alloc->resource_alloc[i].resource = &target_pool->dfms[id]; break; default: /* * Just keep the compiler happy; this case already * failed in the loop above. */ break; } } target_pool->cells |= alloc->cells; source_pool->cells &= ~alloc->cells; return 0; } void ipu_psys_reset_process_cell(const struct device *dev, struct ipu_fw_psys_process_group *pg, void *pg_manifest, int process_count) { int i; u16 *process_offset_table; const struct ipu_fw_resource_definitions *res_defs; if (!pg) return; res_defs = get_res(); process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset); for (i = 0; i < process_count; i++) { struct ipu_fw_psys_process *process = (struct ipu_fw_psys_process *) ((char *)pg + process_offset_table[i]); struct ipu_fw_generic_program_manifest pm; int ret; if (!process) break; ret = ipu_fw_psys_get_program_manifest_by_process(&pm, pg_manifest, process); if (ret < 0) { dev_err(dev, "can not get manifest\n"); break; } if ((pm.cell_id != res_defs->num_cells && pm.cell_type_id == res_defs->num_cells_type)) continue; /* no return value check here: if finding a free cell * failed, the process cell was never set, so calling * clear_cell below will return non-zero.
*/ ipu_fw_psys_clear_process_cell(process); } } /* Free resources marked in `alloc' from `resources' */ void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc, struct ipu_psys_resource_pool *pool) { unsigned int i; pool->cells &= ~alloc->cells; alloc->cells = 0; for (i = 0; i < alloc->resources; i++) ipu_resource_free(&alloc->resource_alloc[i]); alloc->resources = 0; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-fw-resources.c000066400000000000000000000437531471702545500307020ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2015 - 2024 Intel Corporation #include #include #include "ipu-psys.h" #include "ipu-fw-psys.h" #include "ipu6-platform-resources.h" /* resources table */ /* * Cell types by cell IDs */ static const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = { IPU6_FW_PSYS_SP_CTRL_TYPE_ID, IPU6_FW_PSYS_VP_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* ICA_MEDIUM */ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ IPU6_FW_PSYS_GDC_TYPE_ID, IPU6_FW_PSYS_TNR_TYPE_ID, }; static const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, }; static const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { IPU6_FW_PSYS_VMEM0_MAX_SIZE, IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, IPU6_FW_PSYS_BAMEM0_MAX_SIZE, IPU6_FW_PSYS_DMEM0_MAX_SIZE, IPU6_FW_PSYS_DMEM1_MAX_SIZE, IPU6_FW_PSYS_DMEM2_MAX_SIZE, IPU6_FW_PSYS_DMEM3_MAX_SIZE, IPU6_FW_PSYS_PMEM0_MAX_SIZE }; static const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, }; static const u8 ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { { /* IPU6_FW_PSYS_SP0_ID */ IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_DMEM0_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_SP1_ID */ IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_DMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_VP0_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_DMEM3_ID, IPU6_FW_PSYS_VMEM0_ID, 
IPU6_FW_PSYS_BAMEM0_ID, IPU6_FW_PSYS_PMEM0_ID, }, { /* IPU6_FW_PSYS_ACC1_ID BNLM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC2_ID DM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC3_ID ACM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC9_ID GLTM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC10_ID XNR */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_ICA_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_LSC_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_DPC_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_SIS_A_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_SIS_B_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_B2B_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, 
{ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_AWB_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_AE_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_AF_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_DOL_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_PAF_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, } }; static const struct ipu_fw_resource_definitions ipu6_defs = { .cells = ipu6_fw_psys_cell_types, .num_cells = IPU6_FW_PSYS_N_CELL_ID, .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, .dev_channels = ipu6_fw_num_dev_channels, .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, .ext_mem_ids = ipu6_fw_psys_mem_size, .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, .dfms = ipu6_fw_psys_dfms, .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, .cell_mem = &ipu6_fw_psys_c_mem[0][0], }; const struct ipu_fw_resource_definitions *ipu6_res_defs = &ipu6_defs; /********** Generic resource handling **********/ int 
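/*
 * The helpers below patch the IPU6-specific process extension blob that
 * follows each ipu_fw_psys_process at process_extension_offset: device
 * channel offsets, DFM port bitmaps and external memory id/offset pairs.
 * They return -EINVAL when the process carries no extension.
 */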
ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset, u16 value) { struct ipu6_fw_psys_process_ext *pm_ext; u8 ps_ext_offset; ps_ext_offset = ptr->process_extension_offset; if (!ps_ext_offset) return -EINVAL; pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); pm_ext->dev_chn_offset[offset] = value; return 0; } int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr, u16 id, u32 bitmap, u32 active_bitmap) { struct ipu6_fw_psys_process_ext *pm_ext; u8 ps_ext_offset; ps_ext_offset = ptr->process_extension_offset; if (!ps_ext_offset) return -EINVAL; pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); pm_ext->dfm_port_bitmap[id] = bitmap; pm_ext->dfm_active_port_bitmap[id] = active_bitmap; return 0; } int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, u16 type_id, u16 mem_id, u16 offset) { struct ipu6_fw_psys_process_ext *pm_ext; u8 ps_ext_offset; ps_ext_offset = ptr->process_extension_offset; if (!ps_ext_offset) return -EINVAL; pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset); pm_ext->ext_mem_offset[type_id] = offset; pm_ext->ext_mem_id[type_id] = mem_id; return 0; } static struct ipu_fw_psys_program_manifest * get_program_manifest(const struct ipu_fw_psys_program_group_manifest *manifest, const unsigned int program_index) { struct ipu_fw_psys_program_manifest *prg_manifest_base; u8 *program_manifest = NULL; u8 program_count; unsigned int i; program_count = manifest->program_count; prg_manifest_base = (struct ipu_fw_psys_program_manifest *) ((char *)manifest + manifest->program_manifest_offset); if (program_index < program_count) { program_manifest = (u8 *)prg_manifest_base; for (i = 0; i < program_index; i++) program_manifest += ((struct ipu_fw_psys_program_manifest *) program_manifest)->size; } return (struct ipu_fw_psys_program_manifest *)program_manifest; } int ipu6_fw_psys_get_program_manifest_by_process( struct ipu_fw_generic_program_manifest *gen_pm, const struct ipu_fw_psys_program_group_manifest *pg_manifest, struct ipu_fw_psys_process *process) { u32 program_id = process->program_idx; struct ipu_fw_psys_program_manifest *pm; struct ipu6_fw_psys_program_manifest_ext *pm_ext; pm = get_program_manifest(pg_manifest, program_id); if (!pm) return -ENOENT; if (pm->program_extension_offset) { pm_ext = (struct ipu6_fw_psys_program_manifest_ext *) ((u8 *)pm + pm->program_extension_offset); gen_pm->dev_chn_size = pm_ext->dev_chn_size; gen_pm->dev_chn_offset = pm_ext->dev_chn_offset; gen_pm->ext_mem_size = pm_ext->ext_mem_size; gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset; gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable; gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap; gen_pm->dfm_active_port_bitmap = pm_ext->dfm_active_port_bitmap; } memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells)); gen_pm->cell_id = pm->cells[0]; gen_pm->cell_type_id = pm->cell_type_id; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note) { struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; u32 pgid = pg->ID; u8 processes = pg->process_count; u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); unsigned int p, chn, mem, mem_id; unsigned int mem_type, max_mem_id, dev_chn; if (ipu_ver == IPU_VER_6SE) { mem_type = 
IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID; max_mem_id = IPU6SE_FW_PSYS_N_MEM_ID; dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID; } else if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) { mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID; max_mem_id = IPU6_FW_PSYS_N_MEM_ID; dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID; } else { WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); return; } dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n", __func__, note, pgid, processes); for (p = 0; p < processes; p++) { struct ipu_fw_psys_process *process = (struct ipu_fw_psys_process *) ((char *)pg + process_offset_table[p]); struct ipu6_fw_psys_process_ext *pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)process + process->process_extension_offset); dev_dbg(&psys->adev->dev, "\t process %i size=%u", p, process->size); if (!process->process_extension_offset) continue; for (mem = 0; mem < mem_type; mem++) { mem_id = pm_ext->ext_mem_id[mem]; if (mem_id != max_mem_id) dev_dbg(&psys->adev->dev, "\t mem type %u id %d offset=0x%x", mem, mem_id, pm_ext->ext_mem_offset[mem]); } for (chn = 0; chn < dev_chn; chn++) { if (pm_ext->dev_chn_offset[chn] != (u16)(-1)) dev_dbg(&psys->adev->dev, "\t dev_chn[%u]=0x%x\n", chn, pm_ext->dev_chn_offset[chn]); } } } #else void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note) { if (ipu_ver == IPU_VER_6SE || ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP || ipu_ver == IPU_VER_6EP_MTL) return; WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); } #endif #else #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note) { struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; struct device *dev = &psys->adev->auxdev.dev; u32 pgid = pg->ID; u8 processes = pg->process_count; u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); unsigned int p, chn, mem, mem_id; unsigned int mem_type, max_mem_id, dev_chn; if (ipu_ver == IPU6_VER_6SE) { mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID; max_mem_id = IPU6SE_FW_PSYS_N_MEM_ID; dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID; } else if (ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP || ipu_ver == IPU6_VER_6EP_MTL) { mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID; max_mem_id = IPU6_FW_PSYS_N_MEM_ID; dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID; } else { WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); return; } dev_dbg(dev, "%s %s pgid %i has %i processes:\n", __func__, note, pgid, processes); for (p = 0; p < processes; p++) { struct ipu_fw_psys_process *process = (struct ipu_fw_psys_process *) ((char *)pg + process_offset_table[p]); struct ipu6_fw_psys_process_ext *pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)process + process->process_extension_offset); dev_dbg(dev, "\t process %i size=%u", p, process->size); if (!process->process_extension_offset) continue; for (mem = 0; mem < mem_type; mem++) { mem_id = pm_ext->ext_mem_id[mem]; if (mem_id != max_mem_id) dev_dbg(dev, "\t mem type %u id %d offset=0x%x", mem, mem_id, pm_ext->ext_mem_offset[mem]); } for (chn = 0; chn < dev_chn; chn++) { if (pm_ext->dev_chn_offset[chn] != (u16)(-1)) dev_dbg(dev, "\t dev_chn[%u]=0x%x\n", chn, pm_ext->dev_chn_offset[chn]); } } } #else void ipu6_fw_psys_pg_dump(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd, const char *note) { if (ipu_ver == IPU6_VER_6SE || ipu_ver == IPU6_VER_6 || ipu_ver == IPU6_VER_6EP 
|| ipu_ver == IPU6_VER_6EP_MTL) return; WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver); } #endif #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-l-scheduler.c000066400000000000000000000455511471702545500304630ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include "ipu-psys.h" #include "ipu6-ppg.h" extern bool enable_power_gating; struct sched_list { struct list_head list; /* to protect the list */ struct mutex lock; }; static struct sched_list start_list = { .list = LIST_HEAD_INIT(start_list.list), .lock = __MUTEX_INITIALIZER(start_list.lock), }; static struct sched_list stop_list = { .list = LIST_HEAD_INIT(stop_list.list), .lock = __MUTEX_INITIALIZER(stop_list.lock), }; static struct sched_list *get_sc_list(enum SCHED_LIST type) { /* for debug purposes */ WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST); if (type == SCHED_START_LIST) return &start_list; return &stop_list; } static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head) { struct ipu_psys_ppg *tmp; list_for_each_entry(tmp, head, sched_list) { if (kppg == tmp) return true; } return false; } void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, enum SCHED_LIST type) { struct sched_list *sc_list = get_sc_list(type); struct ipu_psys_ppg *tmp0, *tmp1; struct ipu_psys *psys = kppg->fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif mutex_lock(&sc_list->lock); list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { if (tmp0 == kppg) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "remove from %s list, kppg(%d 0x%p) state %d\n", type == SCHED_START_LIST ? "start" : "stop", kppg->kpg->pg->ID, kppg, kppg->state); #else dev_dbg(dev, "remove from %s list, kppg(%d 0x%p) state %d\n", type == SCHED_START_LIST ? "start" : "stop", kppg->kpg->pg->ID, kppg, kppg->state); #endif list_del_init(&kppg->sched_list); } } mutex_unlock(&sc_list->lock); } void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, enum SCHED_LIST type) { int cur_pri = kppg->pri_base + kppg->pri_dynamic; struct sched_list *sc_list = get_sc_list(type); struct ipu_psys *psys = kppg->fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_ppg *tmp0, *tmp1; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, #else dev_dbg(dev, #endif "add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n", type == SCHED_START_LIST ? 
"start" : "stop", kppg->kpg->pg->ID, kppg, kppg->state, kppg->pri_base, kppg->pri_dynamic, kppg->fh); mutex_lock(&sc_list->lock); if (list_empty(&sc_list->list)) { list_add(&kppg->sched_list, &sc_list->list); goto out; } if (is_kppg_in_list(kppg, &sc_list->list)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "kppg already in list\n"); #else dev_dbg(dev, "kppg already in list\n"); #endif goto out; } list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) { int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, #else dev_dbg(dev, #endif "found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n", tmp0->kpg->pg->ID, tmp0, tmp0->state, tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh); if (type == SCHED_START_LIST && tmp_pri > cur_pri) { list_add(&kppg->sched_list, tmp0->sched_list.prev); goto out; } else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) { list_add(&kppg->sched_list, tmp0->sched_list.prev); goto out; } } list_add_tail(&kppg->sched_list, &sc_list->list); out: mutex_unlock(&sc_list->lock); } static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg) { struct ipu_psys_resource_pool *try_res_pool; struct ipu_psys *psys = kppg->fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif int ret = 0; int state; try_res_pool = kzalloc(sizeof(*try_res_pool), GFP_KERNEL); if (IS_ERR_OR_NULL(try_res_pool)) return -ENOMEM; mutex_lock(&kppg->mutex); state = kppg->state; mutex_unlock(&kppg->mutex); if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING || state == PPG_STATE_RESUMED) goto exit; ret = ipu_psys_resource_pool_init(try_res_pool); if (ret < 0) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "unable to alloc pg resources\n"); #else dev_err(dev, "unable to alloc pg resources\n"); #endif WARN_ON(1); goto exit; } ipu_psys_resource_copy(&psys->resource_pool_running, try_res_pool); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ret = ipu_psys_try_allocate_resources(&psys->adev->dev, kppg->kpg->pg, #else ret = ipu_psys_try_allocate_resources(dev, kppg->kpg->pg, #endif kppg->manifest, try_res_pool); ipu_psys_resource_pool_cleanup(try_res_pool); exit: kfree(try_res_pool); return ret; } static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping) { struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_scheduler *sched; struct ipu_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (kppg->state == PPG_STATE_START || kppg->state == PPG_STATE_RESUME) { ipu_psys_scheduler_add_kppg(kppg, SCHED_START_LIST); } else if (kppg->state == PPG_STATE_RUNNING) { ipu_psys_scheduler_add_kppg(kppg, SCHED_STOP_LIST); } else if (kppg->state == PPG_STATE_SUSPENDING || kppg->state == PPG_STATE_STOPPING) { /* there are some suspending/stopping ppgs */ *stopping = true; } else if (kppg->state == PPG_STATE_RESUMING || kppg->state == PPG_STATE_STARTING) { /* how about kppg are resuming/starting? 
*/ } mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } } static void ipu_psys_scheduler_update_start_ppg_priority(void) { struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); struct ipu_psys_ppg *kppg, *tmp; mutex_lock(&sc_list->lock); if (!list_empty(&sc_list->list)) list_for_each_entry_safe(kppg, tmp, &sc_list->list, sched_list) kppg->pri_dynamic--; mutex_unlock(&sc_list->lock); } static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys) { struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_ppg *kppg; bool resched = false; mutex_lock(&sc_list->lock); if (list_empty(&sc_list->list)) { /* some ppgs are RESUMING/STARTING */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "no candidated stop ppg\n"); #else dev_dbg(dev, "no candidated stop ppg\n"); #endif mutex_unlock(&sc_list->lock); return false; } kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg, sched_list); mutex_unlock(&sc_list->lock); mutex_lock(&kppg->mutex); if (!(kppg->state & PPG_STATE_STOP)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n", #else dev_dbg(dev, "s_change:%s: %p %d -> %d\n", #endif __func__, kppg, kppg->state, PPG_STATE_SUSPEND); kppg->state = PPG_STATE_SUSPEND; resched = true; } mutex_unlock(&kppg->mutex); return resched; } /* * search all kppgs and sort them into start_list and stop_list, alway start * first kppg(high priority) in start_list; * if there is resource contention, it would switch kppgs in stop_list * to suspend state one by one */ static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys) { struct sched_list *sc_list = get_sc_list(SCHED_START_LIST); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_ppg *kppg, *kppg0; bool stopping_existed = false; int ret; ipu_psys_scheduler_ppg_sort(psys, &stopping_existed); mutex_lock(&sc_list->lock); if (list_empty(&sc_list->list)) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "no ppg to start\n"); #else dev_dbg(dev, "no ppg to start\n"); #endif mutex_unlock(&sc_list->lock); return false; } list_for_each_entry_safe(kppg, kppg0, &sc_list->list, sched_list) { mutex_unlock(&sc_list->lock); ret = ipu_psys_detect_resource_contention(kppg); if (ret < 0) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "ppg %d resource detect failed(%d)\n", #else dev_dbg(dev, "ppg %d resource detect failed(%d)\n", #endif kppg->kpg->pg->ID, ret); /* * switch out other ppg in 2 cases: * 1. resource contention * 2. 
no suspending/stopping ppg */ if (ret == -ENOSPC) { if (!stopping_existed && ipu_psys_scheduler_switch_ppg(psys)) { return true; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "ppg is suspending/stopping\n"); #else dev_dbg(dev, "ppg is suspending/stopping\n"); #endif } else { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "detect resource error %d\n", ret); #else dev_err(dev, "detect resource error %d\n", ret); #endif } } else { kppg->pri_dynamic = 0; mutex_lock(&kppg->mutex); if (kppg->state == PPG_STATE_START) ipu_psys_ppg_start(kppg); else ipu_psys_ppg_resume(kppg); mutex_unlock(&kppg->mutex); ipu_psys_scheduler_remove_kppg(kppg, SCHED_START_LIST); ipu_psys_scheduler_update_start_ppg_priority(); } mutex_lock(&sc_list->lock); } mutex_unlock(&sc_list->lock); return false; } static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys) { struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg; struct ipu_psys_fh *fh; bool resched = false; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry(kppg, &sched->ppgs, list) { if (ipu_psys_ppg_enqueue_bufsets(kppg)) resched = true; } mutex_unlock(&fh->mutex); } return resched; } /* * This function will check all kppgs within fhs, and if kppg state * is STOP or SUSPEND, l-scheduler will call ppg function to stop * or suspend it and update stop list */ static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys) { struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; bool stopping_exit = false; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (kppg->state & PPG_STATE_STOP) { ipu_psys_ppg_stop(kppg); ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); } else if (kppg->state == PPG_STATE_SUSPEND) { ipu_psys_ppg_suspend(kppg); ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); } else if (kppg->state == PPG_STATE_SUSPENDING || kppg->state == PPG_STATE_STOPPING) { stopping_exit = true; } mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } return stopping_exit; } static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys, struct ipu_psys_ppg *kppg, struct ipu_psys_kcmd *kcmd) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif int old_ppg_state = kppg->state; /* * Respond kcmd when ppg is in stable state: * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED */ if (kppg->state == PPG_STATE_STARTED || kppg->state == PPG_STATE_RESUMED || kppg->state == PPG_STATE_RUNNING) { if (kcmd->state == KCMD_STATE_PPG_START) ipu_psys_kcmd_complete(kppg, kcmd, 0); else if (kcmd->state == KCMD_STATE_PPG_STOP) kppg->state = PPG_STATE_STOP; } else if (kppg->state == PPG_STATE_SUSPENDED) { if (kcmd->state == KCMD_STATE_PPG_START) ipu_psys_kcmd_complete(kppg, kcmd, 0); else if (kcmd->state == KCMD_STATE_PPG_STOP) /* * Record the previous state * because here need resume at first */ kppg->state |= PPG_STATE_STOP; else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) kppg->state = PPG_STATE_RESUME; } else if (kppg->state == PPG_STATE_STOPPED) { if (kcmd->state == KCMD_STATE_PPG_START) kppg->state = PPG_STATE_START; else if (kcmd->state == KCMD_STATE_PPG_STOP) ipu_psys_kcmd_complete(kppg, 
kcmd, 0); else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "ppg %p stopped!\n", kppg); #else dev_err(dev, "ppg %p stopped!\n", kppg); #endif ipu_psys_kcmd_complete(kppg, kcmd, -EIO); } } if (old_ppg_state != kppg->state) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n", #else dev_dbg(dev, "s_change:%s: %p %d -> %d\n", #endif __func__, kppg, old_ppg_state, kppg->state); } static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys) { struct ipu_psys_kcmd *kcmd; struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (list_empty(&kppg->kcmds_new_list)) { mutex_unlock(&kppg->mutex); continue; }; kcmd = list_first_entry(&kppg->kcmds_new_list, struct ipu_psys_kcmd, list); ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd); mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } } static bool is_ready_to_enter_power_gating(struct ipu_psys *psys) { struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (!list_empty(&kppg->kcmds_new_list) || !list_empty(&kppg->kcmds_processing_list)) { mutex_unlock(&kppg->mutex); mutex_unlock(&fh->mutex); return false; } if (!(kppg->state == PPG_STATE_RUNNING || kppg->state == PPG_STATE_STOPPED || kppg->state == PPG_STATE_SUSPENDED)) { mutex_unlock(&kppg->mutex); mutex_unlock(&fh->mutex); return false; } mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } return true; } static bool has_pending_kcmd(struct ipu_psys *psys) { struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (!list_empty(&kppg->kcmds_new_list) || !list_empty(&kppg->kcmds_processing_list)) { mutex_unlock(&kppg->mutex); mutex_unlock(&fh->mutex); return true; } mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } return false; } static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif /* Assume power gating process can be aborted directly during START */ if (psys->power_gating == PSYS_POWER_GATED) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "powergating: exit ---\n"); #else dev_dbg(dev, "powergating: exit ---\n"); #endif ipu_psys_exit_power_gating(psys); } psys->power_gating = PSYS_POWER_NORMAL; return false; } static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; if (!enable_power_gating) return false; if (psys->power_gating == PSYS_POWER_NORMAL && 
is_ready_to_enter_power_gating(psys)) { /* Enter power gating */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "powergating: enter +++\n"); #else dev_dbg(dev, "powergating: enter +++\n"); #endif psys->power_gating = PSYS_POWER_GATING; } if (psys->power_gating != PSYS_POWER_GATING) return false; /* Suspend ppgs one by one */ list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (kppg->state == PPG_STATE_RUNNING) { kppg->state = PPG_STATE_SUSPEND; mutex_unlock(&kppg->mutex); mutex_unlock(&fh->mutex); return true; } if (kppg->state != PPG_STATE_SUSPENDED && kppg->state != PPG_STATE_STOPPED) { /* Can't enter power gating */ mutex_unlock(&kppg->mutex); mutex_unlock(&fh->mutex); /* Need re-run l-scheduler to suspend ppg? */ return (kppg->state & PPG_STATE_STOP || kppg->state == PPG_STATE_SUSPEND); } mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } psys->power_gating = PSYS_POWER_GATED; ipu_psys_enter_power_gating(psys); return false; } void ipu_psys_run_next(struct ipu_psys *psys) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif /* Wake up scheduler due to unfinished work */ bool need_trigger = false; /* Wait FW callback if there are stopping/suspending/running ppg */ bool wait_fw_finish = false; /* * Code below will crash if fhs is empty. Normally this * shouldn't happen. */ if (list_empty(&psys->fhs)) { WARN_ON(1); return; } /* Abort power gating process */ if (psys->power_gating != PSYS_POWER_NORMAL && has_pending_kcmd(psys)) need_trigger = ipu_psys_scheduler_exit_power_gating(psys); /* Handle kcmd and related ppg switch */ if (psys->power_gating == PSYS_POWER_NORMAL) { ipu_psys_scheduler_kcmd_set(psys); wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); need_trigger |= ipu_psys_scheduler_ppg_start(psys); need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys); } if (!(need_trigger || wait_fw_finish)) { /* Nothing to do, enter power gating */ need_trigger = ipu_psys_scheduler_enter_power_gating(psys); if (psys->power_gating == PSYS_POWER_GATING) wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys); } if (need_trigger && !wait_fw_finish) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "scheduler: wake up\n"); #else dev_dbg(dev, "scheduler: wake up\n"); #endif atomic_set(&psys->wakeup_count, 1); wake_up_interruptible(&psys->sched_cmd_wq); } } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-platform-resources.h000066400000000000000000000151011471702545500321010ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2018 - 2024 Intel Corporation */ #ifndef IPU6_PLATFORM_RESOURCES_H #define IPU6_PLATFORM_RESOURCES_H #include #include #include "ipu-platform-resources.h" #define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 0 enum { IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID, 
IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID, IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID, IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID }; enum { IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID, IPU6_FW_PSYS_LB_VMEM_TYPE_ID, IPU6_FW_PSYS_DMEM_TYPE_ID, IPU6_FW_PSYS_VMEM_TYPE_ID, IPU6_FW_PSYS_BAMEM_TYPE_ID, IPU6_FW_PSYS_PMEM_TYPE_ID, IPU6_FW_PSYS_N_MEM_TYPE_ID }; enum ipu6_mem_id { IPU6_FW_PSYS_VMEM0_ID = 0, /* ISP0 VMEM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, /* TRANSFER VMEM 0 */ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, /* TRANSFER VMEM 1 */ IPU6_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ IPU6_FW_PSYS_BAMEM0_ID, /* ISP0 BAMEM */ IPU6_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ IPU6_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ IPU6_FW_PSYS_DMEM2_ID, /* SPP1 Dmem */ IPU6_FW_PSYS_DMEM3_ID, /* ISP0 Dmem */ IPU6_FW_PSYS_PMEM0_ID, /* ISP0 PMEM */ IPU6_FW_PSYS_N_MEM_ID }; enum { IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID, IPU6_FW_PSYS_N_DEV_CHN_ID }; enum { IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0, IPU6_FW_PSYS_SP_SERVER_TYPE_ID, IPU6_FW_PSYS_VP_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_GDC_TYPE_ID, IPU6_FW_PSYS_TNR_TYPE_ID, IPU6_FW_PSYS_N_CELL_TYPE_ID }; enum { IPU6_FW_PSYS_SP0_ID = 0, IPU6_FW_PSYS_VP0_ID, IPU6_FW_PSYS_PSA_ACC_BNLM_ID, IPU6_FW_PSYS_PSA_ACC_DM_ID, IPU6_FW_PSYS_PSA_ACC_ACM_ID, IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID, IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID, IPU6_FW_PSYS_PSA_ACC_GLTM_ID, IPU6_FW_PSYS_PSA_ACC_XNR_ID, IPU6_FW_PSYS_PSA_VCSC_ID, /* VCSC */ IPU6_FW_PSYS_ISA_ICA_ID, IPU6_FW_PSYS_ISA_LSC_ID, IPU6_FW_PSYS_ISA_DPC_ID, IPU6_FW_PSYS_ISA_SIS_A_ID, IPU6_FW_PSYS_ISA_SIS_B_ID, IPU6_FW_PSYS_ISA_B2B_ID, IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID, IPU6_FW_PSYS_ISA_R2I_DS_A_ID, IPU6_FW_PSYS_ISA_R2I_DS_B_ID, IPU6_FW_PSYS_ISA_AWB_ID, IPU6_FW_PSYS_ISA_AE_ID, IPU6_FW_PSYS_ISA_AF_ID, IPU6_FW_PSYS_ISA_DOL_ID, IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID, IPU6_FW_PSYS_ISA_X2B_MD_ID, IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, IPU6_FW_PSYS_ISA_PAF_ID, IPU6_FW_PSYS_BB_ACC_GDC0_ID, IPU6_FW_PSYS_BB_ACC_TNR_ID, IPU6_FW_PSYS_N_CELL_ID }; enum { IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0, IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID, IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID, IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID, IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID, }; /* Excluding PMEM */ #define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6_FW_PSYS_N_MEM_TYPE_ID - 1) #define IPU6_FW_PSYS_N_DEV_DFM_ID \ (IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1) #define IPU6_FW_PSYS_VMEM0_MAX_SIZE 0x0800 /* Transfer VMEM0 words, ref HAS Transfer*/ 
#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 /* Transfer VMEM1 words, ref HAS Transfer*/ #define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE 0x0800 #define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ #define IPU6_FW_PSYS_BAMEM0_MAX_SIZE 0x0800 #define IPU6_FW_PSYS_DMEM0_MAX_SIZE 0x4000 #define IPU6_FW_PSYS_DMEM1_MAX_SIZE 0x1000 #define IPU6_FW_PSYS_DMEM2_MAX_SIZE 0x1000 #define IPU6_FW_PSYS_DMEM3_MAX_SIZE 0x1000 #define IPU6_FW_PSYS_PMEM0_MAX_SIZE 0x0500 #define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 #define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 0 #define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 #define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 43 #define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 8 #define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 #define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 #define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE 32 #define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 #define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 #define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE 32 #define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 #define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 struct ipu6_fw_psys_program_manifest_ext { u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID]; u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID]; u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES]; u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES]; u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT]; }; struct ipu6_fw_psys_process_ext { u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID]; u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID]; u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID]; }; #endif /* IPU6_PLATFORM_RESOURCES_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.c000066400000000000000000000562451471702545500270440ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include #include #include "ipu6-ppg.h" static bool enable_suspend_resume; module_param(enable_suspend_resume, bool, 0664); MODULE_PARM_DESC(enable_suspend_resume, "enable fw ppg suspend/resume api"); static struct ipu_psys_kcmd * ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state) { struct ipu_psys_kcmd *kcmd; if (list_empty(&kppg->kcmds_new_list)) return NULL; list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) { if (kcmd->state == state) return kcmd; } return NULL; } struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg) { struct ipu_psys_kcmd *kcmd; WARN(!mutex_is_locked(&kppg->mutex), "ppg locking error"); if (list_empty(&kppg->kcmds_processing_list)) return NULL; list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) { if (kcmd->state == KCMD_STATE_PPG_STOP) return kcmd; } return NULL; } static struct ipu_psys_buffer_set * __get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &fh->psys->adev->auxdev.dev; 
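/*
 * Reuse an idle buffer set from sched->buf_sets if one is large enough
 * (buf_set_size == 0 is treated as free here); otherwise fall back to
 * allocating a new DMA-coherent set below and adding it to the list.
 */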
#endif struct ipu_psys_buffer_set *kbuf_set; struct ipu_psys_scheduler *sched = &fh->sched; mutex_lock(&sched->bs_mutex); list_for_each_entry(kbuf_set, &sched->buf_sets, list) { if (!kbuf_set->buf_set_size && kbuf_set->size >= buf_set_size) { kbuf_set->buf_set_size = buf_set_size; mutex_unlock(&sched->bs_mutex); return kbuf_set; } } mutex_unlock(&sched->bs_mutex); /* no suitable buffer available, allocate new one */ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); if (!kbuf_set) return NULL; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) kbuf_set->kaddr = dma_alloc_attrs(&fh->psys->adev->dev, buf_set_size, &kbuf_set->dma_addr, GFP_KERNEL, 0); #else kbuf_set->kaddr = dma_alloc_attrs(dev, buf_set_size, &kbuf_set->dma_addr, GFP_KERNEL, 0); #endif if (!kbuf_set->kaddr) { kfree(kbuf_set); return NULL; } kbuf_set->buf_set_size = buf_set_size; kbuf_set->size = buf_set_size; mutex_lock(&sched->bs_mutex); list_add(&kbuf_set->list, &sched->buf_sets); mutex_unlock(&sched->bs_mutex); return kbuf_set; } static struct ipu_psys_buffer_set * ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd, struct ipu_psys_ppg *kppg) { struct ipu_psys_fh *fh = kcmd->fh; struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_buffer_set *kbuf_set; size_t buf_set_size; u32 *keb; buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd); kbuf_set = __get_buf_set(fh, buf_set_size); if (!kbuf_set) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "failed to create buffer set\n"); #else dev_err(dev, "failed to create buffer set\n"); #endif return NULL; } kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd, kbuf_set->kaddr, 0); ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set, kbuf_set->dma_addr); keb = kcmd->kernel_enable_bitmap; ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(kbuf_set->buf_set, keb); return kbuf_set; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct ipu_psys_buffer_set *kbuf_set; unsigned int i; int ret; kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg); if (!kbuf_set) { ret = -EINVAL; goto error; } kcmd->kbuf_set = kbuf_set; kbuf_set->kcmd = kcmd; for (i = 0; i < kcmd->nbuffers; i++) { struct ipu_fw_psys_terminal *terminal; u32 buffer; terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); if (!terminal) continue; buffer = (u32)kcmd->kbufs[i]->dma_addr + kcmd->buffers[i].data_offset; ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer); if (ret) { dev_err(&psys->adev->dev, "Unable to set bufset\n"); goto error; } } return 0; error: dev_err(&psys->adev->dev, "failed to get buffer set\n"); return ret; } void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg) { u8 queue_id; int old_ppg_state; if (!psys || !kppg) return; mutex_lock(&kppg->mutex); old_ppg_state = kppg->state; if (kppg->state == PPG_STATE_STOPPING) { struct ipu_psys_kcmd tmp_kcmd = { .kpg = kppg->kpg, }; kppg->state = PPG_STATE_STOPPED; ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd); ipu_psys_free_cmd_queue_resource(&psys->resource_pool_running, queue_id); pm_runtime_put(&psys->adev->dev); } else { if (kppg->state == PPG_STATE_SUSPENDING) { kppg->state = PPG_STATE_SUSPENDED; ipu_psys_free_resources(&kppg->kpg->resource_alloc, 
&psys->resource_pool_running); } else if (kppg->state == PPG_STATE_STARTED || kppg->state == PPG_STATE_RESUMED) { kppg->state = PPG_STATE_RUNNING; } /* Kick l-scheduler thread for FW callback, * also for checking if need to enter power gating */ atomic_set(&psys->wakeup_count, 1); wake_up_interruptible(&psys->sched_cmd_wq); } if (old_ppg_state != kppg->state) dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n", __func__, kppg, old_ppg_state, kppg->state); mutex_unlock(&kppg->mutex); } int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, KCMD_STATE_PPG_START); unsigned int i; int ret; if (!kcmd) { dev_err(&psys->adev->dev, "failed to find start kcmd!\n"); return -EINVAL; } dev_dbg(&psys->adev->dev, "start ppg id %d, addr 0x%p\n", ipu_fw_psys_pg_get_id(kcmd), kppg); kppg->state = PPG_STATE_STARTING; for (i = 0; i < kcmd->nbuffers; i++) { struct ipu_fw_psys_terminal *terminal; terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); if (!terminal) continue; ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0, kcmd->buffers[i].len); if (ret) { dev_err(&psys->adev->dev, "Unable to set terminal\n"); return ret; } } ret = ipu_fw_psys_pg_submit(kcmd); if (ret) { dev_err(&psys->adev->dev, "failed to submit kcmd!\n"); return ret; } ret = ipu_psys_allocate_resources(&psys->adev->dev, kcmd->kpg->pg, kcmd->pg_manifest, &kcmd->kpg->resource_alloc, &psys->resource_pool_running); if (ret) { dev_err(&psys->adev->dev, "alloc resources failed!\n"); return ret; } ret = pm_runtime_get_sync(&psys->adev->dev); if (ret < 0) { dev_err(&psys->adev->dev, "failed to power on psys\n"); goto error; } ret = ipu_psys_kcmd_start(psys, kcmd); if (ret) { ipu_psys_kcmd_complete(kppg, kcmd, -EIO); goto error; } dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STARTED); kppg->state = PPG_STATE_STARTED; ipu_psys_kcmd_complete(kppg, kcmd, 0); return 0; error: pm_runtime_put_noidle(&psys->adev->dev); ipu_psys_reset_process_cell(&psys->adev->dev, kcmd->kpg->pg, kcmd->pg_manifest, kcmd->kpg->pg->process_count); ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); dev_err(&psys->adev->dev, "failed to start ppg\n"); return ret; } int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct ipu_psys_kcmd tmp_kcmd = { .kpg = kppg->kpg, .fh = kppg->fh, }; int ret; dev_dbg(&psys->adev->dev, "resume ppg id %d, addr 0x%p\n", ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg); kppg->state = PPG_STATE_RESUMING; if (enable_suspend_resume) { ret = ipu_psys_allocate_resources(&psys->adev->dev, kppg->kpg->pg, kppg->manifest, &kppg->kpg->resource_alloc, &psys->resource_pool_running); if (ret) { dev_err(&psys->adev->dev, "failed to allocate res\n"); return -EIO; } ret = ipu_fw_psys_ppg_resume(&tmp_kcmd); if (ret) { dev_err(&psys->adev->dev, "failed to resume ppg\n"); goto error; } } else { kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY; ret = ipu_fw_psys_pg_submit(&tmp_kcmd); if (ret) { dev_err(&psys->adev->dev, "failed to submit kcmd!\n"); return ret; } ret = ipu_psys_allocate_resources(&psys->adev->dev, kppg->kpg->pg, kppg->manifest, &kppg->kpg->resource_alloc, &psys->resource_pool_running); if (ret) { dev_err(&psys->adev->dev, "failed to allocate res\n"); return ret; } ret = ipu_psys_kcmd_start(psys, &tmp_kcmd); if (ret) { dev_err(&psys->adev->dev, "failed to start kcmd!\n"); goto error; } } dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> 
%d\n", __func__, kppg, kppg->state, PPG_STATE_RESUMED); kppg->state = PPG_STATE_RESUMED; return 0; error: ipu_psys_reset_process_cell(&psys->adev->dev, kppg->kpg->pg, kppg->manifest, kppg->kpg->pg->process_count); ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); return ret; } int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg) { struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, KCMD_STATE_PPG_STOP); struct ipu_psys *psys = kppg->fh->psys; struct ipu_psys_kcmd kcmd_temp; int ppg_id, ret = 0; if (kcmd) { list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); } else { dev_dbg(&psys->adev->dev, "Exceptional stop happened!\n"); kcmd_temp.kpg = kppg->kpg; kcmd_temp.fh = kppg->fh; kcmd = &kcmd_temp; /* delete kppg in stop list to avoid this ppg resuming */ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); } ppg_id = ipu_fw_psys_pg_get_id(kcmd); dev_dbg(&psys->adev->dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg); if (kppg->state & PPG_STATE_SUSPENDED) { if (enable_suspend_resume) { dev_dbg(&psys->adev->dev, "need resume before stop!\n"); kcmd_temp.kpg = kppg->kpg; kcmd_temp.fh = kppg->fh; ret = ipu_fw_psys_ppg_resume(&kcmd_temp); if (ret) dev_err(&psys->adev->dev, "ppg(%d) failed to resume\n", ppg_id); } else if (kcmd != &kcmd_temp) { ipu_psys_free_cmd_queue_resource( &psys->resource_pool_running, ipu_fw_psys_ppg_get_base_queue_id(kcmd)); ipu_psys_kcmd_complete(kppg, kcmd, 0); dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STOPPED); pm_runtime_put(&psys->adev->dev); kppg->state = PPG_STATE_STOPPED; return 0; } else { return 0; } } dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STOPPING); kppg->state = PPG_STATE_STOPPING; ret = ipu_fw_psys_pg_abort(kcmd); if (ret) dev_err(&psys->adev->dev, "ppg(%d) failed to abort\n", ppg_id); return ret; } int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct ipu_psys_kcmd tmp_kcmd = { .kpg = kppg->kpg, .fh = kppg->fh, }; int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd); int ret = 0; dev_dbg(&psys->adev->dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg); dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_SUSPENDING); kppg->state = PPG_STATE_SUSPENDING; if (enable_suspend_resume) ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd); else ret = ipu_fw_psys_pg_abort(&tmp_kcmd); if (ret) dev_err(&psys->adev->dev, "failed to %s ppg(%d)\n", enable_suspend_resume ? 
"suspend" : "stop", ret); return ret; } #else int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct device *dev = &psys->adev->auxdev.dev; struct ipu_psys_buffer_set *kbuf_set; unsigned int i; int ret; kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg); if (!kbuf_set) { ret = -EINVAL; goto error; } kcmd->kbuf_set = kbuf_set; kbuf_set->kcmd = kcmd; for (i = 0; i < kcmd->nbuffers; i++) { struct ipu_fw_psys_terminal *terminal; u32 buffer; terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); if (!terminal) continue; buffer = (u32)kcmd->kbufs[i]->dma_addr + kcmd->buffers[i].data_offset; ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer); if (ret) { dev_err(dev, "Unable to set bufset\n"); goto error; } } return 0; error: dev_err(dev, "failed to get buffer set\n"); return ret; } void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg) { struct device *dev; u8 queue_id; int old_ppg_state; if (!psys || !kppg) return; dev = &psys->adev->auxdev.dev; mutex_lock(&kppg->mutex); old_ppg_state = kppg->state; if (kppg->state == PPG_STATE_STOPPING) { struct ipu_psys_kcmd tmp_kcmd = { .kpg = kppg->kpg, }; kppg->state = PPG_STATE_STOPPED; ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd); ipu_psys_free_cmd_queue_resource(&psys->resource_pool_running, queue_id); pm_runtime_put(dev); } else { if (kppg->state == PPG_STATE_SUSPENDING) { kppg->state = PPG_STATE_SUSPENDED; ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); } else if (kppg->state == PPG_STATE_STARTED || kppg->state == PPG_STATE_RESUMED) { kppg->state = PPG_STATE_RUNNING; } /* Kick l-scheduler thread for FW callback, * also for checking if need to enter power gating */ atomic_set(&psys->wakeup_count, 1); wake_up_interruptible(&psys->sched_cmd_wq); } if (old_ppg_state != kppg->state) dev_dbg(dev, "s_change:%s: %p %d -> %d\n", __func__, kppg, old_ppg_state, kppg->state); mutex_unlock(&kppg->mutex); } int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct device *dev = &psys->adev->auxdev.dev; struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, KCMD_STATE_PPG_START); unsigned int i; int ret; if (!kcmd) { dev_err(dev, "failed to find start kcmd!\n"); return -EINVAL; } dev_dbg(dev, "start ppg id %d, addr 0x%p\n", ipu_fw_psys_pg_get_id(kcmd), kppg); kppg->state = PPG_STATE_STARTING; for (i = 0; i < kcmd->nbuffers; i++) { struct ipu_fw_psys_terminal *terminal; terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); if (!terminal) continue; ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0, kcmd->buffers[i].len); if (ret) { dev_err(dev, "Unable to set terminal\n"); return ret; } } ret = ipu_fw_psys_pg_submit(kcmd); if (ret) { dev_err(dev, "failed to submit kcmd!\n"); return ret; } ret = ipu_psys_allocate_resources(dev, kcmd->kpg->pg, kcmd->pg_manifest, &kcmd->kpg->resource_alloc, &psys->resource_pool_running); if (ret) { dev_err(dev, "alloc resources failed!\n"); return ret; } ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "failed to power on psys\n"); goto error; } ret = ipu_psys_kcmd_start(psys, kcmd); if (ret) { ipu_psys_kcmd_complete(kppg, kcmd, -EIO); goto error; } dev_dbg(dev, "s_change:%s: %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STARTED); kppg->state = PPG_STATE_STARTED; ipu_psys_kcmd_complete(kppg, kcmd, 0); return 0; error: pm_runtime_put_noidle(dev); 
ipu_psys_reset_process_cell(dev, kcmd->kpg->pg, kcmd->pg_manifest, kcmd->kpg->pg->process_count); ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); dev_err(dev, "failed to start ppg\n"); return ret; } int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct device *dev = &psys->adev->auxdev.dev; struct ipu_psys_kcmd tmp_kcmd = { .kpg = kppg->kpg, .fh = kppg->fh, }; int ret; dev_dbg(dev, "resume ppg id %d, addr 0x%p\n", ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg); kppg->state = PPG_STATE_RESUMING; if (enable_suspend_resume) { ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, kppg->manifest, &kppg->kpg->resource_alloc, &psys->resource_pool_running); if (ret) { dev_err(dev, "failed to allocate res\n"); return -EIO; } ret = ipu_fw_psys_ppg_resume(&tmp_kcmd); if (ret) { dev_err(dev, "failed to resume ppg\n"); goto error; } } else { kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY; ret = ipu_fw_psys_pg_submit(&tmp_kcmd); if (ret) { dev_err(dev, "failed to submit kcmd!\n"); return ret; } ret = ipu_psys_allocate_resources(dev, kppg->kpg->pg, kppg->manifest, &kppg->kpg->resource_alloc, &psys->resource_pool_running); if (ret) { dev_err(dev, "failed to allocate res\n"); return ret; } ret = ipu_psys_kcmd_start(psys, &tmp_kcmd); if (ret) { dev_err(dev, "failed to start kcmd!\n"); goto error; } } dev_dbg(dev, "s_change:%s: %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_RESUMED); kppg->state = PPG_STATE_RESUMED; return 0; error: ipu_psys_reset_process_cell(dev, kppg->kpg->pg, kppg->manifest, kppg->kpg->pg->process_count); ipu_psys_free_resources(&kppg->kpg->resource_alloc, &psys->resource_pool_running); return ret; } int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg) { struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg, KCMD_STATE_PPG_STOP); struct ipu_psys *psys = kppg->fh->psys; struct device *dev = &psys->adev->auxdev.dev; struct ipu_psys_kcmd kcmd_temp; int ppg_id, ret = 0; if (kcmd) { list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); } else { dev_dbg(dev, "Exceptional stop happened!\n"); kcmd_temp.kpg = kppg->kpg; kcmd_temp.fh = kppg->fh; kcmd = &kcmd_temp; /* delete kppg in stop list to avoid this ppg resuming */ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST); } ppg_id = ipu_fw_psys_pg_get_id(kcmd); dev_dbg(dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg); if (kppg->state & PPG_STATE_SUSPENDED) { if (enable_suspend_resume) { dev_dbg(dev, "need resume before stop!\n"); kcmd_temp.kpg = kppg->kpg; kcmd_temp.fh = kppg->fh; ret = ipu_fw_psys_ppg_resume(&kcmd_temp); if (ret) dev_err(dev, "ppg(%d) failed to resume\n", ppg_id); } else if (kcmd != &kcmd_temp) { u8 queue_id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); ipu_psys_free_cmd_queue_resource(&psys->resource_pool_running, queue_id); ipu_psys_kcmd_complete(kppg, kcmd, 0); dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STOPPED); pm_runtime_put(dev); kppg->state = PPG_STATE_STOPPED; return 0; } else { return 0; } } dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STOPPING); kppg->state = PPG_STATE_STOPPING; ret = ipu_fw_psys_pg_abort(kcmd); if (ret) dev_err(dev, "ppg(%d) failed to abort\n", ppg_id); return ret; } int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg) { struct ipu_psys *psys = kppg->fh->psys; struct device *dev = &psys->adev->auxdev.dev; struct ipu_psys_kcmd tmp_kcmd = { .kpg = kppg->kpg, .fh = kppg->fh, }; int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd); int ret = 0; 
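/*
 * Mark the ppg SUSPENDING, then either ask firmware to suspend it
 * (enable_suspend_resume) or abort the process group; the completion
 * path in ipu_psys_ppg_complete() moves it on to SUSPENDED.
 */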
dev_dbg(dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg); dev_dbg(dev, "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_SUSPENDING); kppg->state = PPG_STATE_SUSPENDING; if (enable_suspend_resume) ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd); else ret = ipu_fw_psys_pg_abort(&tmp_kcmd); if (ret) dev_err(dev, "failed to %s ppg(%d)\n", enable_suspend_resume ? "suspend" : "stop", ret); return ret; } #endif static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg) { return !list_empty(&kppg->kcmds_new_list); } /* * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware * Sometimes, if the ppg is at suspended state, this function will return true * to reschedule and let the resume command scheduled before the buffer sets * enqueuing. */ bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg) { struct ipu_psys_kcmd *kcmd, *kcmd0; struct ipu_psys *psys = kppg->fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif bool need_resume = false; mutex_lock(&kppg->mutex); if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED | PPG_STATE_RUNNING)) { if (ipu_psys_ppg_is_bufset_existing(kppg)) { list_for_each_entry_safe(kcmd, kcmd0, &kppg->kcmds_new_list, list) { int ret; if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) { need_resume = true; break; } ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd); if (ret) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, #else dev_err(dev, #endif "kppg 0x%p fail to qbufset %d", kppg, ret); break; } list_move_tail(&kcmd->list, &kppg->kcmds_processing_list); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, #else dev_dbg(dev, #endif "kppg %d %p queue kcmd 0x%p fh 0x%p\n", ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, kcmd->fh); } } } mutex_unlock(&kppg->mutex); return need_resume; } void ipu_psys_enter_power_gating(struct ipu_psys *psys) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; int ret = 0; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); /* * Only for SUSPENDED kppgs, STOPPED kppgs has already * power down and new kppgs might come now. 
*/ if (kppg->state != PPG_STATE_SUSPENDED) { mutex_unlock(&kppg->mutex); continue; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ret = pm_runtime_put(&psys->adev->dev); if (ret < 0) { dev_err(&psys->adev->dev, "failed to power gating off\n"); pm_runtime_get_sync(&psys->adev->dev); } #else ret = pm_runtime_put(dev); if (ret < 0) { dev_err(dev, "failed to power gating off\n"); pm_runtime_get_sync(dev); } #endif mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } } void ipu_psys_exit_power_gating(struct ipu_psys *psys) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; int ret = 0; list_for_each_entry(fh, &psys->fhs, list) { mutex_lock(&fh->mutex); sched = &fh->sched; if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); /* Only for SUSPENDED kppgs */ if (kppg->state != PPG_STATE_SUSPENDED) { mutex_unlock(&kppg->mutex); continue; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) ret = pm_runtime_get_sync(&psys->adev->dev); if (ret < 0) { dev_err(&psys->adev->dev, "failed to power gating\n"); pm_runtime_put_noidle(&psys->adev->dev); } #else ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "failed to power gating\n"); pm_runtime_put_noidle(dev); } #endif mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-ppg.h000066400000000000000000000023001471702545500270300ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU6_PPG_H #define IPU6_PPG_H #include "ipu-psys.h" /* starting from '2' in case of someone passes true or false */ enum SCHED_LIST { SCHED_START_LIST = 2, SCHED_STOP_LIST }; enum ipu_psys_power_gating_state { PSYS_POWER_NORMAL = 0, PSYS_POWER_GATING, PSYS_POWER_GATED }; int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd, struct ipu_psys_ppg *kppg); struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg); void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg, enum SCHED_LIST type); void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg, enum SCHED_LIST type); int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg); int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg); int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg); int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg); void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg); bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg); void ipu_psys_enter_power_gating(struct ipu_psys *psys); void ipu_psys_exit_power_gating(struct ipu_psys *psys); #endif /* IPU6_PPG_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-psys-gpc.c000066400000000000000000000116761471702545500300220ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #ifdef CONFIG_DEBUG_FS #include #include #include "ipu-psys.h" #include "ipu-platform-regs.h" /* * GPC (Gerneral Performance Counters) */ #define IPU_PSYS_GPC_NUM 16 #ifndef CONFIG_PM #define pm_runtime_get_sync(d) 0 #define pm_runtime_put(d) 0 #endif struct ipu_psys_gpc { bool enable; unsigned int route; unsigned int source; unsigned int sense; unsigned int gpcindex; void *prit; }; struct ipu_psys_gpcs { bool gpc_enable; struct ipu_psys_gpc 
gpc[IPU_PSYS_GPC_NUM]; void *prit; }; static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val) { struct ipu_psys_gpcs *psys_gpcs = data; struct ipu_psys *psys = psys_gpcs->prit; mutex_lock(&psys->mutex); *val = psys_gpcs->gpc_enable; mutex_unlock(&psys->mutex); return 0; } static int ipu6_psys_gpc_global_enable_set(void *data, u64 val) { struct ipu_psys_gpcs *psys_gpcs = data; struct ipu_psys *psys = psys_gpcs->prit; void __iomem *base; int idx, res; if (val != 0 && val != 1) return -EINVAL; if (!psys || !psys->pdata || !psys->pdata->base) return -EINVAL; mutex_lock(&psys->mutex); base = psys->pdata->base + IPU_GPC_BASE; res = pm_runtime_get_sync(&psys->adev->dev); if (res < 0) { pm_runtime_put(&psys->adev->dev); mutex_unlock(&psys->mutex); return res; } if (val == 0) { writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); psys_gpcs->gpc_enable = false; for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { psys_gpcs->gpc[idx].enable = 0; psys_gpcs->gpc[idx].sense = 0; psys_gpcs->gpc[idx].route = 0; psys_gpcs->gpc[idx].source = 0; } pm_runtime_put(&psys->adev->dev); } else { /* Set gpc reg and start all gpc here. * RST free running local timer. */ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST); writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST); for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { /* Enable */ writel(psys_gpcs->gpc[idx].enable, base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx); /* Setting (route/source/sense) */ writel((psys_gpcs->gpc[idx].sense << IPU_GPC_SENSE_OFFSET) + (psys_gpcs->gpc[idx].route << IPU_GPC_ROUTE_OFFSET) + (psys_gpcs->gpc[idx].source << IPU_GPC_SOURCE_OFFSET), base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx); } /* Soft reset and Overall Enable. 
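 * Sequence: clear OVERALL_ENABLE, pulse SOFT_RESET with 0xffff and
 * set OVERALL_ENABLE again so every counter restarts from a clean
 * state.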
*/ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET); writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE); psys_gpcs->gpc_enable = true; } mutex_unlock(&psys->mutex); return 0; } DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_globe_enable_fops, ipu6_psys_gpc_global_enable_get, ipu6_psys_gpc_global_enable_set, "%llu\n"); static int ipu6_psys_gpc_count_get(void *data, u64 *val) { struct ipu_psys_gpc *psys_gpc = data; struct ipu_psys *psys = psys_gpc->prit; void __iomem *base; int res; if (!psys || !psys->pdata || !psys->pdata->base) return -EINVAL; mutex_lock(&psys->mutex); base = psys->pdata->base + IPU_GPC_BASE; res = pm_runtime_get_sync(&psys->adev->dev); if (res < 0) { pm_runtime_put(&psys->adev->dev); mutex_unlock(&psys->mutex); return res; } *val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex); mutex_unlock(&psys->mutex); return 0; } DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops, ipu6_psys_gpc_count_get, NULL, "%llu\n"); int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys) { struct dentry *gpcdir; struct dentry *dir; struct dentry *file; int idx; char gpcname[10]; struct ipu_psys_gpcs *psys_gpcs; psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL); if (!psys_gpcs) return -ENOMEM; gpcdir = debugfs_create_dir("gpc", psys->debugfsdir); if (IS_ERR(gpcdir)) return -ENOMEM; psys_gpcs->prit = psys; file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs, &psys_gpc_globe_enable_fops); if (IS_ERR(file)) goto err; for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) { sprintf(gpcname, "gpc%d", idx); dir = debugfs_create_dir(gpcname, gpcdir); if (IS_ERR(dir)) goto err; #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) file = debugfs_create_bool("enable", 0600, dir, &psys_gpcs->gpc[idx].enable); if (IS_ERR(file)) goto err; #else debugfs_create_bool("enable", 0600, dir, &psys_gpcs->gpc[idx].enable); #endif debugfs_create_u32("source", 0600, dir, &psys_gpcs->gpc[idx].source); debugfs_create_u32("route", 0600, dir, &psys_gpcs->gpc[idx].route); debugfs_create_u32("sense", 0600, dir, &psys_gpcs->gpc[idx].sense); psys_gpcs->gpc[idx].gpcindex = idx; psys_gpcs->gpc[idx].prit = psys; file = debugfs_create_file("count", 0400, dir, &psys_gpcs->gpc[idx], &psys_gpc_count_fops); if (IS_ERR(file)) goto err; } return 0; err: debugfs_remove_recursive(gpcdir); return -ENOMEM; } #endif ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6-psys.c000066400000000000000000001022611471702545500272420ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) #include #else #include #endif #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) #include "ipu.h" #include "ipu-psys.h" #include "ipu6-ppg.h" #include "ipu-platform-regs.h" #include "ipu-trace.h" #else #include "ipu6.h" #include "ipu-psys.h" #include "ipu6-ppg.h" #include "ipu6-platform-regs.h" #include "ipu6-platform-buttress-regs.h" #endif MODULE_IMPORT_NS(DMA_BUF); static bool early_pg_transfer; module_param(early_pg_transfer, bool, 0664); MODULE_PARM_DESC(early_pg_transfer, "Copy PGs back to user after resource allocation"); bool enable_power_gating = true; module_param(enable_power_gating, bool, 0664); MODULE_PARM_DESC(enable_power_gating, "enable power gating"); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) struct ipu_trace_block psys_trace_blocks[] = { { .offset = 
IPU_TRACE_REG_PS_TRACE_UNIT_BASE, .type = IPU_TRACE_BLOCK_TUN, }, { .offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE, .type = IPU_TRACE_BLOCK_TM, }, { .offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE, .type = IPU_TRACE_BLOCK_TM, }, { .offset = IPU_TRACE_REG_PS_SPC_GPC_BASE, .type = IPU_TRACE_BLOCK_GPC, }, { .offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE, .type = IPU_TRACE_BLOCK_GPC, }, { .offset = IPU_TRACE_REG_PS_MMU_GPC_BASE, .type = IPU_TRACE_BLOCK_GPC, }, { .offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, .type = IPU_TRACE_TIMER_RST, }, { .type = IPU_TRACE_BLOCK_END, } }; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) static void ipu6_set_sp_info_bits(void *base) { int i; writel(IPU_INFO_REQUEST_DESTINATION_IOSF, base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); for (i = 0; i < 4; i++) writel(IPU_INFO_REQUEST_DESTINATION_IOSF, base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); for (i = 0; i < 4; i++) writel(IPU_INFO_REQUEST_DESTINATION_IOSF, base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); } #else static void ipu6_set_sp_info_bits(void __iomem *base) { int i; writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); for (i = 0; i < 4; i++) writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); for (i = 0; i < 4; i++) writel(IPU6_INFO_REQUEST_DESTINATION_IOSF, base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); } #endif #define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000 void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif unsigned int i; u32 val; /* power domain req */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "power %s psys sub-domains", on ? "UP" : "DOWN"); #else dev_dbg(dev, "power %s psys sub-domains", on ? "UP" : "DOWN"); #endif if (on) writel(IPU_PSYS_SUBDOMAINS_POWER_MASK, psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); else writel(0x0, psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ); i = 0; do { usleep_range(10, 20); val = readl(psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_STATUS); if (!(val & BIT(31))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "PS sub-domains req done with status 0x%x", #else dev_dbg(dev, "PS sub-domains req done with status 0x%x", #endif val); break; } i++; } while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT); if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_warn(&psys->adev->dev, "Psys sub-domains %s req timeout!", #else dev_warn(dev, "Psys sub-domains %s req timeout!", #endif on ? 
"UP" : "DOWN"); } void ipu_psys_setup_hw(struct ipu_psys *psys) { void __iomem *base = psys->pdata->base; void __iomem *spc_regs_base = base + psys->pdata->ipdata->hw_variant.spc_offset; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) void *psys_iommu0_ctrl; #else void __iomem *psys_iommu0_ctrl; #endif u32 irqs; const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG; const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR; const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL; if (!psys->adev->isp->secure_mode) { /* configure access blocker for non-secure mode */ writel(NCI_AB_ACCESS_MODE_RW, base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3)); writel(NCI_AB_ACCESS_MODE_RW, base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4)); writel(NCI_AB_ACCESS_MODE_RW, base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR + IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5)); } psys_iommu0_ctrl = base + psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) IPU_MMU_INFO_OFFSET; #else IPU6_MMU_INFO_OFFSET; #endif writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl); ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); /* Enable FW interrupt #0 */ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) irqs = IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0); #else irqs = IPU6_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0); #endif writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); } static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd) { struct ipu_psys_scheduler *sched = &kcmd->fh->sched; struct ipu_psys_ppg *kppg, *tmp; mutex_lock(&kcmd->fh->mutex); if (list_empty(&sched->ppgs)) goto not_found; list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { if (ipu_fw_psys_pg_get_token(kcmd) != kppg->token) continue; mutex_unlock(&kcmd->fh->mutex); return kppg; } not_found: mutex_unlock(&kcmd->fh->mutex); return NULL; } /* * Called to free up all resources associated with a kcmd. * After this the kcmd doesn't anymore exist in the driver. 
*/ static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) { struct ipu_psys_ppg *kppg; struct ipu_psys_scheduler *sched; if (!kcmd) return; kppg = ipu_psys_identify_kppg(kcmd); sched = &kcmd->fh->sched; if (kcmd->kbuf_set) { mutex_lock(&sched->bs_mutex); kcmd->kbuf_set->buf_set_size = 0; mutex_unlock(&sched->bs_mutex); kcmd->kbuf_set = NULL; } if (kppg) { mutex_lock(&kppg->mutex); if (!list_empty(&kcmd->list)) list_del(&kcmd->list); mutex_unlock(&kppg->mutex); } kfree(kcmd->pg_manifest); kfree(kcmd->kbufs); kfree(kcmd->buffers); kfree(kcmd); } static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_kcmd *kcmd; struct ipu_psys_kbuffer *kpgbuf; unsigned int i; int ret, prevfd, fd; fd = prevfd = -1; if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) return NULL; if (!cmd->pg_manifest_size) return NULL; kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); if (!kcmd) return NULL; kcmd->state = KCMD_STATE_PPG_NEW; kcmd->fh = fh; INIT_LIST_HEAD(&kcmd->list); mutex_lock(&fh->mutex); fd = cmd->pg; kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); if (!kpgbuf || !kpgbuf->sgt) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "%s kbuf %p with fd %d not found.\n", #else dev_err(dev, "%s kbuf %p with fd %d not found.\n", #endif __func__, kpgbuf, fd); mutex_unlock(&fh->mutex); goto error; } /* check and remap if possibe */ kpgbuf = ipu_psys_mapbuf_locked(fd, fh); if (!kpgbuf || !kpgbuf->sgt) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "%s remap failed\n", __func__); #else dev_err(dev, "%s remap failed\n", __func__); #endif mutex_unlock(&fh->mutex); goto error; } mutex_unlock(&fh->mutex); kcmd->pg_user = kpgbuf->kaddr; kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); if (!kcmd->kpg) goto error; memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL); if (!kcmd->pg_manifest) goto error; ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, cmd->pg_manifest_size); if (ret) goto error; kcmd->pg_manifest_size = cmd->pg_manifest_size; kcmd->user_token = cmd->user_token; kcmd->issue_id = cmd->issue_id; kcmd->priority = cmd->priority; if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) goto error; /* * Kenel enable bitmap be used only. 
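 * (Only cmd->kernel_enable_bitmap is copied into the kcmd here; the
 * terminal/routing enable bitmaps and rbm of the user command are
 * not copied.)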
*/ memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap, sizeof(cmd->kernel_enable_bitmap)); kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), GFP_KERNEL); if (!kcmd->buffers) goto error; kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), GFP_KERNEL); if (!kcmd->kbufs) goto error; /* should be stop cmd for ppg */ if (!cmd->buffers) { kcmd->state = KCMD_STATE_PPG_STOP; return kcmd; } if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) goto error; ret = copy_from_user(kcmd->buffers, cmd->buffers, kcmd->nbuffers * sizeof(*kcmd->buffers)); if (ret) goto error; for (i = 0; i < kcmd->nbuffers; i++) { struct ipu_fw_psys_terminal *terminal; terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); if (!terminal) continue; if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) { kcmd->state = KCMD_STATE_PPG_START; continue; } if (kcmd->state == KCMD_STATE_PPG_START) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "err: all buffer.flags&DMA_HANDLE must 0\n"); #else dev_err(dev, "buffer.flags & DMA_HANDLE must be 0\n"); #endif goto error; } mutex_lock(&fh->mutex); fd = kcmd->buffers[i].base.fd; kpgbuf = ipu_psys_lookup_kbuffer(fh, fd); if (!kpgbuf || !kpgbuf->sgt) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, #else dev_err(dev, #endif "%s kcmd->buffers[%d] %p fd %d not found.\n", __func__, i, kpgbuf, fd); mutex_unlock(&fh->mutex); goto error; } kpgbuf = ipu_psys_mapbuf_locked(fd, fh); if (!kpgbuf || !kpgbuf->sgt) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "%s remap failed\n", #else dev_err(dev, "%s remap failed\n", #endif __func__); mutex_unlock(&fh->mutex); goto error; } mutex_unlock(&fh->mutex); kcmd->kbufs[i] = kpgbuf; if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) goto error; if ((kcmd->kbufs[i]->flags & IPU_BUFFER_FLAG_NO_FLUSH) || (kcmd->buffers[i].flags & IPU_BUFFER_FLAG_NO_FLUSH) || prevfd == kcmd->buffers[i].base.fd) continue; prevfd = kcmd->buffers[i].base.fd; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dma_sync_sg_for_device(&psys->adev->dev, kcmd->kbufs[i]->sgt->sgl, kcmd->kbufs[i]->sgt->orig_nents, DMA_BIDIRECTIONAL); #else dma_sync_sg_for_device(dev, kcmd->kbufs[i]->sgt->sgl, kcmd->kbufs[i]->sgt->orig_nents, DMA_BIDIRECTIONAL); #endif } if (kcmd->state != KCMD_STATE_PPG_START) kcmd->state = KCMD_STATE_PPG_ENQUEUE; return kcmd; error: ipu_psys_kcmd_free(kcmd); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "failed to copy cmd\n"); #else dev_dbg(dev, "failed to copy cmd\n"); #endif return NULL; } static struct ipu_psys_buffer_set * ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr) { struct ipu_psys_fh *fh; struct ipu_psys_buffer_set *kbuf_set; struct ipu_psys_scheduler *sched; list_for_each_entry(fh, &psys->fhs, list) { sched = &fh->sched; mutex_lock(&sched->bs_mutex); list_for_each_entry(kbuf_set, &sched->buf_sets, list) { if (kbuf_set->buf_set && kbuf_set->buf_set->ipu_virtual_address == addr) { mutex_unlock(&sched->bs_mutex); return kbuf_set; } } mutex_unlock(&sched->bs_mutex); } return NULL; } static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys, dma_addr_t pg_addr) { struct ipu_psys_scheduler *sched; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { sched = &fh->sched; mutex_lock(&fh->mutex); if (list_empty(&sched->ppgs)) { 
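		/* No PPGs have been created through this fh yet. */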
mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { if (pg_addr != kppg->kpg->pg_dma_addr) continue; mutex_unlock(&fh->mutex); return kppg; } mutex_unlock(&fh->mutex); } return NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) #define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 static void ipu_buttress_set_psys_ratio(struct ipu6_device *isp, unsigned int psys_divisor, unsigned int psys_qos_floor) { struct ipu6_buttress_ctrl *ctrl = isp->psys->ctrl; mutex_lock(&isp->buttress.power_mutex); if (ctrl->ratio == psys_divisor && ctrl->qos_floor == psys_qos_floor) goto out_mutex_unlock; ctrl->ratio = psys_divisor; ctrl->qos_floor = psys_qos_floor; if (ctrl->started) { /* * According to documentation driver initiates DVFS * transition by writing wanted ratio, floor ratio and start * bit. No need to stop PS first */ writel(BUTTRESS_FREQ_CTL_START | ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); } out_mutex_unlock: mutex_unlock(&isp->buttress.power_mutex); } static void ipu_buttress_set_psys_freq(struct ipu6_device *isp, unsigned int freq) { unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; dev_dbg(&isp->psys->auxdev.dev, "freq:%u\n", freq); ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); } static void ipu_buttress_add_psys_constraint(struct ipu6_device *isp, struct ipu6_psys_constraint *constraint) { struct ipu6_buttress *b = &isp->buttress; mutex_lock(&b->cons_mutex); list_add(&constraint->list, &b->constraints); mutex_unlock(&b->cons_mutex); } static void ipu_buttress_remove_psys_constraint(struct ipu6_device *isp, struct ipu6_psys_constraint *constraint) { struct ipu6_buttress *b = &isp->buttress; struct ipu6_psys_constraint *c; unsigned int min_freq = 0; mutex_lock(&b->cons_mutex); list_del(&constraint->list); list_for_each_entry(c, &b->constraints, list) if (c->min_freq > min_freq) min_freq = c->min_freq; ipu_buttress_set_psys_freq(isp, min_freq); mutex_unlock(&b->cons_mutex); } #endif /* * Move kcmd into completed state (due to running finished or failure). * Fill up the event struct and notify waiters. */ void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg, struct ipu_psys_kcmd *kcmd, int error) { struct ipu_psys_fh *fh = kcmd->fh; struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; kcmd->ev.user_token = kcmd->user_token; kcmd->ev.issue_id = kcmd->issue_id; kcmd->ev.error = error; list_move_tail(&kcmd->list, &kppg->kcmds_finished_list); if (kcmd->constraint.min_freq) ipu_buttress_remove_psys_constraint(psys->adev->isp, &kcmd->constraint); if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { struct ipu_psys_kbuffer *kbuf; kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, kcmd->pg_user); if (kbuf && kbuf->valid) memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); else #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "Skipping unmapped buffer\n"); #else dev_dbg(dev, "Skipping unmapped buffer\n"); #endif } kcmd->state = KCMD_STATE_PPG_COMPLETE; wake_up_interruptible(&fh->wait); } /* * Submit kcmd into psys queue. If running fails, complete the kcmd * with an error. * * Found a runnable PG. Move queue to the list tail for round-robin * scheduling and run the PG. Start the watchdog timer if the PG was * started successfully. Enable PSYS power if requested. 
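 * In this implementation the PG is started with ipu_fw_psys_pg_start()
 * and handed over to firmware with ipu_fw_psys_pg_disown(); a failure
 * from either call is returned to the caller.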
*/ int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (psys->adev->isp->flr_done) return -EIO; #endif if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); ret = ipu_fw_psys_pg_start(kcmd); if (ret) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "failed to start kcmd!\n"); #else dev_err(dev, "failed to start kcmd!\n"); #endif return ret; } ipu_fw_psys_pg_dump(psys, kcmd, "run"); ret = ipu_fw_psys_pg_disown(kcmd); if (ret) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "failed to start kcmd!\n"); #else dev_err(dev, "failed to start kcmd!\n"); #endif return ret; } return 0; } static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd) { struct ipu_psys_fh *fh = kcmd->fh; struct ipu_psys_scheduler *sched = &fh->sched; struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_ppg *kppg; struct ipu_psys_resource_pool *rpr; int queue_id; int ret; rpr = &psys->resource_pool_running; kppg = kzalloc(sizeof(*kppg), GFP_KERNEL); if (!kppg) return -ENOMEM; kppg->fh = fh; kppg->kpg = kcmd->kpg; kppg->state = PPG_STATE_START; kppg->pri_base = kcmd->priority; kppg->pri_dynamic = 0; INIT_LIST_HEAD(&kppg->list); mutex_init(&kppg->mutex); INIT_LIST_HEAD(&kppg->kcmds_new_list); INIT_LIST_HEAD(&kppg->kcmds_processing_list); INIT_LIST_HEAD(&kppg->kcmds_finished_list); INIT_LIST_HEAD(&kppg->sched_list); kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL); if (!kppg->manifest) { kfree(kppg); return -ENOMEM; } memcpy(kppg->manifest, kcmd->pg_manifest, kcmd->pg_manifest_size); queue_id = ipu_psys_allocate_cmd_queue_resource(rpr); if (queue_id == -ENOSPC) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "no available queue\n"); #else dev_err(dev, "no available queue\n"); #endif kfree(kppg->manifest); kfree(kppg); mutex_unlock(&psys->mutex); return -ENOMEM; } /* * set token as start cmd will immediately be followed by a * enqueue cmd so that kppg could be retrieved. 
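 * The kpg pointer value doubles as the token; it is matched against
 * ipu_fw_psys_pg_get_token() later in ipu_psys_identify_kppg().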
*/ kppg->token = (u64)kcmd->kpg; ipu_fw_psys_pg_set_token(kcmd, kppg->token); ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id); ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, kcmd->kpg->pg_dma_addr); if (ret) { ipu_psys_free_cmd_queue_resource(rpr, queue_id); kfree(kppg->manifest); kfree(kppg); return -EIO; } memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); mutex_lock(&fh->mutex); list_add_tail(&kppg->list, &sched->ppgs); mutex_unlock(&fh->mutex); mutex_lock(&kppg->mutex); list_add(&kcmd->list, &kppg->kcmds_new_list); mutex_unlock(&kppg->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n", #else dev_dbg(dev, "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n", #endif ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id); /* Kick l-scheduler thread */ atomic_set(&psys->wakeup_count, 1); wake_up_interruptible(&psys->sched_cmd_wq); return 0; } static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd) { struct ipu_psys_fh *fh = kcmd->fh; struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_ppg *kppg; struct ipu_psys_resource_pool *rpr; unsigned long flags; u8 id; bool resche = true; rpr = &psys->resource_pool_running; if (kcmd->state == KCMD_STATE_PPG_START) return ipu_psys_kcmd_send_to_ppg_start(kcmd); kppg = ipu_psys_identify_kppg(kcmd); spin_lock_irqsave(&psys->pgs_lock, flags); kcmd->kpg->pg_size = 0; spin_unlock_irqrestore(&psys->pgs_lock, flags); if (!kppg) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "token not match\n"); #else dev_err(dev, "token not match\n"); #endif return -EINVAL; } kcmd->kpg = kppg->kpg; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "%s ppg(%d, 0x%p) kcmd %p\n", #else dev_dbg(dev, "%s ppg(%d, 0x%p) kcmd %p\n", #endif (kcmd->state == KCMD_STATE_PPG_STOP) ? 
"STOP" : "ENQUEUE", ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd); if (kcmd->state == KCMD_STATE_PPG_STOP) { mutex_lock(&kppg->mutex); if (kppg->state == PPG_STATE_STOPPED) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "kppg 0x%p stopped!\n", kppg); #else dev_dbg(dev, "kppg 0x%p stopped!\n", kppg); #endif id = ipu_fw_psys_ppg_get_base_queue_id(kcmd); ipu_psys_free_cmd_queue_resource(rpr, id); ipu_psys_kcmd_complete(kppg, kcmd, 0); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) pm_runtime_put(&psys->adev->dev); #else pm_runtime_put(dev); #endif resche = false; } else { list_add(&kcmd->list, &kppg->kcmds_new_list); } mutex_unlock(&kppg->mutex); } else { int ret; ret = ipu_psys_ppg_get_bufset(kcmd, kppg); if (ret) return ret; mutex_lock(&kppg->mutex); list_add_tail(&kcmd->list, &kppg->kcmds_new_list); mutex_unlock(&kppg->mutex); } if (resche) { /* Kick l-scheduler thread */ atomic_set(&psys->wakeup_count, 1); wake_up_interruptible(&psys->sched_cmd_wq); } return 0; } int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_kcmd *kcmd; size_t pg_size; int ret; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) if (psys->adev->isp->flr_done) return -EIO; #endif kcmd = ipu_psys_copy_cmd(cmd, fh); if (!kcmd) return -EINVAL; pg_size = ipu_fw_psys_pg_get_size(kcmd); if (pg_size > kcmd->kpg->pg_size) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "pg size mismatch %lu %lu\n", pg_size, kcmd->kpg->pg_size); #else dev_dbg(dev, "pg size mismatch %lu %lu\n", pg_size, kcmd->kpg->pg_size); #endif ret = -EINVAL; goto error; } if (ipu_fw_psys_pg_get_protocol(kcmd) != IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "No support legacy pg now\n"); #else dev_err(dev, "No support legacy pg now\n"); #endif ret = -EINVAL; goto error; } if (cmd->min_psys_freq) { kcmd->constraint.min_freq = cmd->min_psys_freq; ipu_buttress_add_psys_constraint(psys->adev->isp, &kcmd->constraint); } ret = ipu_psys_kcmd_send_to_ppg(kcmd); if (ret) goto error; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", #else dev_dbg(dev, "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", #endif cmd->user_token, cmd->issue_id, cmd->priority); return 0; error: ipu_psys_kcmd_free(kcmd); return ret; } static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) { struct ipu_psys_fh *fh; struct ipu_psys_kcmd *kcmd0; struct ipu_psys_ppg *kppg, *tmp; struct ipu_psys_scheduler *sched; list_for_each_entry(fh, &psys->fhs, list) { sched = &fh->sched; mutex_lock(&fh->mutex); if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); continue; } list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) { mutex_lock(&kppg->mutex); list_for_each_entry(kcmd0, &kppg->kcmds_processing_list, list) { if (kcmd0 == kcmd) { mutex_unlock(&kppg->mutex); mutex_unlock(&fh->mutex); return true; } } mutex_unlock(&kppg->mutex); } mutex_unlock(&fh->mutex); } return false; } void ipu_psys_handle_events(struct ipu_psys *psys) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_kcmd *kcmd; struct ipu_fw_psys_event event; struct ipu_psys_ppg *kppg; bool error; u32 hdl; u16 cmd, status; int res; do { 
memset(&event, 0, sizeof(event)); if (!ipu_fw_psys_rcv_event(psys, &event)) break; if (!event.context_handle) break; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "ppg event: 0x%x, %d, status %d\n", event.context_handle, event.command, event.status); #else dev_dbg(dev, "ppg event: 0x%x, %d, status %d\n", event.context_handle, event.command, event.status); #endif error = false; /* * event.command == CMD_RUN shows this is fw processing frame * done as pPG mode, and event.context_handle should be pointer * of buffer set; so we make use of this pointer to lookup * kbuffer_set and kcmd */ hdl = event.context_handle; cmd = event.command; status = event.status; kppg = NULL; kcmd = NULL; if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) { struct ipu_psys_buffer_set *kbuf_set; /* * Need change ppg state when the 1st running is done * (after PPG started/resumed) */ kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl); if (kbuf_set) kcmd = kbuf_set->kcmd; if (!kbuf_set || !kcmd) error = true; else kppg = ipu_psys_identify_kppg(kcmd); } else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP || cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND || cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) { /* * STOP/SUSPEND/RESUME cmd event would run this branch; * only stop cmd queued by user has stop_kcmd and need * to notify user to dequeue. */ kppg = ipu_psys_lookup_ppg(psys, hdl); if (kppg) { mutex_lock(&kppg->mutex); if (kppg->state == PPG_STATE_STOPPING) { kcmd = ipu_psys_ppg_get_stop_kcmd(kppg); if (!kcmd) error = true; } mutex_unlock(&kppg->mutex); } } else { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "invalid event\n"); #else dev_err(dev, "invalid event\n"); #endif continue; } if (error || !kppg) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_err(&psys->adev->dev, "event error, command %d\n", cmd); #else dev_err(dev, "event error, command %d\n", cmd); #endif break; } #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "event to kppg 0x%p, kcmd 0x%p\n", kppg, kcmd); #else dev_dbg(dev, "event to kppg 0x%p, kcmd 0x%p\n", kppg, kcmd); #endif ipu_psys_ppg_complete(psys, kppg); if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) { res = (status == IPU_PSYS_EVENT_CMD_COMPLETE || status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ? 
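			/* anything other than (fragment) complete maps to -EIO */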
0 : -EIO; mutex_lock(&kppg->mutex); ipu_psys_kcmd_complete(kppg, kcmd, res); mutex_unlock(&kppg->mutex); } } while (1); } int ipu_psys_fh_init(struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp; struct ipu_psys_scheduler *sched = &fh->sched; int i; mutex_init(&sched->bs_mutex); INIT_LIST_HEAD(&sched->buf_sets); INIT_LIST_HEAD(&sched->ppgs); /* allocate and map memory for buf_sets */ for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) { kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); if (!kbuf_set) goto out_free_buf_sets; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) kbuf_set->kaddr = dma_alloc_attrs(&psys->adev->dev, IPU_PSYS_BUF_SET_MAX_SIZE, &kbuf_set->dma_addr, GFP_KERNEL, 0); #else kbuf_set->kaddr = dma_alloc_attrs(dev, IPU_PSYS_BUF_SET_MAX_SIZE, &kbuf_set->dma_addr, GFP_KERNEL, 0); #endif if (!kbuf_set->kaddr) { kfree(kbuf_set); goto out_free_buf_sets; } kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE; list_add(&kbuf_set->list, &sched->buf_sets); } return 0; out_free_buf_sets: list_for_each_entry_safe(kbuf_set, kbuf_set_tmp, &sched->buf_sets, list) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dma_free_attrs(&psys->adev->dev, kbuf_set->size, kbuf_set->kaddr, #else dma_free_attrs(dev, kbuf_set->size, kbuf_set->kaddr, #endif kbuf_set->dma_addr, 0); list_del(&kbuf_set->list); kfree(kbuf_set); } mutex_destroy(&sched->bs_mutex); return -ENOMEM; } int ipu_psys_fh_deinit(struct ipu_psys_fh *fh) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_ppg *kppg, *kppg0; struct ipu_psys_kcmd *kcmd, *kcmd0; struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0; struct ipu_psys_scheduler *sched = &fh->sched; struct ipu_psys_resource_pool *rpr; struct ipu_psys_resource_alloc *alloc; u8 id; mutex_lock(&fh->mutex); if (!list_empty(&sched->ppgs)) { list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) { unsigned long flags; mutex_lock(&kppg->mutex); if (!(kppg->state & (PPG_STATE_STOPPED | PPG_STATE_STOPPING))) { struct ipu_psys_kcmd tmp = { .kpg = kppg->kpg, }; rpr = &psys->resource_pool_running; alloc = &kppg->kpg->resource_alloc; id = ipu_fw_psys_ppg_get_base_queue_id(&tmp); ipu_psys_ppg_stop(kppg); ipu_psys_free_resources(alloc, rpr); ipu_psys_free_cmd_queue_resource(rpr, id); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, #else dev_dbg(dev, #endif "s_change:%s %p %d -> %d\n", __func__, kppg, kppg->state, PPG_STATE_STOPPED); kppg->state = PPG_STATE_STOPPED; if (psys->power_gating != PSYS_POWER_GATED) #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) pm_runtime_put(&psys->adev->dev); #else pm_runtime_put(dev); #endif } list_del(&kppg->list); mutex_unlock(&kppg->mutex); list_for_each_entry_safe(kcmd, kcmd0, &kppg->kcmds_new_list, list) { kcmd->pg_user = NULL; mutex_unlock(&fh->mutex); ipu_psys_kcmd_free(kcmd); mutex_lock(&fh->mutex); } list_for_each_entry_safe(kcmd, kcmd0, &kppg->kcmds_processing_list, list) { kcmd->pg_user = NULL; mutex_unlock(&fh->mutex); ipu_psys_kcmd_free(kcmd); mutex_lock(&fh->mutex); } list_for_each_entry_safe(kcmd, kcmd0, &kppg->kcmds_finished_list, list) { kcmd->pg_user = NULL; mutex_unlock(&fh->mutex); ipu_psys_kcmd_free(kcmd); mutex_lock(&fh->mutex); } spin_lock_irqsave(&psys->pgs_lock, flags); kppg->kpg->pg_size = 0; spin_unlock_irqrestore(&psys->pgs_lock, flags); 
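			/*
			 * All kcmds of this PPG have been freed above, so the
			 * PPG bookkeeping itself can be torn down now.
			 */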
mutex_destroy(&kppg->mutex); kfree(kppg->manifest); kfree(kppg); } } mutex_unlock(&fh->mutex); mutex_lock(&sched->bs_mutex); list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) { #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dma_free_attrs(&psys->adev->dev, kbuf_set->size, kbuf_set->kaddr, #else dma_free_attrs(dev, kbuf_set->size, kbuf_set->kaddr, #endif kbuf_set->dma_addr, 0); list_del(&kbuf_set->list); kfree(kbuf_set); } mutex_unlock(&sched->bs_mutex); mutex_destroy(&sched->bs_mutex); return 0; } struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh) { struct ipu_psys_scheduler *sched = &fh->sched; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &fh->psys->adev->auxdev.dev; #endif struct ipu_psys_kcmd *kcmd; struct ipu_psys_ppg *kppg; mutex_lock(&fh->mutex); if (list_empty(&sched->ppgs)) { mutex_unlock(&fh->mutex); return NULL; } list_for_each_entry(kppg, &sched->ppgs, list) { mutex_lock(&kppg->mutex); if (list_empty(&kppg->kcmds_finished_list)) { mutex_unlock(&kppg->mutex); continue; } kcmd = list_first_entry(&kppg->kcmds_finished_list, struct ipu_psys_kcmd, list); mutex_unlock(&fh->mutex); mutex_unlock(&kppg->mutex); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&fh->psys->adev->dev, "get completed kcmd 0x%p\n", kcmd); #else dev_dbg(dev, "get completed kcmd 0x%p\n", kcmd); #endif return kcmd; } mutex_unlock(&fh->mutex); return NULL; } long ipu_ioctl_dqevent(struct ipu_psys_event *event, struct ipu_psys_fh *fh, unsigned int f_flags) { struct ipu_psys *psys = fh->psys; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0) struct device *dev = &psys->adev->auxdev.dev; #endif struct ipu_psys_kcmd *kcmd = NULL; int rval; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) dev_dbg(&psys->adev->dev, "IOC_DQEVENT\n"); #else dev_dbg(dev, "IOC_DQEVENT\n"); #endif if (!(f_flags & O_NONBLOCK)) { rval = wait_event_interruptible(fh->wait, (kcmd = ipu_get_completed_kcmd(fh))); if (rval == -ERESTARTSYS) return rval; } if (!kcmd) { kcmd = ipu_get_completed_kcmd(fh); if (!kcmd) return -ENODATA; } *event = kcmd->ev; ipu_psys_kcmd_free(kcmd); return 0; } ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6ep-fw-resources.c000066400000000000000000000233541471702545500312220ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include "ipu-psys.h" #include "ipu-fw-psys.h" #include "ipu6-platform-resources.h" #include "ipu6ep-platform-resources.h" /* resources table */ /* * Cell types by cell IDs */ static const u8 ipu6ep_fw_psys_cell_types[IPU6EP_FW_PSYS_N_CELL_ID] = { IPU6_FW_PSYS_SP_CTRL_TYPE_ID, IPU6_FW_PSYS_VP_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_ACC_OSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_PSA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* AF */ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */ IPU6_FW_PSYS_GDC_TYPE_ID, 
IPU6_FW_PSYS_TNR_TYPE_ID, }; static const u16 ipu6ep_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = { IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, }; static const u16 ipu6ep_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = { IPU6_FW_PSYS_VMEM0_MAX_SIZE, IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE, IPU6_FW_PSYS_LB_VMEM_MAX_SIZE, IPU6_FW_PSYS_BAMEM0_MAX_SIZE, IPU6_FW_PSYS_DMEM0_MAX_SIZE, IPU6_FW_PSYS_DMEM1_MAX_SIZE, IPU6_FW_PSYS_DMEM2_MAX_SIZE, IPU6_FW_PSYS_DMEM3_MAX_SIZE, IPU6_FW_PSYS_PMEM0_MAX_SIZE }; static const u16 ipu6ep_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = { IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE, IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE, }; static const u8 ipu6ep_fw_psys_c_mem[IPU6EP_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = { { /* IPU6_FW_PSYS_SP0_ID */ IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_DMEM0_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_SP1_ID */ IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_DMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_VP0_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_DMEM3_ID, IPU6_FW_PSYS_VMEM0_ID, IPU6_FW_PSYS_BAMEM0_ID, IPU6_FW_PSYS_PMEM0_ID, }, { /* IPU6_FW_PSYS_ACC1_ID BNLM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC2_ID DM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC3_ID ACM */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC5_ID OFS pin main */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC6_ID OFS pin display */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC9_ID GLTM */ 
IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ACC10_ID XNR */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_ICA_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_LSC_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_DPC_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_SIS_A_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_SIS_B_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_B2B_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_AWB_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_AE_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_AF_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_X2B_MD_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_ISA_PAF_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_LB_VMEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, 
IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, }, { /* IPU6_FW_PSYS_BB_ACC_TNR_ID */ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, IPU6_FW_PSYS_TRANSFER_VMEM1_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, IPU6_FW_PSYS_N_MEM_ID, } }; static const struct ipu_fw_resource_definitions ipu6ep_defs = { .cells = ipu6ep_fw_psys_cell_types, .num_cells = IPU6EP_FW_PSYS_N_CELL_ID, .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID, .dev_channels = ipu6ep_fw_num_dev_channels, .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID, .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID, .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID, .ext_mem_ids = ipu6ep_fw_psys_mem_size, .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID, .dfms = ipu6ep_fw_psys_dfms, .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID, .cell_mem = &ipu6ep_fw_psys_c_mem[0][0], }; const struct ipu_fw_resource_definitions *ipu6ep_res_defs = &ipu6ep_defs; ipu6ep-platform-resources.h000066400000000000000000000023111471702545500323460ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2020 - 2024 Intel Corporation */ #ifndef IPU6EP_PLATFORM_RESOURCES_H #define IPU6EP_PLATFORM_RESOURCES_H #include #include enum { IPU6EP_FW_PSYS_SP0_ID = 0, IPU6EP_FW_PSYS_VP0_ID, IPU6EP_FW_PSYS_PSA_ACC_BNLM_ID, IPU6EP_FW_PSYS_PSA_ACC_DM_ID, IPU6EP_FW_PSYS_PSA_ACC_ACM_ID, IPU6EP_FW_PSYS_PSA_ACC_GTC_YUV1_ID, IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID, IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID, IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_PP_ID, IPU6EP_FW_PSYS_PSA_ACC_GAMMASTAR_ID, IPU6EP_FW_PSYS_PSA_ACC_GLTM_ID, IPU6EP_FW_PSYS_PSA_ACC_XNR_ID, IPU6EP_FW_PSYS_PSA_VCSC_ID, /* VCSC */ IPU6EP_FW_PSYS_ISA_ICA_ID, IPU6EP_FW_PSYS_ISA_LSC_ID, IPU6EP_FW_PSYS_ISA_DPC_ID, IPU6EP_FW_PSYS_ISA_SIS_A_ID, IPU6EP_FW_PSYS_ISA_SIS_B_ID, IPU6EP_FW_PSYS_ISA_B2B_ID, IPU6EP_FW_PSYS_ISA_B2R_R2I_SIE_ID, IPU6EP_FW_PSYS_ISA_R2I_DS_A_ID, IPU6EP_FW_PSYS_ISA_AWB_ID, IPU6EP_FW_PSYS_ISA_AE_ID, IPU6EP_FW_PSYS_ISA_AF_ID, IPU6EP_FW_PSYS_ISA_X2B_MD_ID, IPU6EP_FW_PSYS_ISA_X2B_SVE_RGBIR_ID, IPU6EP_FW_PSYS_ISA_PAF_ID, IPU6EP_FW_PSYS_BB_ACC_GDC0_ID, IPU6EP_FW_PSYS_BB_ACC_TNR_ID, IPU6EP_FW_PSYS_N_CELL_ID }; #endif /* IPU6EP_PLATFORM_RESOURCES_H */ ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/ipu6se-fw-resources.c000066400000000000000000000141561471702545500312250ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2015 - 2024 Intel Corporation #include #include #include "ipu-psys.h" #include "ipu-fw-psys.h" #include "ipu6se-platform-resources.h" /* resources table */ /* * Cell types by cell IDs */ /* resources table */ /* * Cell types by cell IDs */ #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = { #else static const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = { #endif IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID, IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* 
IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID*/ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID /* PAF */ }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = { #else static const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = { #endif IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = { #else static const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = { #endif IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE, IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE, IPU6SE_FW_PSYS_DMEM0_MAX_SIZE, IPU6SE_FW_PSYS_DMEM1_MAX_SIZE }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = { #else static const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = { #endif IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE, IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 10, 0) const u8 #else static const u8 #endif ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = { { /* IPU6SE_FW_PSYS_SP0_ID */ IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_DMEM0_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_ICA_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_LSC_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_DPC_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_B2B_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_BNLM_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_DM_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_AWB_ID */ 
IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_AE_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_AF_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, }, { /* IPU6SE_FW_PSYS_ISA_PAF_ID */ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID, IPU6SE_FW_PSYS_LB_VMEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, IPU6SE_FW_PSYS_N_MEM_ID, } }; static const struct ipu_fw_resource_definitions ipu6se_defs = { .cells = ipu6se_fw_psys_cell_types, .num_cells = IPU6SE_FW_PSYS_N_CELL_ID, .num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID, .dev_channels = ipu6se_fw_num_dev_channels, .num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID, .num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID, .num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID, .ext_mem_ids = ipu6se_fw_psys_mem_size, .num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID, .dfms = ipu6se_fw_psys_dfms, .cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID, .cell_mem = &ipu6se_fw_psys_c_mem[0][0], }; const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs; ipu6se-platform-resources.h000066400000000000000000000060521471702545500323570ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/drivers/media/pci/intel/ipu6/psys/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2018 - 2024 Intel Corporation */ #ifndef IPU6SE_PLATFORM_RESOURCES_H #define IPU6SE_PLATFORM_RESOURCES_H #include #include #include "ipu-platform-resources.h" #define IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 1 enum { IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0, IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID, IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID, IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID, IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID, IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID, IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID }; enum { IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0, IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID, IPU6SE_FW_PSYS_DMEM_TYPE_ID, IPU6SE_FW_PSYS_VMEM_TYPE_ID, IPU6SE_FW_PSYS_BAMEM_TYPE_ID, IPU6SE_FW_PSYS_PMEM_TYPE_ID, IPU6SE_FW_PSYS_N_MEM_TYPE_ID }; enum ipu6se_mem_id { IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0, /* TRANSFER VMEM 0 */ IPU6SE_FW_PSYS_LB_VMEM_ID, /* LB VMEM */ IPU6SE_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */ IPU6SE_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */ IPU6SE_FW_PSYS_N_MEM_ID }; enum { IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID, IPU6SE_FW_PSYS_N_DEV_CHN_ID }; enum { IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0, IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID, IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, IPU6SE_FW_PSYS_N_CELL_TYPE_ID }; enum { IPU6SE_FW_PSYS_SP0_ID = 0, IPU6SE_FW_PSYS_ISA_ICA_ID, IPU6SE_FW_PSYS_ISA_LSC_ID, IPU6SE_FW_PSYS_ISA_DPC_ID, IPU6SE_FW_PSYS_ISA_B2B_ID, IPU6SE_FW_PSYS_ISA_BNLM_ID, IPU6SE_FW_PSYS_ISA_DM_ID, IPU6SE_FW_PSYS_ISA_R2I_SIE_ID, IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID, IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID, IPU6SE_FW_PSYS_ISA_AWB_ID, IPU6SE_FW_PSYS_ISA_AE_ID, IPU6SE_FW_PSYS_ISA_AF_ID, IPU6SE_FW_PSYS_ISA_PAF_ID, IPU6SE_FW_PSYS_N_CELL_ID }; enum { IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0, 
IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID, }; /* Excluding PMEM */ #define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1) #define IPU6SE_FW_PSYS_N_DEV_DFM_ID \ (IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1) #define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE 0x0800 /* Transfer VMEM0 words, ref HAS Transfer*/ #define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800 #define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */ #define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE 0x4000 #define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE 0x1000 #define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 22 #define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 22 #define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 22 #define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0 #define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 #define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32 #define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32 #define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32 #define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32 #endif /* IPU6SE_PLATFORM_RESOURCES_H */ ipu6-drivers-0~git202411190607.0ad49882/include/000077500000000000000000000000001471702545500203345ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/include/media/000077500000000000000000000000001471702545500214135ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/include/media/ipu-isys.h000066400000000000000000000020571471702545500233520ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2014 - 2022 Intel Corporation */ #ifndef MEDIA_IPU_H #define MEDIA_IPU_H #include #include #include #include #define IPU_ISYS_MAX_CSI2_LANES 4 struct ipu_isys_csi2_config { unsigned int nlanes; unsigned int port; }; struct ipu_isys_subdev_i2c_info { struct i2c_board_info board_info; int i2c_adapter_id; char i2c_adapter_bdf[32]; }; struct ipu_isys_subdev_info { struct ipu_isys_csi2_config *csi2; struct ipu_isys_subdev_i2c_info i2c; }; struct ipu_isys_clk_mapping { struct clk_lookup clkdev_data; char *platform_clock_name; }; struct ipu_isys_subdev_pdata { struct ipu_isys_subdev_info **subdevs; struct ipu_isys_clk_mapping *clk_map; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0) struct sensor_async_subdev { struct v4l2_async_subdev asd; struct ipu_isys_csi2_config csi2; }; #else struct sensor_async_sd { struct v4l2_async_connection asc; struct ipu_isys_csi2_config csi2; }; #endif #endif /* MEDIA_IPU_H */ ipu6-drivers-0~git202411190607.0ad49882/include/uapi/000077500000000000000000000000001471702545500212725ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/include/uapi/linux/000077500000000000000000000000001471702545500224315ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/include/uapi/linux/ipu-isys.h000066400000000000000000000007411471702545500243660ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (C) 2016 - 2020 Intel Corporation */ #ifndef UAPI_LINUX_IPU_ISYS_H #define UAPI_LINUX_IPU_ISYS_H #define V4L2_CID_IPU_BASE (V4L2_CID_USER_BASE + 0x1080) #define V4L2_CID_IPU_STORE_CSI2_HEADER (V4L2_CID_IPU_BASE + 2) #define V4L2_CID_IPU_ISYS_COMPRESSION (V4L2_CID_IPU_BASE + 3) #define VIDIOC_IPU_GET_DRIVER_VERSION \ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, uint32_t) #endif /* UAPI_LINUX_IPU_ISYS_H */ ipu6-drivers-0~git202411190607.0ad49882/include/uapi/linux/ipu-psys.h000066400000000000000000000071171471702545500244010ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH 
Linux-syscall-note */ /* Copyright (C) 2013 - 2020 Intel Corporation */ #ifndef _UAPI_IPU_PSYS_H #define _UAPI_IPU_PSYS_H #ifdef __KERNEL__ #include #else #include #endif struct ipu_psys_capability { uint32_t version; uint8_t driver[20]; uint32_t pg_count; uint8_t dev_model[32]; uint32_t reserved[17]; } __attribute__ ((packed)); struct ipu_psys_event { uint32_t type; /* IPU_PSYS_EVENT_TYPE_ */ uint64_t user_token; uint64_t issue_id; uint32_t buffer_idx; uint32_t error; int32_t reserved[2]; } __attribute__ ((packed)); #define IPU_PSYS_EVENT_TYPE_CMD_COMPLETE 1 #define IPU_PSYS_EVENT_TYPE_BUFFER_COMPLETE 2 /** * struct ipu_psys_buffer - for input/output terminals * @len: total allocated size @ base address * @userptr: user pointer * @fd: DMA-BUF handle * @data_offset:offset to valid data * @bytes_used: amount of valid data including offset * @flags: flags */ struct ipu_psys_buffer { uint64_t len; union { int fd; void __user *userptr; uint64_t reserved; } base; uint32_t data_offset; uint32_t bytes_used; uint32_t flags; uint32_t reserved[2]; } __attribute__ ((packed)); #define IPU_BUFFER_FLAG_INPUT (1 << 0) #define IPU_BUFFER_FLAG_OUTPUT (1 << 1) #define IPU_BUFFER_FLAG_MAPPED (1 << 2) #define IPU_BUFFER_FLAG_NO_FLUSH (1 << 3) #define IPU_BUFFER_FLAG_DMA_HANDLE (1 << 4) #define IPU_BUFFER_FLAG_USERPTR (1 << 5) #define IPU_PSYS_CMD_PRIORITY_HIGH 0 #define IPU_PSYS_CMD_PRIORITY_MED 1 #define IPU_PSYS_CMD_PRIORITY_LOW 2 #define IPU_PSYS_CMD_PRIORITY_NUM 3 /** * struct ipu_psys_command - processing command * @issue_id: unique id for the command set by user * @user_token: token of the command * @priority: priority of the command * @pg_manifest: userspace pointer to program group manifest * @buffers: userspace pointers to array of psys dma buf structs * @pg: process group DMA-BUF handle * @pg_manifest_size: size of program group manifest * @bufcount: number of buffers in buffers array * @min_psys_freq: minimum psys frequency in MHz used for this cmd * @frame_counter: counter of current frame synced between isys and psys * @kernel_enable_bitmap: enable bits for each individual kernel * @terminal_enable_bitmap: enable bits for each individual terminals * @routing_enable_bitmap: enable bits for each individual routing * @rbm: enable bits for routing * * Specifies a processing command with input and output buffers. 
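 *
 * A minimal user-space sketch of queueing a command and collecting its
 * completion event via the ioctls defined at the end of this header. This
 * is illustrative only: the character-device node name /dev/ipu-psys0 and
 * the field values are assumptions, and a real command must also fill in
 * pg_manifest, buffers, bufcount and the remaining fields:
 *
 *	int fd = open("/dev/ipu-psys0", O_RDWR);
 *	struct ipu_psys_command cmd = {
 *		.issue_id = 1,
 *		.priority = IPU_PSYS_CMD_PRIORITY_MED,
 *	};
 *	struct ipu_psys_event ev;
 *
 *	ioctl(fd, IPU_IOC_QCMD, &cmd);
 *	ioctl(fd, IPU_IOC_DQEVENT, &ev);
 *	if (ev.type == IPU_PSYS_EVENT_TYPE_CMD_COMPLETE && ev.issue_id == 1)
 *		; handle completed command, checking ev.error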
*/ struct ipu_psys_command { uint64_t issue_id; uint64_t user_token; uint32_t priority; void __user *pg_manifest; struct ipu_psys_buffer __user *buffers; int pg; uint32_t pg_manifest_size; uint32_t bufcount; uint32_t min_psys_freq; uint32_t frame_counter; uint32_t kernel_enable_bitmap[4]; uint32_t terminal_enable_bitmap[4]; uint32_t routing_enable_bitmap[4]; uint32_t rbm[5]; uint32_t reserved[2]; } __attribute__ ((packed)); struct ipu_psys_manifest { uint32_t index; uint32_t size; void __user *manifest; uint32_t reserved[5]; } __attribute__ ((packed)); #define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability) #define IPU_IOC_MAPBUF _IOWR('A', 2, int) #define IPU_IOC_UNMAPBUF _IOWR('A', 3, int) #define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer) #define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer) #define IPU_IOC_QCMD _IOWR('A', 6, struct ipu_psys_command) #define IPU_IOC_DQEVENT _IOWR('A', 7, struct ipu_psys_event) #define IPU_IOC_CMD_CANCEL _IOWR('A', 8, struct ipu_psys_command) #define IPU_IOC_GET_MANIFEST _IOWR('A', 9, struct ipu_psys_manifest) #endif /* _UAPI_IPU_PSYS_H */ ipu6-drivers-0~git202411190607.0ad49882/patch/000077500000000000000000000000001471702545500200105ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/0002-iommu-Add-passthrough-for-MTL-IPU.patch000066400000000000000000000050141471702545500274420ustar00rootroot00000000000000diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 52afcdaf7c7f..cc7df6c4e3ee 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -37,6 +37,8 @@ #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) +#define IS_INTEL_IPU(pdev) ((pdev)->vendor == PCI_VENDOR_ID_INTEL && \ + ((pdev)->device == 0x7d19)) #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) #define IOAPIC_RANGE_START (0xfee00000) @@ -287,12 +289,14 @@ int intel_iommu_enabled = 0; EXPORT_SYMBOL_GPL(intel_iommu_enabled); static int dmar_map_gfx = 1; +static int dmar_map_ipu = 1; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; static int iommu_skip_te_disable; #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 +#define IDENTMAP_IPU 8 const struct iommu_ops intel_iommu_ops; @@ -2584,6 +2588,9 @@ static int device_def_domain_type(struct device *dev) if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) return IOMMU_DOMAIN_IDENTITY; + + if ((iommu_identity_mapping & IDENTMAP_IPU) && IS_INTEL_IPU(pdev)) + return IOMMU_DOMAIN_IDENTITY; } return 0; @@ -2973,6 +2980,9 @@ static int __init init_dmars(void) if (!dmar_map_gfx) iommu_identity_mapping |= IDENTMAP_GFX; + if (!dmar_map_ipu) + iommu_identity_mapping |= IDENTMAP_IPU; + check_tylersburg_isoch(); ret = si_domain_init(hw_pass_through); @@ -4813,6 +4823,18 @@ static void quirk_iommu_igfx(struct pci_dev *dev) dmar_map_gfx = 0; } +static void quirk_iommu_ipu(struct pci_dev *dev) +{ + if (!IS_INTEL_IPU(dev)) + return; + + if (risky_device(dev)) + return; + + pci_info(dev, "Passthrough IOMMU for integrated Intel IPU\n"); + dmar_map_ipu = 0; +} + /* G4x/GM45 integrated gfx dmar support is totally busted. 
*/ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx); @@ -4848,6 +4870,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); +/* disable IPU dmar support */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_iommu_ipu); + static void quirk_iommu_rwbf(struct pci_dev *dev) { if (risky_device(dev)) ipu6-drivers-0~git202411190607.0ad49882/patch/gc5035-on-adlm/000077500000000000000000000000001471702545500222435ustar00rootroot000000000000000001-Add-the-camera-sensor-gc5035-to-support-ADL-M.patch000066400000000000000000001566721471702545500334350ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/gc5035-on-adlmFrom 0f5a529e1843d937e26c3303686418b3252f8c80 Mon Sep 17 00:00:00 2001 From: "liang.wang" Date: Tue, 18 Apr 2023 11:56:16 +0800 Subject: [PATCH] Add the camera sensor gc5035 to support ADL-M. The sensor driver was updated to support several features: 1. Add acpi to support x86 platform. 2. Add some resolutions/control and more regs specifically for ADL-M. 3. Add gpio pins supported by int3272 and power on the sensor. 4. v4l2 support. 5. Adapt to intel IPU6-drivers, like 3A. The sensor driver gc5035 is modified based on the following link: https://patchwork.kernel.org/project/linux-media/patch/20200902224813.14283-4-tfiga@chromium.org Regarding to kernel update, don't forget to add the new sensor support in .config Signed-off-by: liang.wang --- drivers/media/i2c/Kconfig | 13 + drivers/media/i2c/Makefile | 1 + drivers/media/i2c/gc5035.c | 2201 +++++++++++++++++ drivers/platform/x86/intel/int3472/common.h | 3 +- drivers/platform/x86/intel/int3472/discrete.c | 56 +- 5 files changed, 2268 insertions(+), 6 deletions(-) create mode 100644 drivers/media/i2c/gc5035.c diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 7806d4b81716..9f7ca594d32c 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -47,6 +47,19 @@ config VIDEO_AR0521 To compile this driver as a module, choose M here: the module will be called ar0521. +config VIDEO_GC5035 + tristate "Galaxycore GC5035 sensor support" + depends on I2C && VIDEO_DEV + select MEDIA_CONTROLLER + select V4L2_FWNODE + select VIDEO_V4L2_SUBDEV_API + help + This is a Video4Linux2 sensor driver for the Galaxycore + GC5035 imaging sensor.. + + To compile this driver as a module, choose M here: the + module will be called gc5035. + config VIDEO_HI556 tristate "Hynix Hi-556 sensor support" depends on I2C && VIDEO_DEV diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 0a2933103dd9..22f37eb2eaf9 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -102,6 +102,7 @@ obj-$(CONFIG_VIDEO_OV9734) += ov9734.o obj-$(CONFIG_VIDEO_RDACM20) += rdacm20.o obj-$(CONFIG_VIDEO_RDACM21) += rdacm21.o obj-$(CONFIG_VIDEO_RJ54N1) += rj54n1cb0c.o +obj-$(CONFIG_VIDEO_GC5035) += gc5035.o obj-$(CONFIG_VIDEO_S5C73M3) += s5c73m3/ obj-$(CONFIG_VIDEO_S5K4ECGX) += s5k4ecgx.o obj-$(CONFIG_VIDEO_S5K5BAF) += s5k5baf.o diff --git a/drivers/media/i2c/gc5035.c b/drivers/media/i2c/gc5035.c new file mode 100644 index 000000000000..b46fd5474b2a --- /dev/null +++ b/drivers/media/i2c/gc5035.c @@ -0,0 +1,2201 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Bitland Inc. +// Copyright 2020 Google LLC.. 
+// Copyright (c) 2022 Intel Corporation. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* External clock frequency supported by the driver */ +#define GC5035_MCLK_RATE 24000000UL +/* Number of lanes supported by this driver */ +#define GC5035_DATA_LANES 2 +/* Bits per sample of sensor output */ +#define GC5035_BITS_PER_SAMPLE 10 + +#define MIPI_FREQ 438000000LL + +/* pixel rate = link frequency * 2 * lanes / BITS_PER_SAMPLE */ +#define GC5035_PIXEL_RATE (MIPI_FREQ * 2LL * 2LL / 10) + +/* System registers (accessible regardless of the page. */ + +/* Chip ID */ +#define GC5035_REG_CHIP_ID_H 0xf0 +#define GC5035_REG_CHIP_ID_L 0xf1 +#define GC5035_CHIP_ID 0x5035 +#define GC5035_ID(_msb, _lsb) ((_msb) << 8 | (_lsb)) + +/* Register page selection register */ +#define GC5035_PAGE_REG 0xfe + +/* Page 0 registers */ + +/* Exposure control */ +#define GC5035_REG_EXPOSURE_H 0x03 +#define GC5035_REG_EXPOSURE_L 0x04 +#define GC5035_EXPOSURE_H_MASK 0x3f +#define GC5035_EXPOSURE_MIN 4 +#define GC5035_EXPOSURE_STEP 1 + +/* Analog gain control */ +#define GC5035_REG_ANALOG_GAIN 0xb6 +#define GC5035_ANALOG_GAIN_MIN 256 +#define GC5035_ANALOG_GAIN_MAX (16 * GC5035_ANALOG_GAIN_MIN) +#define GC5035_ANALOG_GAIN_STEP 1 +#define GC5035_ANALOG_GAIN_DEFAULT GC5035_ANALOG_GAIN_MIN + +/* Digital gain control */ +#define GC5035_REG_DIGI_GAIN_H 0xb1 +#define GC5035_REG_DIGI_GAIN_L 0xb2 +#define GC5035_DGAIN_H_MASK 0x0f +#define GC5035_DGAIN_L_MASK 0xfc +#define GC5035_DGAIN_L_SHIFT 2 +#define GC5035_DIGI_GAIN_MIN 0 +#define GC5035_DIGI_GAIN_MAX 1023 +#define GC5035_DIGI_GAIN_STEP 1 +#define GC5035_DIGI_GAIN_DEFAULT GC5035_DIGI_GAIN_MAX + +/* Vblank control */ +#define GC5035_REG_VTS_H 0x41 +#define GC5035_REG_VTS_L 0x42 +#define GC5035_VTS_H_MASK 0x3f +#define GC5035_VTS_MAX 16383 +#define GC5035_EXPOSURE_MARGIN 16 + +#define GC5035_REG_CTRL_MODE 0x3e +#define GC5035_MODE_SW_STANDBY 0x01 +#define GC5035_MODE_STREAMING 0x91 + +/* Page 1 registers */ + +/* Test pattern control */ +#define GC5035_REG_TEST_PATTERN 0x8c +#define GC5035_TEST_PATTERN_ENABLE 0x11 +#define GC5035_TEST_PATTERN_DISABLE 0x10 + +/* Page 2 registers */ + +/* OTP access registers */ +#define GC5035_REG_OTP_MODE 0xf3 +#define GC5035_OTP_PRE_READ 0x20 +#define GC5035_OTP_READ_MODE 0x12 +#define GC5035_OTP_READ_DONE 0x00 +#define GC5035_REG_OTP_DATA 0x6c +#define GC5035_REG_OTP_ACCESS_ADDR_H 0x69 +#define GC5035_REG_OTP_ACCESS_ADDR_L 0x6a +#define GC5035_OTP_ACCESS_ADDR_H_MASK 0x1f +#define GC5035_OTP_ADDR_MASK 0x1fff +#define GC5035_OTP_ADDR_SHIFT 3 +#define GC5035_REG_DD_TOTALNUM_H 0x01 +#define GC5035_REG_DD_TOTALNUM_L 0x02 +#define GC5035_DD_TOTALNUM_H_MASK 0x07 +#define GC5035_REG_DD_LOAD_STATUS 0x06 +#define GC5035_OTP_BIT_LOAD BIT(0) + +/* OTP-related definitions */ + +#define GC5035_OTP_ID_SIZE 9 +#define GC5035_OTP_ID_DATA_OFFSET 0x0020 +#define GC5035_OTP_DATA_LENGTH 1024 + +/* OTP DPC parameters */ +#define GC5035_OTP_DPC_FLAG_OFFSET 0x0068 +#define GC5035_OTP_DPC_FLAG_MASK 0x03 +#define GC5035_OTP_FLAG_EMPTY 0x00 +#define GC5035_OTP_FLAG_VALID 0x01 +#define GC5035_OTP_DPC_TOTAL_NUMBER_OFFSET 0x0070 +#define GC5035_OTP_DPC_ERROR_NUMBER_OFFSET 0x0078 + +/* OTP register parameters */ +#define GC5035_OTP_REG_FLAG_OFFSET 0x0880 +#define GC5035_OTP_REG_DATA_OFFSET 0x0888 +#define GC5035_OTP_REG_ADDR_OFFSET 1 +#define GC5035_OTP_REG_VAL_OFFSET 2 +#define GC5035_OTP_PAGE_FLAG_OFFSET 3 +#define GC5035_OTP_PER_PAGE_SIZE 4 
+#define GC5035_OTP_REG_PAGE_MASK 0x07 +#define GC5035_OTP_REG_MAX_GROUP 5 +#define GC5035_OTP_REG_BYTE_PER_GROUP 5 +#define GC5035_OTP_REG_PER_GROUP 2 +#define GC5035_OTP_REG_BYTE_PER_REG 2 +#define GC5035_OTP_REG_DATA_SIZE 25 +#define GC5035_OTP_REG_SIZE 10 + +#define GC5035_DD_DELAY_US (10 * 1000) +#define GC5035_DD_TIMEOUT_US (100 * 1000) + +/* The clock source index in INT3472 CLDB */ +#define INT3472_CLDB_CLKSRC_INDEX 14 + +/* + * 82c0d13a-78c5-4244-9bb1-eb8b539a8d11 + * This _DSM GUID calls CLKC and CLKF. + */ +static const guid_t clock_ctrl_guid = + GUID_INIT(0x82c0d13a, 0x78c5, 0x4244, + 0x9b, 0xb1, 0xeb, 0x8b, 0x53, 0x9a, 0x8d, 0x11); + +static const char * const gc5035_supplies[] = { + /* + * Requested separately due to power sequencing needs: + * "iovdd", * Power supply for I/O circuits * + */ + "dvdd12", /* Digital core power */ + "avdd21", /* Analog power */ +}; + +struct gc5035_regval { + u8 addr; + u8 val; +}; + +struct gc5035_reg { + u8 page; + struct gc5035_regval regval; +}; + +struct gc5035_otp_regs { + unsigned int num_regs; + struct gc5035_reg regs[GC5035_OTP_REG_SIZE]; +}; + +struct gc5035_dpc { + bool valid; + unsigned int total_num; +}; + +struct gc5035_mode { + u32 width; + u32 height; + u32 max_fps; + u32 hts_def; + u32 vts_def; + u32 exp_def; + const struct gc5035_regval *reg_list; + size_t num_regs; +}; + + +struct gc5035_power_ctrl { + /* Control Logic ACPI device */ + struct acpi_device *ctrl_logic; + /* GPIO for reset */ + struct gpio_desc *reset_gpio; + /* GPIO for power enable */ + struct gpio_desc *pwren_gpio; + /* GPIO for privacy LED */ + struct gpio_desc *pled_gpio; + + int status; + /* Clock source index */ + u8 clk_source_index; +}; + +struct gc5035 { + struct i2c_client *client; + struct clk *mclk; + unsigned long mclk_rate; + //struct gpio_desc *resetb_gpio; + ///struct gpio_desc *pwdn_gpio; + struct regulator *iovdd_supply; + struct regulator_bulk_data supplies[ARRAY_SIZE(gc5035_supplies)]; + + struct v4l2_subdev subdev; + struct media_pad pad; + struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_ctrl *exposure; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *vblank; + + u32 Dgain_ratio; + bool otp_read; + u8 otp_id[GC5035_OTP_ID_SIZE]; + struct gc5035_dpc dpc; + struct gc5035_otp_regs otp_regs; + + /* + * Serialize control access, get/set format, get selection + * and start streaming. 
+ */ + struct mutex mutex; + struct gc5035_power_ctrl power; + bool streaming; + const struct gc5035_mode *cur_mode; +}; + +static inline struct gc5035 *to_gc5035(struct v4l2_subdev *sd) +{ + return container_of(sd, struct gc5035, subdev); +} + +static const struct gc5035_regval gc5035_otp_init_regs[] = { + {0xfc, 0x01}, + {0xf4, 0x40}, + {0xf5, 0xe9}, + {0xf6, 0x14}, + {0xf8, 0x49}, + {0xf9, 0x82}, + {0xfa, 0x00}, + {0xfc, 0x81}, + {0xfe, 0x00}, + {0x36, 0x01}, + {0xd3, 0x87}, + {0x36, 0x00}, + {0x33, 0x00}, + {0xf7, 0x01}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xee, 0x30}, + {0xfa, 0x10}, + {0xf5, 0xe9}, + {0xfe, 0x02}, + {0x67, 0xc0}, + {0x59, 0x3f}, + {0x55, 0x84}, + {0x65, 0x80}, + {0x66, 0x03}, + {0xfe, 0x00}, +}; + +static const struct gc5035_regval gc5035_otp_exit_regs[] = { + {0xfe, 0x02}, + {0x67, 0x00}, + {0xfe, 0x00}, + {0xfa, 0x00}, +}; + +static const struct gc5035_regval gc5035_dd_auto_load_regs[] = { + {0xfe, 0x02}, + {0xbe, 0x00}, + {0xa9, 0x01}, + {0x09, 0x33}, +}; + +static const struct gc5035_regval gc5035_otp_dd_regs[] = { + {0x03, 0x00}, + {0x04, 0x80}, + {0x95, 0x0a}, + {0x96, 0x30}, + {0x97, 0x0a}, + {0x98, 0x32}, + {0x99, 0x07}, + {0x9a, 0xa9}, + {0xf3, 0x80}, +}; + +static const struct gc5035_regval gc5035_otp_dd_enable_regs[] = { + {0xbe, 0x01}, + {0x09, 0x00}, + {0xfe, 0x01}, + {0x80, 0x02}, + {0xfe, 0x00}, +}; + +/* + * Xclk 24Mhz + * Pclk 87.6Mhz + * grabwindow_width 2592 + * grabwindow_height 1944 + * max_framerate 30fps + * mipi_datarate per lane 876Mbps + */ +static const struct gc5035_regval gc5035_global_regs[] = { + /*init*/ + {0xfc, 0x01}, + {0xf4, 0x40}, + {0xf5, 0xe9}, + {0xf6, 0x14}, + {0xf8, 0x49}, + {0xf9, 0x82}, + {0xfa, 0x00}, + {0xfc, 0x81}, + {0xfe, 0x00}, + {0x36, 0x01}, + {0xd3, 0x87}, + {0x36, 0x00}, + {0x33, 0x00}, + {0xfe, 0x03}, + {0x01, 0xe7}, + {0xf7, 0x01}, + {0xfc, 0x8f}, + {0xfc, 0x8f}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xee, 0x30}, + {0x87, 0x18}, + {0xfe, 0x01}, + {0x8c, 0x90}, + {0xfe, 0x00}, + + /* Analog & CISCTL */ + {0xfe, 0x00}, + {0x05, 0x02}, + {0x06, 0xda}, + {0x9d, 0x0c}, + {0x09, 0x00}, + {0x0a, 0x04}, + {0x0b, 0x00}, + {0x0c, 0x03}, + {0x0d, 0x07}, + {0x0e, 0xa8}, + {0x0f, 0x0a}, + {0x10, 0x30}, + {0x11, 0x02}, + {0x17, 0x80}, + {0x19, 0x05}, + {0xfe, 0x02}, + {0x30, 0x03}, + {0x31, 0x03}, + {0xfe, 0x00}, + {0xd9, 0xc0}, + {0x1b, 0x20}, + {0x21, 0x48}, + {0x28, 0x22}, + {0x29, 0x58}, + {0x44, 0x20}, + {0x4b, 0x10}, + {0x4e, 0x1a}, + {0x50, 0x11}, + {0x52, 0x33}, + {0x53, 0x44}, + {0x55, 0x10}, + {0x5b, 0x11}, + {0xc5, 0x02}, + {0x8c, 0x1a}, + {0xfe, 0x02}, + {0x33, 0x05}, + {0x32, 0x38}, + {0xfe, 0x00}, + {0x91, 0x80}, + {0x92, 0x28}, + {0x93, 0x20}, + {0x95, 0xa0}, + {0x96, 0xe0}, + {0xd5, 0xfc}, + {0x97, 0x28}, + {0x16, 0x0c}, + {0x1a, 0x1a}, + {0x1f, 0x11}, + {0x20, 0x10}, + {0x46, 0x83}, + {0x4a, 0x04}, + {0x54, 0x02}, + {0x62, 0x00}, + {0x72, 0x8f}, + {0x73, 0x89}, + {0x7a, 0x05}, + {0x7d, 0xcc}, + {0x90, 0x00}, + {0xce, 0x18}, + {0xd0, 0xb2}, + {0xd2, 0x40}, + {0xe6, 0xe0}, + {0xfe, 0x02}, + {0x12, 0x01}, + {0x13, 0x01}, + {0x14, 0x01}, + {0x15, 0x02}, + {0x22, 0x7c}, + {0x91, 0x00}, + {0x92, 0x00}, + {0x93, 0x00}, + {0x94, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + + /* Gain */ + {0xfe, 0x00}, + {0xb0, 0x6e}, + {0xb1, 0x01}, + {0xb2, 0x00}, + {0xb3, 0x00}, + {0xb4, 0x00}, + {0xb6, 0x00}, + + /* ISP */ + {0xfe, 0x01}, + {0x53, 0x00}, + {0x89, 0x03}, + {0x60, 
0x40}, + + /* BLK */ + {0xfe, 0x01}, + {0x42, 0x21}, + {0x49, 0x03}, + {0x4a, 0xff}, + {0x4b, 0xc0}, + {0x55, 0x00}, + + /* Anti_blooming */ + {0xfe, 0x01}, + {0x41, 0x28}, + {0x4c, 0x00}, + {0x4d, 0x00}, + {0x4e, 0x3c}, + {0x44, 0x08}, + {0x48, 0x02}, + + /* Crop */ + {0xfe, 0x01}, + {0x91, 0x00}, + {0x92, 0x08}, + {0x93, 0x00}, + {0x94, 0x07}, + {0x95, 0x07}, + {0x96, 0x98}, + {0x97, 0x0a}, + {0x98, 0x20}, + {0x99, 0x00}, + + /* MIPI */ + {0xfe, 0x03}, + {0x02, 0x57}, + {0x03, 0xb7}, + {0x15, 0x14}, + {0x18, 0x0f}, + {0x21, 0x22}, + {0x22, 0x06}, + {0x23, 0x48}, + {0x24, 0x12}, + {0x25, 0x28}, + {0x26, 0x08}, + {0x29, 0x06}, + {0x2a, 0x58}, + {0x2b, 0x08}, + {0xfe, 0x01}, + {0x8c, 0x10}, + + {0xfe, 0x00}, + {0x3e, 0x01}, +}; + +/* + * Xclk 24Mhz + * Pclk 87.6Mhz + * grabwindow_width 2592 + * grabwindow_height 1944 + * max_framerate 30fps + * mipi_datarate per lane 876Mbps + */ +static const struct gc5035_regval gc5035_2592x1944_regs[] = { + /* System */ + {0xfe, 0x00}, + {0x3e, 0x01}, + {0xfc, 0x01}, + {0xf4, 0x40}, + {0xf5, 0xe9}, + {0xf6, 0x14}, + {0xf8, 0x58}, + {0xf9, 0x82}, + {0xfa, 0x00}, + {0xfc, 0x81}, + {0xfe, 0x00}, + {0x36, 0x01}, + {0xd3, 0x87}, + {0x36, 0x00}, + {0x33, 0x00}, + {0xfe, 0x03}, + {0x01, 0xe7}, + {0xf7, 0x01}, + {0xfc, 0x8f}, + {0xfc, 0x8f}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xee, 0x30}, + {0x87, 0x18}, + {0xfe, 0x01}, + {0x8c, 0x90}, + {0xfe, 0x00}, + /*Analog & CISCTL*/ + {0x03, 0x03},//0x06 + {0x04, 0xd8}, + {0x41, 0x07},//08 + {0x42, 0xd8},//58 + {0x05, 0x02}, + {0x06, 0xda}, + {0x9d, 0x18}, + {0x09, 0x00}, + {0x0a, 0x04}, + {0x0b, 0x00}, + {0x0c, 0x03}, + {0x0d, 0x07}, + {0x0e, 0xa8}, + {0x0f, 0x0a}, + {0x10, 0x30}, + {0x11, 0x02}, + {0x17, 0x80}, + {0x19, 0x05}, + {0xfe, 0x02}, + {0x30, 0x03}, + {0x31, 0x03}, + {0xfe, 0x00}, + {0xd9, 0xc0}, + {0x1b, 0x20}, + {0x21, 0x40}, + {0x28, 0x22}, + {0x29, 0x56}, + {0x44, 0x20}, + {0x4b, 0x10}, + {0x4e, 0x1a}, + {0x50, 0x11}, + {0x52, 0x33}, + {0x53, 0x44}, + {0x55, 0x10}, + {0x5b, 0x11}, + {0xc5, 0x02}, + {0x8c, 0x1a}, + {0xfe, 0x02}, + {0x33, 0x05}, + {0x32, 0x38}, + {0xfe, 0x00}, + {0x91, 0x80}, + {0x92, 0x28}, + {0x93, 0x20}, + {0x95, 0xa0}, + {0x96, 0xe0}, + {0xd5, 0xfc}, + {0x97, 0x28}, + {0x16, 0x0c}, + {0x1a, 0x1a}, + {0x1f, 0x11}, + {0x20, 0x10}, + {0x46, 0x83}, + {0x4a, 0x04}, + {0x54, 0x02}, + {0x62, 0x00}, + {0x72, 0x8f}, + {0x73, 0x89}, + {0x7a, 0x05}, + {0x7d, 0xcc}, + {0x90, 0x00}, + {0xce, 0x18}, + {0xd0, 0xb2}, + {0xd2, 0x40}, + {0xe6, 0xe0}, + {0xfe, 0x02}, + {0x12, 0x01}, + {0x13, 0x01}, + {0x14, 0x01}, + {0x15, 0x02}, + {0x22, 0x7c}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + /*GAIN*/ + {0xfe, 0x00}, + {0xb0, 0x6e}, + {0xb1, 0x01}, + {0xb2, 0x00}, + {0xb3, 0x00}, + {0xb4, 0x00}, + {0xb6, 0x00}, + /*ISP*/ + {0xfe, 0x01}, + {0x53, 0x00}, + {0x89, 0x03}, + {0x60, 0x40}, + /*BLK*/ + {0xfe, 0x01}, + {0x42, 0x21}, + {0x49, 0x03}, + {0x4a, 0xff}, + {0x4b, 0xc0}, + {0x55, 0x00}, + /*anti_blooming*/ + {0xfe, 0x01}, + {0x41, 0x28}, + {0x4c, 0x00}, + {0x4d, 0x00}, + {0x4e, 0x3c}, + {0x44, 0x08}, + {0x48, 0x02}, + /*CROP*/ + {0xfe, 0x01}, + {0x91, 0x00}, + {0x92, 0x08}, + {0x93, 0x00}, + {0x94, 0x08}, + {0x95, 0x07}, + {0x96, 0x98}, + {0x97, 0x0a}, + {0x98, 0x20}, + {0x99, 0x00}, + /*MIPI*/ + {0xfe, 0x03}, + {0x02, 0x57}, + {0x03, 0xb7}, + {0x15, 0x14}, + {0x18, 0x0f}, + {0x21, 0x22}, + {0x22, 0x06}, + {0x23, 0x48}, + {0x24, 0x12}, + {0x25, 0x28}, + {0x26, 
0x08}, + {0x29, 0x06}, + {0x2a, 0x58}, + {0x2b, 0x08}, + {0xfe, 0x01}, + {0x8c, 0x10}, + {0xfe, 0x00}, + {0x3e, 0x01}, +}; + +/* + * Xclk 24Mhz + * Pclk 87.6Mhz + * grabwindow_width 1296 + * grabwindow_height 972 + * mipi_datarate per lane 876Mbps + */ +static const struct gc5035_regval gc5035_1296x972_regs[] = { + /*NULL*/ + {0xfe, 0x00}, + {0x3e, 0x01}, + {0xfc, 0x01}, + {0xf4, 0x40}, + {0xf5, 0xe4}, + {0xf6, 0x14}, + {0xf8, 0x49}, + {0xf9, 0x12}, + {0xfa, 0x01}, + {0xfc, 0x81}, + {0xfe, 0x00}, + {0x36, 0x01}, + {0xd3, 0x87}, + {0x36, 0x00}, + {0x33, 0x20}, + {0xfe, 0x03}, + {0x01, 0x87}, + {0xf7, 0x11}, + {0xfc, 0x8f}, + {0xfc, 0x8f}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xee, 0x30}, + {0x87, 0x18}, + {0xfe, 0x01}, + {0x8c, 0x90}, + {0xfe, 0x00}, + + /* Analog & CISCTL */ + {0xfe, 0x00}, + {0x05, 0x02}, + {0x06, 0xda}, + {0x9d, 0x0c}, + {0x09, 0x00}, + {0x0a, 0x04}, + {0x0b, 0x00}, + {0x0c, 0x03}, + {0x0d, 0x07}, + {0x0e, 0xa8}, + {0x0f, 0x0a}, + {0x10, 0x30}, + {0x21, 0x60}, + {0x29, 0x30}, + {0x44, 0x18}, + {0x4e, 0x20}, + {0x8c, 0x20}, + {0x91, 0x15}, + {0x92, 0x3a}, + {0x93, 0x20}, + {0x95, 0x45}, + {0x96, 0x35}, + {0xd5, 0xf0}, + {0x97, 0x20}, + {0x1f, 0x19}, + {0xce, 0x18}, + {0xd0, 0xb3}, + {0xfe, 0x02}, + {0x14, 0x02}, + {0x15, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + + /* BLK */ + {0xfe, 0x01}, + {0x49, 0x00}, + {0x4a, 0x01}, + {0x4b, 0xf8}, + + /* Anti_blooming */ + {0xfe, 0x01}, + {0x4e, 0x06}, + {0x44, 0x02}, + + /* Crop */ + {0xfe, 0x01}, + {0x91, 0x00}, + {0x92, 0x04}, + {0x93, 0x00}, + {0x94, 0x03}, + {0x95, 0x03}, + {0x96, 0xcc}, + {0x97, 0x05}, + {0x98, 0x10}, + {0x99, 0x00}, + + /* MIPI */ + {0xfe, 0x03}, + {0x02, 0x58}, + {0x22, 0x03}, + {0x26, 0x06}, + {0x29, 0x03}, + {0x2b, 0x06}, + {0xfe, 0x01}, + {0x8c, 0x10}, +}; + +/* + * Xclk 24Mhz + * Pclk 87.6Mhz + * linelength 672{0x2a0) + * framelength 2232{0x8b8) + * grabwindow_width 1280 + * grabwindow_height 720 + * max_framerate 30fps + * mipi_datarate per lane 876Mbps + */ +static const struct gc5035_regval gc5035_1280x720_regs[] = { + /* System */ + {0xfe, 0x00}, + {0x3e, 0x01}, + {0xfc, 0x01}, + {0xf4, 0x40}, + {0xf5, 0xe4}, + {0xf6, 0x14}, + {0xf8, 0x49}, + {0xf9, 0x12}, + {0xfa, 0x01}, + {0xfc, 0x81}, + {0xfe, 0x00}, + {0x36, 0x01}, + {0xd3, 0x87}, + {0x36, 0x00}, + {0x33, 0x20}, + {0xfe, 0x03}, + {0x01, 0x87}, + {0xf7, 0x11}, + {0xfc, 0x8f}, + {0xfc, 0x8f}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xee, 0x30}, + {0x87, 0x18}, + {0xfe, 0x01}, + {0x8c, 0x90}, + {0xfe, 0x00}, + + /* Analog & CISCTL */ + {0xfe, 0x00}, + {0x05, 0x02}, + {0x06, 0xda}, + {0x9d, 0x0c}, + {0x09, 0x00}, + {0x0a, 0x04}, + {0x0b, 0x00}, + {0x0c, 0x03}, + {0x0d, 0x07}, + {0x0e, 0xa8}, + {0x0f, 0x0a}, + {0x10, 0x30}, + {0x21, 0x60}, + {0x29, 0x30}, + {0x44, 0x18}, + {0x4e, 0x20}, + {0x8c, 0x20}, + {0x91, 0x15}, + {0x92, 0x3a}, + {0x93, 0x20}, + {0x95, 0x45}, + {0x96, 0x35}, + {0xd5, 0xf0}, + {0x97, 0x20}, + {0x1f, 0x19}, + {0xce, 0x18}, + {0xd0, 0xb3}, + {0xfe, 0x02}, + {0x14, 0x02}, + {0x15, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfe, 0x00}, + {0xfc, 0x88}, + {0xfe, 0x10}, + {0xfe, 0x00}, + {0xfc, 0x8e}, + + /* BLK */ + {0xfe, 0x01}, + {0x49, 0x00}, + {0x4a, 0x01}, + {0x4b, 0xf8}, + + /* Anti_blooming */ + {0xfe, 0x01}, + {0x4e, 0x06}, + {0x44, 0x02}, + + /* Crop */ + {0xfe, 0x01}, + {0x91, 0x00}, + {0x92, 
0x0a}, + {0x93, 0x00}, + {0x94, 0x0b}, + {0x95, 0x02}, + {0x96, 0xd0}, + {0x97, 0x05}, + {0x98, 0x00}, + {0x99, 0x00}, + + /* MIPI */ + {0xfe, 0x03}, + {0x02, 0x58}, + {0x22, 0x03}, + {0x26, 0x06}, + {0x29, 0x03}, + {0x2b, 0x06}, + {0xfe, 0x01}, + {0x8c, 0x10}, + {0xfe, 0x00}, + {0x3e, 0x91}, +}; + +static const struct gc5035_mode gc5035_modes[] = { + { + .width = 2592, + .height = 1944, + .max_fps = 30, + .exp_def = 0x258, + .hts_def = 2920, + .vts_def = 2008, + .reg_list = gc5035_2592x1944_regs, + .num_regs = ARRAY_SIZE(gc5035_2592x1944_regs), + }, + { + .width = 1296, + .height = 972, + .max_fps = 30, + .exp_def = 0x258, + .hts_def = 1460, + .vts_def = 2008, + .reg_list = gc5035_1296x972_regs, + .num_regs = ARRAY_SIZE(gc5035_1296x972_regs), + }, + { + .width = 1280, + .height = 720, + .max_fps = 60, + .exp_def = 0x258, + .hts_def = 1896, + .vts_def = 1536, + .reg_list = gc5035_1280x720_regs, + .num_regs = ARRAY_SIZE(gc5035_1280x720_regs), + }, +}; + +static const char * const gc5035_test_pattern_menu[] = { + "Disabled", + "Color Bar", +}; + +static const s64 gc5035_link_freqs[] = { + 438000000, +}; + +static u64 gc5035_link_to_pixel_rate(u32 f_index) +{ + u64 pixel_rate = gc5035_link_freqs[f_index] * 2 * GC5035_DATA_LANES; + + do_div(pixel_rate, GC5035_BITS_PER_SAMPLE); + + return pixel_rate; +} + +static struct gpio_desc* gc5035_get_gpio(struct gc5035 *gc5035, + const char* name) +{ + struct device *dev = &gc5035->client->dev; + struct gpio_desc* gpio; + int ret; + + gpio = devm_gpiod_get(dev, name, GPIOD_OUT_HIGH); + ret = PTR_ERR_OR_ZERO(gpio); + if (ret < 0) { + gpio = NULL; + dev_warn(dev, "failed to get %s gpio: %d\n", name, ret); + } + + return gpio; +} + +static void gc5035_init_power_ctrl(struct gc5035 *gc5035) +{ + struct gc5035_power_ctrl* power = &gc5035->power; + acpi_handle handle = ACPI_HANDLE(&gc5035->client->dev); + struct acpi_handle_list dep_devices; + acpi_status status; + int i = 0; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + + power->ctrl_logic = NULL; + if (!acpi_has_method(handle, "_DEP")) + return; + + /* Call _DEP method of OV13B */ + status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); + if (ACPI_FAILURE(status)) { + acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); + return; + } + + /* Find INT3472 in _DEP */ + for (i = 0; i < dep_devices.count; i++) { + struct acpi_device *dep_device = NULL; + const char* dep_hid = NULL; + + if (dep_devices.handles[i]) { + #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) + acpi_bus_get_device(dep_devices.handles[i], &dep_device); + #else + dep_device = acpi_fetch_acpi_dev(dep_devices.handles[i]); + #endif + } + if (dep_device) + dep_hid = acpi_device_hid(dep_device); + if (dep_hid && strcmp("INT3472", dep_hid) == 0) { + power->ctrl_logic = dep_device; + break; + } + } + + /* If we got INT3472 acpi device, read which clock source we'll use */ + if (power->ctrl_logic == NULL) + return; + status = acpi_evaluate_object(power->ctrl_logic->handle, + "CLDB", NULL, &buffer); + if (ACPI_FAILURE(status)) { + dev_warn(&gc5035->client->dev, "Read INT3472 CLDB failed"); + return; + } + + obj = buffer.pointer; + if (!obj) + dev_warn(&gc5035->client->dev, "INT3472 CLDB return NULL"); + if (obj->type != ACPI_TYPE_BUFFER) { + acpi_handle_err(power->ctrl_logic->handle, + "CLDB object is not an ACPI buffer\n"); + kfree(obj); + return; + } + if (obj->buffer.length < INT3472_CLDB_CLKSRC_INDEX + 1) { + acpi_handle_err(power->ctrl_logic->handle, + "The CLDB buffer size is 
wrong\n"); + kfree(obj); + return; + } + + /* Get the clock source index */ + gc5035->power.clk_source_index = + obj->buffer.pointer[INT3472_CLDB_CLKSRC_INDEX]; + kfree(obj); + + /* Get gpios mapped by INT3472 driver */ + power->reset_gpio = gc5035_get_gpio(gc5035, "reset"); + power->pwren_gpio = gc5035_get_gpio(gc5035, "pwren"); + power->pled_gpio = gc5035_get_gpio(gc5035, "pled"); + power->status = 0; +} + +static void gc5035_set_power(struct gc5035 *gc5035, int on) +{ + struct gc5035_power_ctrl* power = &gc5035->power; + + on = (on ? 1 : 0); + if (on == power->status) + return; + + /* First, set reset pin as low */ + if (power->reset_gpio) { + gpiod_set_value_cansleep(power->reset_gpio, 0); + msleep(5); + } + + /* Use _DSM of INT3472 to enable clock */ + if (power->ctrl_logic) { + u8 clock_args[] = { power->clk_source_index, on, 0x01,}; + union acpi_object clock_ctrl_args = { + .buffer = { + .type = ACPI_TYPE_BUFFER, + .length = 3, + .pointer = clock_args, + }, + }; + acpi_evaluate_dsm(power->ctrl_logic->handle, + &clock_ctrl_guid, 0x00, 0x01, + &clock_ctrl_args); + } + + /* Set power pin and privacy LED pin */ + if (power->pwren_gpio) + gpiod_set_value_cansleep(power->pwren_gpio, on); + if (power->pled_gpio) + gpiod_set_value_cansleep(power->pled_gpio, on); + + /* If we need to power on, set reset pin to high at last */ + if (on && power->reset_gpio) { + gpiod_set_value_cansleep(power->reset_gpio, 1); + msleep(5); + } + power->status = on; +} + +static int gc5035_write_reg(struct gc5035 *gc5035, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(gc5035->client, reg, val); +} + +static int gc5035_write_array(struct gc5035 *gc5035, + const struct gc5035_regval *regs, + size_t num_regs) +{ + unsigned int i; + int ret; + + for (i = 0; i < num_regs; i++) { + ret = gc5035_write_reg(gc5035, regs[i].addr, regs[i].val); + if (ret) + return ret; + } + + return 0; +} + +static int gc5035_read_reg(struct gc5035 *gc5035, u8 reg, u8 *val) +{ + int ret; + + ret = i2c_smbus_read_byte_data(gc5035->client, reg); + if (ret < 0) + return ret; + + *val = (unsigned char)ret; + + return 0; +} + +static int gc5035_otp_read_data(struct gc5035 *gc5035, u16 bit_addr, u8 *data, + size_t length) +{ + size_t i; + int ret; + + if (WARN_ON(bit_addr % 8)) + return -EINVAL; + + if (WARN_ON(bit_addr / 8 + length > GC5035_OTP_DATA_LENGTH)) + return -EINVAL; + + ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 2); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_ACCESS_ADDR_H, + (bit_addr >> 8) & + GC5035_OTP_ACCESS_ADDR_H_MASK); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_ACCESS_ADDR_L, + bit_addr & 0xff); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_MODE, + GC5035_OTP_PRE_READ); + if (ret) + goto out_read_done; + + ret = gc5035_write_reg(gc5035, GC5035_REG_OTP_MODE, + GC5035_OTP_READ_MODE); + if (ret) + goto out_read_done; + + for (i = 0; i < length; i++) { + ret = gc5035_read_reg(gc5035, GC5035_REG_OTP_DATA, &data[i]); + if (ret) + goto out_read_done; + } + +out_read_done: + gc5035_write_reg(gc5035, GC5035_REG_OTP_MODE, GC5035_OTP_READ_DONE); + + return ret; +} + +static int gc5035_read_otp_regs(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + struct gc5035_otp_regs *otp_regs = &gc5035->otp_regs; + u8 regs[GC5035_OTP_REG_DATA_SIZE] = {0}; + unsigned int i, j; + u8 flag; + int ret; + + ret = gc5035_otp_read_data(gc5035, GC5035_OTP_REG_FLAG_OFFSET, + &flag, 1); + if (ret) { + dev_err(dev, "failed to read otp 
reg flag\n"); + return ret; + } + + dev_dbg(dev, "register update flag = 0x%x\n", flag); + + gc5035->otp_regs.num_regs = 0; + if (flag != GC5035_OTP_FLAG_VALID) + return 0; + + ret = gc5035_otp_read_data(gc5035, GC5035_OTP_REG_DATA_OFFSET, + regs, sizeof(regs)); + if (ret) { + dev_err(dev, "failed to read otp reg data\n"); + return ret; + } + + for (i = 0; i < GC5035_OTP_REG_MAX_GROUP; i++) { + unsigned int base_group = i * GC5035_OTP_REG_BYTE_PER_GROUP; + + for (j = 0; j < GC5035_OTP_REG_PER_GROUP; j++) { + struct gc5035_reg *reg; + + if (!(regs[base_group] & + BIT((GC5035_OTP_PER_PAGE_SIZE * j + + GC5035_OTP_PAGE_FLAG_OFFSET)))) + continue; + + reg = &otp_regs->regs[otp_regs->num_regs++]; + reg->page = (regs[base_group] >> + (GC5035_OTP_PER_PAGE_SIZE * j)) & + GC5035_OTP_REG_PAGE_MASK; + reg->regval.addr = regs[base_group + j * + GC5035_OTP_REG_BYTE_PER_REG + + GC5035_OTP_REG_ADDR_OFFSET]; + reg->regval.val = regs[base_group + j * + GC5035_OTP_REG_BYTE_PER_REG + + GC5035_OTP_REG_VAL_OFFSET]; + } + } + + return 0; +} + +static int gc5035_read_dpc(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + struct gc5035_dpc *dpc = &gc5035->dpc; + u8 dpc_flag = 0; + u8 error_number = 0; + u8 total_number = 0; + int ret; + + ret = gc5035_otp_read_data(gc5035, GC5035_OTP_DPC_FLAG_OFFSET, + &dpc_flag, 1); + if (ret) { + dev_err(dev, "failed to read dpc flag\n"); + return ret; + } + + dev_dbg(dev, "dpc flag = 0x%x\n", dpc_flag); + + dpc->valid = false; + + switch (dpc_flag & GC5035_OTP_DPC_FLAG_MASK) { + case GC5035_OTP_FLAG_EMPTY: + dev_dbg(dev, "dpc info is empty!!\n"); + break; + + case GC5035_OTP_FLAG_VALID: + dev_dbg(dev, "dpc info is valid!\n"); + ret = gc5035_otp_read_data(gc5035, + GC5035_OTP_DPC_TOTAL_NUMBER_OFFSET, + &total_number, 1); + if (ret) { + dev_err(dev, "failed to read dpc total number\n"); + return ret; + } + + ret = gc5035_otp_read_data(gc5035, + GC5035_OTP_DPC_ERROR_NUMBER_OFFSET, + &error_number, 1); + if (ret) { + dev_err(dev, "failed to read dpc error number\n"); + return ret; + } + + dpc->total_num = total_number + error_number; + dpc->valid = true; + dev_dbg(dev, "total_num = %d\n", dpc->total_num); + break; + + default: + break; + } + + return ret; +} + +static int gc5035_otp_read_sensor_info(struct gc5035 *gc5035) +{ + int ret; + + ret = gc5035_read_dpc(gc5035); + if (ret) + return ret; + + return gc5035_read_otp_regs(gc5035); +} + +static int gc5035_check_dd_load_status(struct gc5035 *gc5035) +{ + u8 status; + int ret; + + ret = gc5035_read_reg(gc5035, GC5035_REG_DD_LOAD_STATUS, &status); + if (ret) + return ret; + + if (status & GC5035_OTP_BIT_LOAD) + return status; + else + return 0; +} + +static int gc5035_otp_update_dd(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + struct gc5035_dpc *dpc = &gc5035->dpc; + int val, ret; + + if (!dpc->valid) { + dev_dbg(dev, "DPC table invalid, not updating DD.\n"); + return 0; + } + + dev_dbg(dev, "DD auto load start\n"); + + ret = gc5035_write_array(gc5035, gc5035_dd_auto_load_regs, + ARRAY_SIZE(gc5035_dd_auto_load_regs)); + if (ret) { + dev_err(dev, "failed to write dd auto load reg\n"); + return ret; + } + + ret = gc5035_write_reg(gc5035, GC5035_REG_DD_TOTALNUM_H, + (dpc->total_num >> 8) & + GC5035_DD_TOTALNUM_H_MASK); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_DD_TOTALNUM_L, + dpc->total_num & 0xff); + if (ret) + return ret; + + ret = gc5035_write_array(gc5035, gc5035_otp_dd_regs, + ARRAY_SIZE(gc5035_otp_dd_regs)); + if (ret) + return ret; + + /* Wait for DD 
to finish loading automatically */ + ret = readx_poll_timeout(gc5035_check_dd_load_status, gc5035, + val, val <= 0, GC5035_DD_DELAY_US, + GC5035_DD_TIMEOUT_US); + if (ret < 0) { + dev_err(dev, "DD load timeout\n"); + return -EFAULT; + } + if (val < 0) { + dev_err(dev, "DD load failure\n"); + return val; + } + + ret = gc5035_write_array(gc5035, gc5035_otp_dd_enable_regs, + ARRAY_SIZE(gc5035_otp_dd_enable_regs)); + if (ret) + return ret; + + return 0; +} + +static int gc5035_otp_update_regs(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + struct gc5035_otp_regs *otp_regs = &gc5035->otp_regs; + unsigned int i; + int ret; + + dev_dbg(dev, "reg count = %d\n", otp_regs->num_regs); + + for (i = 0; i < otp_regs->num_regs; i++) { + ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, + otp_regs->regs[i].page); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, + otp_regs->regs[i].regval.addr, + otp_regs->regs[i].regval.val); + if (ret) + return ret; + } + + return 0; +} + +static int gc5035_otp_update(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + int ret; + + ret = gc5035_otp_update_dd(gc5035); + if (ret) { + dev_err(dev, "failed to update otp dd\n"); + return ret; + } + + ret = gc5035_otp_update_regs(gc5035); + if (ret) { + dev_err(dev, "failed to update otp regs\n"); + return ret; + } + + return ret; +} + +static int gc5035_set_otp_config(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + u8 otp_id[GC5035_OTP_ID_SIZE]; + int ret; + + ret = gc5035_write_array(gc5035, gc5035_otp_init_regs, + ARRAY_SIZE(gc5035_otp_init_regs)); + if (ret) { + dev_err(dev, "failed to write otp init reg\n"); + return ret; + } + + ret = gc5035_otp_read_data(gc5035, GC5035_OTP_ID_DATA_OFFSET, + &otp_id[0], GC5035_OTP_ID_SIZE); + if (ret) { + dev_err(dev, "failed to read otp id\n"); + goto out_otp_exit; + } + + if (!gc5035->otp_read || memcmp(gc5035->otp_id, otp_id, sizeof(otp_id))) { + dev_dbg(dev, "reading OTP configuration\n"); + + memset(&gc5035->otp_regs, 0, sizeof(gc5035->otp_regs)); + memset(&gc5035->dpc, 0, sizeof(gc5035->dpc)); + + memcpy(gc5035->otp_id, otp_id, sizeof(gc5035->otp_id)); + + ret = gc5035_otp_read_sensor_info(gc5035); + if (ret < 0) { + dev_err(dev, "failed to read otp info\n"); + goto out_otp_exit; + } + + gc5035->otp_read = true; + } + + ret = gc5035_otp_update(gc5035); + if (ret < 0) + return ret; + +out_otp_exit: + ret = gc5035_write_array(gc5035, gc5035_otp_exit_regs, + ARRAY_SIZE(gc5035_otp_exit_regs)); + if (ret) { + dev_err(dev, "failed to write otp exit reg\n"); + return ret; + } + + return ret; +} + +static int gc5035_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct gc5035 *gc5035 = to_gc5035(sd); + const struct gc5035_mode *mode; + s64 h_blank, vblank_def; + + mode = v4l2_find_nearest_size(gc5035_modes, + ARRAY_SIZE(gc5035_modes), width, + height, fmt->format.width, + fmt->format.height); + + + //fmt->format.code = MEDIA_BUS_FMT_SRGGB10_1X10; + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.field = V4L2_FIELD_NONE; + + mutex_lock(&gc5035->mutex); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format; + } else { + gc5035->cur_mode = mode; + h_blank = mode->hts_def - mode->width; + __v4l2_ctrl_modify_range(gc5035->hblank, h_blank, + h_blank, 1, h_blank); + vblank_def = round_up(mode->vts_def, 4) - 
mode->height; + __v4l2_ctrl_modify_range(gc5035->vblank, vblank_def, + GC5035_VTS_MAX - mode->height, + 1, vblank_def); + } + mutex_unlock(&gc5035->mutex); + + return 0; +} + +static int gc5035_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct gc5035 *gc5035 = to_gc5035(sd); + const struct gc5035_mode *mode = gc5035->cur_mode; + + mutex_lock(&gc5035->mutex); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + fmt->format = *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad); + } else { + fmt->format.width = mode->width; + fmt->format.height = mode->height; + //fmt->format.code = MEDIA_BUS_FMT_SRGGB10_1X10; + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + fmt->format.field = V4L2_FIELD_NONE; + } + mutex_unlock(&gc5035->mutex); + + return 0; +} + +static int gc5035_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->index != 0) + return -EINVAL; + + //code->code = MEDIA_BUS_FMT_SRGGB10_1X10; + code->code = MEDIA_BUS_FMT_SGRBG10_1X10; + + return 0; +} + +static int gc5035_enum_frame_sizes(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->index >= ARRAY_SIZE(gc5035_modes)) + return -EINVAL; + + //if (fse->code != MEDIA_BUS_FMT_SRGGB10_1X10) + if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) + return -EINVAL; + + fse->min_width = gc5035_modes[fse->index].width; + fse->max_width = gc5035_modes[fse->index].width; + fse->max_height = gc5035_modes[fse->index].height; + fse->min_height = gc5035_modes[fse->index].height; + + return 0; +} + +static int __gc5035_start_stream(struct gc5035 *gc5035) +{ + int ret; + + gc5035_set_power(gc5035, 1); + + ret = gc5035_write_array(gc5035, gc5035_global_regs, + ARRAY_SIZE(gc5035_global_regs)); + if (ret) + return ret; + + ret = gc5035_set_otp_config(gc5035); + if (ret) + return ret; + + ret = gc5035_write_array(gc5035, gc5035->cur_mode->reg_list, + gc5035->cur_mode->num_regs); + if (ret) + return ret; + + /* In case these controls are set before streaming */ + ret = __v4l2_ctrl_handler_setup(&gc5035->ctrl_handler); + if (ret) + return ret; + + gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); + if (ret) + return ret; + + return gc5035_write_reg(gc5035, GC5035_REG_CTRL_MODE, + GC5035_MODE_STREAMING); +} + +static void __gc5035_stop_stream(struct gc5035 *gc5035) +{ + int ret; + struct i2c_client *client = gc5035->client; + + ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); + if (ret) + dev_err(&client->dev, "failed to stop streaming!"); + + if(gc5035_write_reg(gc5035, GC5035_REG_CTRL_MODE, + GC5035_MODE_SW_STANDBY)) + dev_err(&client->dev, "failed to stop streaming"); + gc5035_set_power(gc5035, 0); +} + +static int gc5035_s_stream(struct v4l2_subdev *sd, int on) +{ + struct gc5035 *gc5035 = to_gc5035(sd); + struct i2c_client *client = gc5035->client; + int ret = 0; + + mutex_lock(&gc5035->mutex); + on = !!on; + if (on == gc5035->streaming) + goto unlock_and_return; + + if (on) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto unlock_and_return; + } + + ret = __gc5035_start_stream(gc5035); + if (ret) { + dev_err(&client->dev, "start stream failed\n"); + pm_runtime_put(&client->dev); + goto unlock_and_return; + } + } else { + __gc5035_stop_stream(gc5035); + pm_runtime_put(&client->dev); + } + + gc5035->streaming = on; + +unlock_and_return: + mutex_unlock(&gc5035->mutex); + + return ret; +} + +static 
int gc5035_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct gc5035 *gc5035 = to_gc5035(sd); + int ret; + + if (gc5035->streaming) { + ret = __gc5035_start_stream(gc5035); + if (ret) + goto error; + } + + return 0; + +error: + __gc5035_stop_stream(gc5035); + gc5035->streaming = false; + + return ret; +} + +static int gc5035_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct gc5035 *gc5035 = to_gc5035(sd); + + if (gc5035->streaming) + __gc5035_stop_stream(gc5035); + + return 0; +} + +static int gc5035_entity_init_cfg(struct v4l2_subdev *subdev, + struct v4l2_subdev_state *sd_state) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_TRY, + .format = { + .width = 2592, + .height = 1944, + } + }; + + gc5035_set_fmt(subdev, sd_state, &fmt); + + return 0; +} + +static const struct dev_pm_ops gc5035_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(gc5035_runtime_suspend, + gc5035_runtime_resume, NULL) +}; + +static const struct v4l2_subdev_video_ops gc5035_video_ops = { + .s_stream = gc5035_s_stream, +}; + +static const struct v4l2_subdev_pad_ops gc5035_pad_ops = { + .init_cfg = gc5035_entity_init_cfg, + .enum_mbus_code = gc5035_enum_mbus_code, + .enum_frame_size = gc5035_enum_frame_sizes, + .get_fmt = gc5035_get_fmt, + .set_fmt = gc5035_set_fmt, +}; + +static const struct v4l2_subdev_ops gc5035_subdev_ops = { + .video = &gc5035_video_ops, + .pad = &gc5035_pad_ops, +}; + +static const struct media_entity_operations gc5035_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static int gc5035_set_exposure(struct gc5035 *gc5035, u32 val) +{ + u32 caltime = 0; + int ret = 0; + + caltime = val / 2; + caltime = caltime * 2; + gc5035->Dgain_ratio = 256 * val / caltime; + + ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_EXPOSURE_H, + (val >> 8) & GC5035_EXPOSURE_H_MASK); + if (ret) + return ret; + + return gc5035_write_reg(gc5035, GC5035_REG_EXPOSURE_L, val & 0xff); +} + +static u32 GC5035_AGC_Param[17][2] = { + { 256, 0 }, + { 302, 1 }, + { 358, 2 }, + { 425, 3 }, + { 502, 8 }, + { 599, 9 }, + { 717, 10 }, + { 845, 11 }, + { 998, 12 }, + { 1203, 13 }, + { 1434, 14 }, + { 1710, 15 }, + { 1997, 16 }, + { 2355, 17 }, + { 2816, 18 }, + { 3318, 19 }, + { 3994, 20 }, +}; + +static int gc5035_set_analogue_gain(struct gc5035 *gc5035, u32 a_gain) +{ + struct device *dev = &gc5035->client->dev; + int ret = 0, i = 0; + u32 temp_gain = 0; + + if (a_gain < 0x100) + a_gain = 0x100; + else if (a_gain > GC5035_ANALOG_GAIN_MAX) + a_gain = GC5035_ANALOG_GAIN_MAX; + for (i = 16; i >= 0; i--) { + if (a_gain >= GC5035_AGC_Param[i][0]) + break; + } + + ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); + if (ret) + return ret; + ret |= gc5035_write_reg(gc5035, + GC5035_REG_ANALOG_GAIN, GC5035_AGC_Param[i][1]); + temp_gain = a_gain; + temp_gain = temp_gain * gc5035->Dgain_ratio / GC5035_AGC_Param[i][0]; + + ret |= gc5035_write_reg(gc5035, + GC5035_REG_DIGI_GAIN_H, + (temp_gain >> 8) & 0x0f); + ret |= gc5035_write_reg(gc5035, + GC5035_REG_DIGI_GAIN_L, + temp_gain & 0xfc); + return ret; +} + +static int gc5035_set_vblank(struct gc5035 *gc5035, u32 val) +{ + int frame_length = 0; + int ret; + + frame_length = val + gc5035->cur_mode->height; + + ret = 
gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_VTS_H, + (frame_length >> 8) & GC5035_VTS_H_MASK); + if (ret) + return ret; + + return gc5035_write_reg(gc5035, GC5035_REG_VTS_L, frame_length & 0xff); +} + +static int gc5035_enable_test_pattern(struct gc5035 *gc5035, u32 pattern) +{ + int ret; + + if (pattern) + pattern = GC5035_TEST_PATTERN_ENABLE; + else + pattern = GC5035_TEST_PATTERN_DISABLE; + + ret = gc5035_write_reg(gc5035, GC5035_PAGE_REG, 1); + if (ret) + return ret; + + ret = gc5035_write_reg(gc5035, GC5035_REG_TEST_PATTERN, pattern); + if (ret) + return ret; + + return gc5035_write_reg(gc5035, GC5035_PAGE_REG, 0); +} + +static int gc5035_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct gc5035 *gc5035 = container_of(ctrl->handler, + struct gc5035, ctrl_handler); + struct i2c_client *client = gc5035->client; + s64 max; + int ret = 0; + + /* Propagate change of current control to all related controls */ + switch (ctrl->id) { + case V4L2_CID_VBLANK: + /* Update max exposure while meeting expected vblanking */ + max = gc5035->cur_mode->height + ctrl->val + - GC5035_EXPOSURE_MARGIN; + __v4l2_ctrl_modify_range(gc5035->exposure, + gc5035->exposure->minimum, max, + gc5035->exposure->step, + gc5035->exposure->default_value); + break; + } + + if (!pm_runtime_get_if_in_use(&client->dev)) + return 0; + + switch (ctrl->id) { + case V4L2_CID_EXPOSURE: + ret = gc5035_set_exposure(gc5035, ctrl->val); + break; + case V4L2_CID_ANALOGUE_GAIN: + ret = gc5035_set_analogue_gain(gc5035, ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + break; + case V4L2_CID_VBLANK: + ret = gc5035_set_vblank(gc5035, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = gc5035_enable_test_pattern(gc5035, ctrl->val); + break; + default: + ret = -EINVAL; + break; + }; + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops gc5035_ctrl_ops = { + .s_ctrl = gc5035_set_ctrl, +}; + +static int gc5035_initialize_controls(struct gc5035 *gc5035) +{ + const struct gc5035_mode *mode; + struct v4l2_ctrl_handler *handler; + struct v4l2_ctrl *ctrl; + u64 exposure_max; + u32 h_blank, vblank_def; + int ret; + + handler = &gc5035->ctrl_handler; + mode = gc5035->cur_mode; + ret = v4l2_ctrl_handler_init(handler, 8); + if (ret) + return ret; + + handler->lock = &gc5035->mutex; + + ctrl = v4l2_ctrl_new_int_menu(handler, NULL, V4L2_CID_LINK_FREQ, + 0, 0, gc5035_link_freqs); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + v4l2_ctrl_new_std(handler, NULL, V4L2_CID_PIXEL_RATE, + 0, GC5035_PIXEL_RATE, 1, GC5035_PIXEL_RATE); + + h_blank = mode->hts_def - mode->width; + gc5035->hblank = v4l2_ctrl_new_std(handler, NULL, V4L2_CID_HBLANK, + h_blank, h_blank, 1, h_blank); + if (gc5035->hblank) + gc5035->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + vblank_def = round_up(mode->vts_def, 4) - mode->height; + gc5035->vblank = v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, + V4L2_CID_VBLANK, vblank_def, + GC5035_VTS_MAX - mode->height, + 4, vblank_def); + + exposure_max = mode->vts_def - GC5035_EXPOSURE_MARGIN; + gc5035->exposure = v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, + V4L2_CID_EXPOSURE, + GC5035_EXPOSURE_MIN, exposure_max, + GC5035_EXPOSURE_STEP, + mode->exp_def); + + v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + GC5035_ANALOG_GAIN_MIN, GC5035_ANALOG_GAIN_MAX, + GC5035_ANALOG_GAIN_STEP, GC5035_ANALOG_GAIN_DEFAULT); + + v4l2_ctrl_new_std(handler, &gc5035_ctrl_ops, 
V4L2_CID_DIGITAL_GAIN, + GC5035_DIGI_GAIN_MIN, GC5035_DIGI_GAIN_MAX, + GC5035_DIGI_GAIN_STEP, GC5035_DIGI_GAIN_DEFAULT); + + v4l2_ctrl_new_std_menu_items(handler, &gc5035_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(gc5035_test_pattern_menu) - 1, + 0, 0, gc5035_test_pattern_menu); + + if (handler->error) { + ret = handler->error; + dev_err(&gc5035->client->dev, + "Failed to init controls(%d)\n", ret); + goto err_free_handler; + } + + gc5035->subdev.ctrl_handler = handler; + + return 0; + +err_free_handler: + v4l2_ctrl_handler_free(handler); + + return ret; +} + +static int gc5035_check_sensor_id(struct gc5035 *gc5035, + struct i2c_client *client) +{ + struct device *dev = &gc5035->client->dev; + u16 id; + u8 pid = 0; + u8 ver = 0; + int ret; + + ret = gc5035_read_reg(gc5035, GC5035_REG_CHIP_ID_H, &pid); + if (ret) + return ret; + + ret = gc5035_read_reg(gc5035, GC5035_REG_CHIP_ID_L, &ver); + if (ret) + return ret; + + id = GC5035_ID(pid, ver); + if (id != GC5035_CHIP_ID) { + dev_err(dev, "Unexpected sensor id(%04x)\n", id); + return -EINVAL; + } + + dev_dbg(dev, "Detected GC%04x sensor\n", id); + + return 0; +} + +static int gc5035_get_hwcfg(struct gc5035 *gc5035) +{ + struct device *dev = &gc5035->client->dev; + struct v4l2_fwnode_endpoint bus_cfg = { + .bus_type = V4L2_MBUS_CSI2_DPHY + }; + struct fwnode_handle *ep; + struct fwnode_handle *fwnode = dev_fwnode(dev); + unsigned long link_freq_mask = 0; + unsigned int i, j; + int ret; + + if (!fwnode) + return -ENODEV; + + ep = fwnode_graph_get_next_endpoint(fwnode, NULL); + if (!ep) + return -ENODEV; + + ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); + if (ret) + goto out; + + dev_dbg(dev, "num of link freqs: %d", bus_cfg.nr_of_link_frequencies); + if (!bus_cfg.nr_of_link_frequencies) { + dev_warn(dev, "no link frequencies defined"); + goto out; + } + + for (i = 0; i < ARRAY_SIZE(gc5035_link_freqs); ++i) { + for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { + if (bus_cfg.link_frequencies[j] + == gc5035_link_freqs[i]) { + link_freq_mask |= BIT(i); + dev_dbg(dev, "Link frequency %lld supported\n", + gc5035_link_freqs[i]); + break; + } + } + } + if (!link_freq_mask) { + dev_err(dev, "No supported link frequencies found\n"); + ret = -EINVAL; + goto out; + } + +out: + v4l2_fwnode_endpoint_free(&bus_cfg); + fwnode_handle_put(ep); + return ret; +} + +static int gc5035_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct gc5035 *gc5035; + struct v4l2_subdev *sd; + int ret, i; + u32 freq = 192000000UL; + + gc5035 = devm_kzalloc(dev, sizeof(*gc5035), GFP_KERNEL); + if (!gc5035) + return -ENOMEM; + + gc5035->client = client; + gc5035_init_power_ctrl(gc5035); + gc5035_set_power(gc5035, 1); + + gc5035->cur_mode = &gc5035_modes[0]; + + ret = clk_set_rate(gc5035->mclk, freq); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to set mclk rate\n"); + gc5035->mclk_rate = clk_get_rate(gc5035->mclk); + if (gc5035->mclk_rate != freq) + dev_warn(dev, "mclk rate set to %lu instead of requested %u\n", + gc5035->mclk_rate, freq); + gc5035->iovdd_supply = devm_regulator_get(dev, "iovdd"); + if (IS_ERR(gc5035->iovdd_supply)) + return dev_err_probe(dev, PTR_ERR(gc5035->iovdd_supply), + "Failed to get iovdd regulator\n"); + + for (i = 0; i < ARRAY_SIZE(gc5035_supplies); i++) + gc5035->supplies[i].supply = gc5035_supplies[i]; + ret = devm_regulator_bulk_get(&gc5035->client->dev, + ARRAY_SIZE(gc5035_supplies), + gc5035->supplies); + if (ret) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + 
mutex_init(&gc5035->mutex); + sd = &gc5035->subdev; + v4l2_i2c_subdev_init(sd, client, &gc5035_subdev_ops); + ret = gc5035_initialize_controls(gc5035); + if (ret) { + dev_err_probe(dev, ret, "Failed to initialize controls\n"); + goto err_destroy_mutex; + } + ret = gc5035_runtime_resume(dev); + if (ret) { + dev_err_probe(dev, ret, "Failed to power on\n"); + goto err_free_handler; + } + ret = gc5035_check_sensor_id(gc5035, client); + if (ret) { + dev_err_probe(dev, ret, "Sensor ID check failed\n"); + goto err_power_off; + } + + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + sd->entity.ops = &gc5035_subdev_entity_ops; + sd->entity.function = MEDIA_ENT_F_CAM_SENSOR; + gc5035->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&sd->entity, 1, &gc5035->pad); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to initialize pads\n"); + goto err_power_off; + } + ret = v4l2_async_register_subdev_sensor(sd); + if (ret) { + dev_err_probe(dev, ret, "v4l2 async register subdev failed\n"); + goto err_clean_entity; + } + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_idle(dev); + gc5035_set_power(gc5035, 0); + + return 0; + +err_clean_entity: + media_entity_cleanup(&sd->entity); +err_power_off: + gc5035_runtime_suspend(dev); +err_free_handler: + v4l2_ctrl_handler_free(&gc5035->ctrl_handler); + gc5035_set_power(gc5035, 0); +err_destroy_mutex: + mutex_destroy(&gc5035->mutex); + + return ret; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) +static int gc5035_remove(struct i2c_client *client) +#else +static void gc5035_remove(struct i2c_client *client) +#endif +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct gc5035 *gc5035 = to_gc5035(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + v4l2_ctrl_handler_free(&gc5035->ctrl_handler); + mutex_destroy(&gc5035->mutex); + pm_runtime_disable(&client->dev); + if (!pm_runtime_status_suspended(&client->dev)) + gc5035_runtime_suspend(&client->dev); + pm_runtime_set_suspended(&client->dev); + + #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + return 0; + #endif +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id gc5035_acpi_ids[] = { + {"GCTI5035"}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, gc5035_acpi_ids); +#endif + +static const struct of_device_id gc5035_of_match[] = { + { .compatible = "galaxycore,gc5035" }, + {}, +}; +MODULE_DEVICE_TABLE(of, gc5035_of_match); + +static struct i2c_driver gc5035_i2c_driver = { + .driver = { + .name = "gc5035", + .pm = &gc5035_pm_ops, + .acpi_match_table = ACPI_PTR(gc5035_acpi_ids), + .of_match_table = gc5035_of_match, + }, + .probe_new = gc5035_probe, + .remove = gc5035_remove, +}; +module_i2c_driver(gc5035_i2c_driver); + +MODULE_AUTHOR("Liang Wang "); +MODULE_AUTHOR("Hao He "); +MODULE_AUTHOR("Xingyu Wu "); +MODULE_AUTHOR("Tomasz Figa "); +MODULE_DESCRIPTION("GalaxyCore gc5035 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 53270d19c73a..7350a3840438 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -23,7 +23,7 @@ #define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d #define INT3472_PDEV_MAX_NAME_LEN 23 -#define INT3472_MAX_SENSOR_GPIOS 3 +#define INT3472_MAX_SENSOR_GPIOS 4 #define GPIO_REGULATOR_NAME_LENGTH 21 #define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9 @@ -73,6 +73,7 @@ struct int3472_sensor_config { const char *sensor_module_name; struct regulator_consumer_supply supply_map; const struct 
int3472_gpio_function_remap *function_maps; + const bool use_independent_gpio; }; struct int3472_discrete_device { diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index c42c3faa2c32..427552c14c76 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -57,11 +57,25 @@ static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = static const struct int3472_sensor_config int3472_sensor_configs[] = { /* Lenovo Miix 510-12ISK - OV2680, Front */ - { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps }, + { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps, false }, /* Lenovo Miix 510-12ISK - OV5648, Rear */ - { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL, false }, /* Surface Go 1&2 - OV5693, Front */ - { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL, false }, + /* Dell Latitude 9420 - OV01A1S, Front */ + { "0BF111N3", { 0 }, NULL, true }, + /* Dell Latitude 9420 - HM11B1, Front */ + { "9BF123N3", { 0 }, NULL, true }, + /* Lenovo X1 Yoga - OV2740, Front */ + { "CJFLE23", { 0 }, NULL, true }, + /* OV13B10 */ + { "09B13", { 0 }, NULL, true }, + /* HIMX1092 */ + { "KPFB297", { 0 }, NULL, true }, + /* GC5035 */ + { "CJAK519", { 0 }, NULL, true }, + /* S5K3L6 */ + { "KBAG296", { 0 }, NULL, true }, }; static const struct int3472_sensor_config * @@ -229,6 +243,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, const char *err_msg; int ret; u8 type; + u8 active_value; + u32 polarity = GPIO_LOOKUP_FLAGS_DEFAULT; if (!acpi_gpio_get_io_resource(ares, &agpio)) return 1; @@ -249,30 +265,60 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, } type = obj->integer.value & 0xff; + active_value = obj->integer.value >> 24; + if (!active_value) + polarity = GPIO_ACTIVE_LOW; switch (type) { case INT3472_GPIO_TYPE_RESET: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "reset", - GPIO_ACTIVE_LOW); + polarity); if (ret) err_msg = "Failed to map reset pin to sensor\n"; break; case INT3472_GPIO_TYPE_POWERDOWN: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown", - GPIO_ACTIVE_LOW); + polarity); if (ret) err_msg = "Failed to map powerdown pin to sensor\n"; break; case INT3472_GPIO_TYPE_CLK_ENABLE: + if (!IS_ERR(int3472->sensor_config) && + int3472->sensor_config->use_independent_gpio) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "clken", polarity); + if (ret) + err_msg = "Failed to map clken pin to sensor\n"; + + break; + } case INT3472_GPIO_TYPE_PRIVACY_LED: + if (!IS_ERR(int3472->sensor_config) && + int3472->sensor_config->use_independent_gpio) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "pled", polarity); + if (ret) + err_msg = "Failed to map pled pin to sensor\n"; + + break; + } ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type); if (ret) err_msg = "Failed to map GPIO to clock\n"; break; case INT3472_GPIO_TYPE_POWER_ENABLE: + if (!IS_ERR(int3472->sensor_config) && + int3472->sensor_config->use_independent_gpio) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "pwren", polarity); + if (ret) + err_msg = "Failed to map clken pin to sensor\n"; + + break; + } ret = skl_int3472_register_regulator(int3472, agpio); if (ret) err_msg = "Failed to map regulator to sensor\n"; -- 2.17.1 
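/*
 * Editor's note (illustrative sketch, not part of the patch above): the
 * int3472 hunks in this patch derive the GPIO function and polarity from the
 * integer returned by the GPIO _DSM entry -- bits 7:0 carry the GPIO type
 * and bits 31:24 the "sensor on" value, where 0 means the line is active
 * low. The standalone C below mirrors that decoding with plain shifts and
 * masks; the macro names and the sample value are invented for illustration
 * only and are not taken from real firmware.
 */
#include <stdint.h>
#include <stdio.h>

/* Field layout of the _DSM return value, as used by the patch above. */
#define DSM_GPIO_TYPE(v)          ((unsigned int)((v) & 0xff))         /* bits 7:0   */
#define DSM_GPIO_PIN(v)           ((unsigned int)(((v) >> 8) & 0xff))  /* bits 15:8  */
#define DSM_GPIO_SENSOR_ON_VAL(v) ((unsigned int)((v) >> 24))          /* bits 31:24 */

int main(void)
{
	/* Hypothetical sample: type 0x0c (clk-enable), pin 0x17, sensor-on value 1. */
	uint32_t dsm = 0x0100170c;

	unsigned int type = DSM_GPIO_TYPE(dsm);
	unsigned int pin = DSM_GPIO_PIN(dsm);
	int active_low = (DSM_GPIO_SENSOR_ON_VAL(dsm) == 0);

	printf("type=0x%02x pin=%u polarity=%s\n",
	       type, pin, active_low ? "GPIO_ACTIVE_LOW" : "GPIO_ACTIVE_HIGH");
	return 0;
}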
ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v5.15/000077500000000000000000000000001471702545500216765ustar00rootroot00000000000000int3472-support-independent-clock-and-LED-gpios.patch000066400000000000000000000100371471702545500333320ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v5.15diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h index 714fde73b524..10d93ed4ea98 100644 --- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h +++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_common.h @@ -23,7 +23,7 @@ #define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d #define INT3472_PDEV_MAX_NAME_LEN 23 -#define INT3472_MAX_SENSOR_GPIOS 3 +#define INT3472_MAX_SENSOR_GPIOS 4 #define GPIO_REGULATOR_NAME_LENGTH 21 #define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9 @@ -73,6 +73,7 @@ struct int3472_sensor_config { const char *sensor_module_name; struct regulator_consumer_supply supply_map; const struct int3472_gpio_function_remap *function_maps; + const bool independent_clk_gpios; }; struct int3472_discrete_device { diff --git a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c index e59d79c7e82f..5cf6dd63d43f 100644 --- a/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c +++ b/drivers/platform/x86/intel/int3472/intel_skl_int3472_discrete.c @@ -57,11 +57,17 @@ static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = static const struct int3472_sensor_config int3472_sensor_configs[] = { /* Lenovo Miix 510-12ISK - OV2680, Front */ - { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps }, + { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps, false }, /* Lenovo Miix 510-12ISK - OV5648, Rear */ - { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL, false }, /* Surface Go 1&2 - OV5693, Front */ - { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL, false }, + /* Dell Latitude 9420 - OV01A1S, Front */ + { "0BF111N3", { 0 }, NULL, true }, + /* Dell Latitude 9420 - HM11B1, Front */ + { "9BF123N3", { 0 }, NULL, true }, + /* Lenovo X1 Yoga - OV2740, Front */ + { "CJFLE23", { 0 }, NULL, true }, }; static const struct int3472_sensor_config * @@ -226,6 +232,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, const char *err_msg; int ret; u8 type; + u8 active_value; + u32 polarity = GPIO_LOOKUP_FLAGS_DEFAULT; if (!acpi_gpio_get_io_resource(ares, &agpio)) return 1; @@ -246,28 +254,45 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, } type = obj->integer.value & 0xff; + active_value = obj->integer.value >> 24; + if (!active_value) + polarity = GPIO_ACTIVE_LOW; switch (type) { case INT3472_GPIO_TYPE_RESET: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "reset", - GPIO_ACTIVE_LOW); + polarity ^ GPIO_ACTIVE_LOW); if (ret) err_msg = "Failed to map reset pin to sensor\n"; break; case INT3472_GPIO_TYPE_POWERDOWN: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown", - GPIO_ACTIVE_LOW); + polarity ^ GPIO_ACTIVE_LOW); if (ret) err_msg = "Failed to map powerdown pin to sensor\n"; break; case INT3472_GPIO_TYPE_CLK_ENABLE: case INT3472_GPIO_TYPE_PRIVACY_LED: - ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type); - if (ret) - err_msg = "Failed to map GPIO to clock\n"; - + if 
(!IS_ERR(int3472->sensor_config) && + int3472->sensor_config->independent_clk_gpios) { + if (type == INT3472_GPIO_TYPE_CLK_ENABLE) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "clken", polarity); + if (ret) + err_msg = "Failed to map clken pin to sensor\n"; + } else if (type == INT3472_GPIO_TYPE_PRIVACY_LED) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "pled", polarity); + if (ret) + err_msg = "Failed to map pled pin to sensor\n"; + } + } else { + ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type); + if (ret) + err_msg = "Failed to map GPIO to clock\n"; + } break; case INT3472_GPIO_TYPE_POWER_ENABLE: ret = skl_int3472_register_regulator(int3472, agpio); -- 2.34.1 ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v5.17/000077500000000000000000000000001471702545500217005ustar00rootroot00000000000000int3472-support-independent-clock-and-LED-gpios.patch000066400000000000000000000076031471702545500333410ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v5.17diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 53270d19c73a..6c5b52ac6c2c 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -23,7 +23,7 @@ #define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d #define INT3472_PDEV_MAX_NAME_LEN 23 -#define INT3472_MAX_SENSOR_GPIOS 3 +#define INT3472_MAX_SENSOR_GPIOS 4 #define GPIO_REGULATOR_NAME_LENGTH 21 #define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9 @@ -73,6 +73,7 @@ struct int3472_sensor_config { const char *sensor_module_name; struct regulator_consumer_supply supply_map; const struct int3472_gpio_function_remap *function_maps; + const bool independent_clk_gpios; }; struct int3472_discrete_device { diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index ed4c9d760757..f5857ec334fa 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -57,11 +57,17 @@ static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = static const struct int3472_sensor_config int3472_sensor_configs[] = { /* Lenovo Miix 510-12ISK - OV2680, Front */ - { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps }, + { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps, false }, /* Lenovo Miix 510-12ISK - OV5648, Rear */ - { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL, false }, /* Surface Go 1&2 - OV5693, Front */ - { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL, false }, + /* Dell Latitude 9420 - OV01A1S, Front */ + { "0BF111N3", { 0 }, NULL, true }, + /* Dell Latitude 9420 - HM11B1, Front */ + { "9BF123N3", { 0 }, NULL, true }, + /* Lenovo X1 Yoga - OV2740, Front */ + { "CJFLE23", { 0 }, NULL, true }, }; static const struct int3472_sensor_config * @@ -225,6 +231,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, const char *err_msg; int ret; u8 type; + u8 active_value; + u32 polarity = GPIO_LOOKUP_FLAGS_DEFAULT; if (!acpi_gpio_get_io_resource(ares, &agpio)) return 1; @@ -245,28 +253,45 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, } type = obj->integer.value & 0xff; + active_value = obj->integer.value >> 24; + if (!active_value) + polarity = GPIO_ACTIVE_LOW; switch (type) { case INT3472_GPIO_TYPE_RESET: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, 
"reset", - GPIO_ACTIVE_LOW); + polarity ^ GPIO_ACTIVE_LOW); if (ret) err_msg = "Failed to map reset pin to sensor\n"; break; case INT3472_GPIO_TYPE_POWERDOWN: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown", - GPIO_ACTIVE_LOW); + polarity ^ GPIO_ACTIVE_LOW); if (ret) err_msg = "Failed to map powerdown pin to sensor\n"; break; case INT3472_GPIO_TYPE_CLK_ENABLE: case INT3472_GPIO_TYPE_PRIVACY_LED: - ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type); - if (ret) - err_msg = "Failed to map GPIO to clock\n"; - + if (!IS_ERR(int3472->sensor_config) && + int3472->sensor_config->independent_clk_gpios) { + if (type == INT3472_GPIO_TYPE_CLK_ENABLE) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "clken", polarity); + if (ret) + err_msg = "Failed to map clken pin to sensor\n"; + } else if (type == INT3472_GPIO_TYPE_PRIVACY_LED) { + ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, + "pled", polarity); + if (ret) + err_msg = "Failed to map pled pin to sensor\n"; + } + } else { + ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type); + if (ret) + err_msg = "Failed to map GPIO to clock\n"; + } break; case INT3472_GPIO_TYPE_POWER_ENABLE: ret = skl_int3472_register_regulator(int3472, agpio); ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.1.7/000077500000000000000000000000001471702545500217575ustar00rootroot000000000000000001-MAINT_GIT-platform-x86-int3472-Evaluate-device-s-_DS.patch000066400000000000000000000177711471702545500341500ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.1.7From 1c07fff6e0ce61bc48dcd36494d5e057b46c2733 Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Wed, 31 May 2023 15:44:29 +0200 Subject: [PATCH 1/8] MAINT_GIT: platform/x86: int3472: Evaluate device's _DSM method to control imaging clock On some platforms, the imaging clock should be controlled by evaluating specific clock device's _DSM method instead of setting gpio, so this change register clock if no gpio based clock and then use the _DSM method to enable and disable clock. Signed-off-by: Bingbu Cao Signed-off-by: Hao Yao Link: https://lore.kernel.org/r/20230524035135.90315-2-bingbu.cao@intel.com Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20230531134429.171337-1-hdegoede@redhat.com --- .../x86/intel/int3472/clk_and_regulator.c | 92 +++++++++++++++++-- drivers/platform/x86/intel/int3472/common.h | 14 ++- drivers/platform/x86/intel/int3472/discrete.c | 8 +- 3 files changed, 99 insertions(+), 15 deletions(-) diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c index 399f0623ca1b..410073ca371c 100644 --- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c +++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c @@ -11,6 +11,41 @@ #include "common.h" +/* + * 82c0d13a-78c5-4244-9bb1-eb8b539a8d11 + * This _DSM GUID allows controlling the sensor clk when it is not controlled + * through a GPIO. 
+ */ +static const guid_t img_clk_guid = + GUID_INIT(0x82c0d13a, 0x78c5, 0x4244, + 0x9b, 0xb1, 0xeb, 0x8b, 0x53, 0x9a, 0x8d, 0x11); + +static void skl_int3472_enable_clk(struct int3472_clock *clk, int enable) +{ + struct int3472_discrete_device *int3472 = to_int3472_device(clk); + union acpi_object args[3]; + union acpi_object argv4; + + if (clk->ena_gpio) { + gpiod_set_value_cansleep(clk->ena_gpio, enable); + return; + } + + args[0].integer.type = ACPI_TYPE_INTEGER; + args[0].integer.value = clk->imgclk_index; + args[1].integer.type = ACPI_TYPE_INTEGER; + args[1].integer.value = enable; + args[2].integer.type = ACPI_TYPE_INTEGER; + args[2].integer.value = 1; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 3; + argv4.package.elements = args; + + acpi_evaluate_dsm(acpi_device_handle(int3472->adev), &img_clk_guid, + 0, 1, &argv4); +} + /* * The regulators have to have .ops to be valid, but the only ops we actually * support are .enable and .disable which are handled via .ena_gpiod. Pass an @@ -20,17 +55,13 @@ static const struct regulator_ops int3472_gpio_regulator_ops; static int skl_int3472_clk_prepare(struct clk_hw *hw) { - struct int3472_gpio_clock *clk = to_int3472_clk(hw); - - gpiod_set_value_cansleep(clk->ena_gpio, 1); + skl_int3472_enable_clk(to_int3472_clk(hw), 1); return 0; } static void skl_int3472_clk_unprepare(struct clk_hw *hw) { - struct int3472_gpio_clock *clk = to_int3472_clk(hw); - - gpiod_set_value_cansleep(clk->ena_gpio, 0); + skl_int3472_enable_clk(to_int3472_clk(hw), 0); } static int skl_int3472_clk_enable(struct clk_hw *hw) @@ -73,7 +104,7 @@ static unsigned int skl_int3472_get_clk_frequency(struct int3472_discrete_device static unsigned long skl_int3472_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { - struct int3472_gpio_clock *clk = to_int3472_clk(hw); + struct int3472_clock *clk = to_int3472_clk(hw); return clk->frequency; } @@ -86,8 +117,51 @@ static const struct clk_ops skl_int3472_clock_ops = { .recalc_rate = skl_int3472_clk_recalc_rate, }; -int skl_int3472_register_clock(struct int3472_discrete_device *int3472, - struct acpi_resource_gpio *agpio, u32 polarity) +int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472) +{ + struct acpi_device *adev = int3472->adev; + struct clk_init_data init = { + .ops = &skl_int3472_clock_ops, + .flags = CLK_GET_RATE_NOCACHE, + }; + int ret; + + if (int3472->clock.cl) + return 0; /* A GPIO controlled clk has already been registered */ + + if (!acpi_check_dsm(adev->handle, &img_clk_guid, 0, BIT(1))) + return 0; /* DSM clock control is not available */ + + init.name = kasprintf(GFP_KERNEL, "%s-clk", acpi_dev_name(adev)); + if (!init.name) + return -ENOMEM; + + int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472); + int3472->clock.clk_hw.init = &init; + int3472->clock.clk = clk_register(&adev->dev, &int3472->clock.clk_hw); + if (IS_ERR(int3472->clock.clk)) { + ret = PTR_ERR(int3472->clock.clk); + goto out_free_init_name; + } + + int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL, int3472->sensor_name); + if (!int3472->clock.cl) { + ret = -ENOMEM; + goto err_unregister_clk; + } + + kfree(init.name); + return 0; + +err_unregister_clk: + clk_unregister(int3472->clock.clk); +out_free_init_name: + kfree(init.name); + return ret; +} + +int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472, + struct acpi_resource_gpio *agpio, u32 polarity) { char *path = agpio->resource_source.string_ptr; struct clk_init_data init = { diff --git 
a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 61688e450ce5..0c9c899e017b 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -43,7 +43,7 @@ } #define to_int3472_clk(hw) \ - container_of(hw, struct int3472_gpio_clock, clk_hw) + container_of(hw, struct int3472_clock, clk_hw) #define to_int3472_device(clk) \ container_of(clk, struct int3472_discrete_device, clock) @@ -64,7 +64,9 @@ struct int3472_cldb { u8 control_logic_type; u8 control_logic_id; u8 sensor_card_sku; - u8 reserved[28]; + u8 reserved[10]; + u8 clock_source; + u8 reserved2[17]; }; struct int3472_gpio_function_remap { @@ -94,12 +96,13 @@ struct int3472_discrete_device { struct regulator_desc rdesc; } regulator; - struct int3472_gpio_clock { + struct int3472_clock { struct clk *clk; struct clk_hw clk_hw; struct clk_lookup *cl; struct gpio_desc *ena_gpio; u32 frequency; + u8 imgclk_index; } clock; struct int3472_pled { @@ -121,8 +124,9 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev, struct acpi_device **sensor_adev_ret, const char **name_ret); -int skl_int3472_register_clock(struct int3472_discrete_device *int3472, - struct acpi_resource_gpio *agpio, u32 polarity); +int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472, + struct acpi_resource_gpio *agpio, u32 polarity); +int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472); void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472); int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index ef020e23e596..8111579a59d4 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -258,7 +258,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, break; case INT3472_GPIO_TYPE_CLK_ENABLE: - ret = skl_int3472_register_clock(int3472, agpio, polarity); + ret = skl_int3472_register_gpio_clock(int3472, agpio, polarity); if (ret) err_msg = "Failed to register clock\n"; @@ -311,6 +311,11 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472) acpi_dev_free_resource_list(&resource_list); + /* Register _DSM based clock (no-op if a GPIO clock was already registered) */ + ret = skl_int3472_register_dsm_clock(int3472); + if (ret < 0) + return ret; + int3472->gpios.dev_id = int3472->sensor_name; gpiod_add_lookup_table(&int3472->gpios); @@ -356,6 +361,7 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev) int3472->adev = adev; int3472->dev = &pdev->dev; platform_set_drvdata(pdev, int3472); + int3472->clock.imgclk_index = cldb.clock_source; ret = skl_int3472_get_sensor_adev_and_name(&pdev->dev, &int3472->sensor, &int3472->sensor_name); -- 2.34.1 0002-MAINT_GIT-platform-x86-int3472-discrete-Drop-GPIO-re.patch000066400000000000000000000103161471702545500340730ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.1.7From 950c605c54c8b7f863dfc56950aa710a9a4286f3 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 16 Jun 2023 19:21:27 +0200 Subject: [PATCH 2/8] MAINT_GIT: platform/x86: int3472: discrete: Drop GPIO remapping support The only sensor driver which needs GPIO remapping support is the ov2680 driver and ACPI enumeration support + other necessary changes to the ov2680 driver were never upstreamed. 
A new series updating the ov2680 driver is pending upstream now and in this series the ov2680 driver is patched to look for "powerdown" as con-id, instead of relying on GPIO remapping in the int3472 code, so the GPIO remapping is no longer necessary. Tested-by: Hao Yao Reviewed-by: Daniel Scally Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20230616172132.37859-2-hdegoede@redhat.com --- drivers/platform/x86/intel/int3472/common.h | 6 --- drivers/platform/x86/intel/int3472/discrete.c | 37 ++----------------- 2 files changed, 3 insertions(+), 40 deletions(-) diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 0c9c899e017b..735567f374a6 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -69,15 +69,9 @@ struct int3472_cldb { u8 reserved2[17]; }; -struct int3472_gpio_function_remap { - const char *documented; - const char *actual; -}; - struct int3472_sensor_config { const char *sensor_module_name; struct regulator_consumer_supply supply_map; - const struct int3472_gpio_function_remap *function_maps; }; struct int3472_discrete_device { diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 8111579a59d4..2ab3c7466986 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -39,27 +39,13 @@ static const guid_t cio2_sensor_module_guid = * the functions mapping resources to the sensors. Where the sensors have * a power enable pin defined in DSDT we need to provide a supply name so * the sensor drivers can find the regulator. The device name will be derived - * from the sensor's ACPI device within the code. Optionally, we can provide a - * NULL terminated array of function name mappings to deal with any platform - * specific deviations from the documented behaviour of GPIOs. - * - * Map a GPIO function name to NULL to prevent the driver from mapping that - * GPIO at all. + * from the sensor's ACPI device within the code. 
*/ - -static const struct int3472_gpio_function_remap ov2680_gpio_function_remaps[] = { - { "reset", NULL }, - { "powerdown", "reset" }, - { } -}; - static const struct int3472_sensor_config int3472_sensor_configs[] = { - /* Lenovo Miix 510-12ISK - OV2680, Front */ - { "GNDF140809R", { 0 }, ov2680_gpio_function_remaps }, /* Lenovo Miix 510-12ISK - OV5648, Rear */ - { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL) }, /* Surface Go 1&2 - OV5693, Front */ - { "YHCU", REGULATOR_SUPPLY("avdd", NULL), NULL }, + { "YHCU", REGULATOR_SUPPLY("avdd", NULL) }, }; static const struct int3472_sensor_config * @@ -96,7 +82,6 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347 struct acpi_resource_gpio *agpio, const char *func, u32 polarity) { - const struct int3472_sensor_config *sensor_config; char *path = agpio->resource_source.string_ptr; struct gpiod_lookup *table_entry; struct acpi_device *adev; @@ -108,22 +93,6 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347 return -EINVAL; } - sensor_config = int3472->sensor_config; - if (!IS_ERR(sensor_config) && sensor_config->function_maps) { - const struct int3472_gpio_function_remap *remap; - - for (remap = sensor_config->function_maps; remap->documented; remap++) { - if (!strcmp(func, remap->documented)) { - func = remap->actual; - break; - } - } - } - - /* Functions mapped to NULL should not be mapped to the sensor */ - if (!func) - return 0; - status = acpi_get_handle(NULL, path, &handle); if (ACPI_FAILURE(status)) return -EINVAL; -- 2.34.1 0003-MAINT_GIT-platform-x86-int3472-discrete-Remove-senso.patch000066400000000000000000000166231471702545500344210ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.1.7From 0302c0d48718d491f37c690a22258fa320101a8a Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 16 Jun 2023 19:21:28 +0200 Subject: [PATCH 3/8] MAINT_GIT: platform/x86: int3472: discrete: Remove sensor_config-s Currently the only 2 sensor_config-s both specify "avdd" as supply-id. The INT3472 device is going to be the only supplier of a regulator for the sensor device. So there is no chance of collisions with other regulator suppliers and it is undesirable to need to manually add new entries to int3472_sensor_configs[] for each new sensor module which uses a GPIO regulator. Instead just always use "avdd" as supply-id when registering the GPIO regulator. If necessary for specific sensor drivers then other supply-ids can be added as aliases in the future, adding aliases will be safe since INT3472 will be the only regulator supplier for the sensor. 
Cc: Bingbu Cao Tested-by: Hao Yao Signed-off-by: Hans de Goede Reviewed-by: Daniel Scally Link: https://lore.kernel.org/r/20230616172132.37859-3-hdegoede@redhat.com --- .../x86/intel/int3472/clk_and_regulator.c | 40 ++++++++++------- drivers/platform/x86/intel/int3472/common.h | 7 +-- drivers/platform/x86/intel/int3472/discrete.c | 45 +++---------------- 3 files changed, 31 insertions(+), 61 deletions(-) diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c index 410073ca371c..5487f3ab66ad 100644 --- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c +++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c @@ -234,32 +234,40 @@ void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472) gpiod_put(int3472->clock.ena_gpio); } +/* + * The INT3472 device is going to be the only supplier of a regulator for + * the sensor device. But unlike the clk framework the regulator framework + * does not allow matching by consumer-device-name only. + * + * Ideally all sensor drivers would use "avdd" as supply-id. But for drivers + * where this cannot be changed because another supply-id is already used in + * e.g. DeviceTree files an alias for the other supply-id can be added here. + * + * Do not forget to update GPIO_REGULATOR_SUPPLY_MAP_COUNT when changing this. + */ +static const char * const skl_int3472_regulator_map_supplies[] = { + "avdd", +}; + +static_assert(ARRAY_SIZE(skl_int3472_regulator_map_supplies) == + GPIO_REGULATOR_SUPPLY_MAP_COUNT); + int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, struct acpi_resource_gpio *agpio) { - const struct int3472_sensor_config *sensor_config; char *path = agpio->resource_source.string_ptr; - struct regulator_consumer_supply supply_map; struct regulator_init_data init_data = { }; struct regulator_config cfg = { }; - int ret; - - sensor_config = int3472->sensor_config; - if (IS_ERR(sensor_config)) { - dev_err(int3472->dev, "No sensor module config\n"); - return PTR_ERR(sensor_config); - } + int i, ret; - if (!sensor_config->supply_map.supply) { - dev_err(int3472->dev, "No supply name defined\n"); - return -ENODEV; + for (i = 0; i < ARRAY_SIZE(skl_int3472_regulator_map_supplies); i++) { + int3472->regulator.supply_map[i].supply = skl_int3472_regulator_map_supplies[i]; + int3472->regulator.supply_map[i].dev_name = int3472->sensor_name; } init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; - init_data.num_consumer_supplies = 1; - supply_map = sensor_config->supply_map; - supply_map.dev_name = int3472->sensor_name; - init_data.consumer_supplies = &supply_map; + init_data.consumer_supplies = int3472->regulator.supply_map; + init_data.num_consumer_supplies = GPIO_REGULATOR_SUPPLY_MAP_COUNT; snprintf(int3472->regulator.regulator_name, sizeof(int3472->regulator.regulator_name), "%s-regulator", diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 735567f374a6..225b067c854d 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -28,6 +28,7 @@ #define GPIO_REGULATOR_NAME_LENGTH 21 #define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9 +#define GPIO_REGULATOR_SUPPLY_MAP_COUNT 1 #define INT3472_LED_MAX_NAME_LEN 32 @@ -69,11 +70,6 @@ struct int3472_cldb { u8 reserved2[17]; }; -struct int3472_sensor_config { - const char *sensor_module_name; - struct regulator_consumer_supply supply_map; -}; - struct int3472_discrete_device { struct acpi_device 
*adev; struct device *dev; @@ -83,6 +79,7 @@ struct int3472_discrete_device { const struct int3472_sensor_config *sensor_config; struct int3472_gpio_regulator { + struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT]; char regulator_name[GPIO_REGULATOR_NAME_LENGTH]; char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH]; struct gpio_desc *gpio; diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 2ab3c7466986..3b410428cec2 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -34,48 +34,17 @@ static const guid_t cio2_sensor_module_guid = GUID_INIT(0x822ace8f, 0x2814, 0x4174, 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee); -/* - * Here follows platform specific mapping information that we can pass to - * the functions mapping resources to the sensors. Where the sensors have - * a power enable pin defined in DSDT we need to provide a supply name so - * the sensor drivers can find the regulator. The device name will be derived - * from the sensor's ACPI device within the code. - */ -static const struct int3472_sensor_config int3472_sensor_configs[] = { - /* Lenovo Miix 510-12ISK - OV5648, Rear */ - { "GEFF150023R", REGULATOR_SUPPLY("avdd", NULL) }, - /* Surface Go 1&2 - OV5693, Front */ - { "YHCU", REGULATOR_SUPPLY("avdd", NULL) }, -}; - -static const struct int3472_sensor_config * -skl_int3472_get_sensor_module_config(struct int3472_discrete_device *int3472) +static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *int3472) { union acpi_object *obj; - unsigned int i; obj = acpi_evaluate_dsm_typed(int3472->sensor->handle, &cio2_sensor_module_guid, 0x00, 0x01, NULL, ACPI_TYPE_STRING); - - if (!obj) { - dev_err(int3472->dev, - "Failed to get sensor module string from _DSM\n"); - return ERR_PTR(-ENODEV); - } - - for (i = 0; i < ARRAY_SIZE(int3472_sensor_configs); i++) { - if (!strcmp(int3472_sensor_configs[i].sensor_module_name, - obj->string.pointer)) - break; + if (obj) { + dev_dbg(int3472->dev, "Sensor module id: '%s'\n", obj->string.pointer); + ACPI_FREE(obj); } - - ACPI_FREE(obj); - - if (i >= ARRAY_SIZE(int3472_sensor_configs)) - return ERR_PTR(-EINVAL); - - return &int3472_sensor_configs[i]; } static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472, @@ -266,11 +235,7 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472) LIST_HEAD(resource_list); int ret; - /* - * No error check, because not having a sensor config is not necessarily - * a failure mode. - */ - int3472->sensor_config = skl_int3472_get_sensor_module_config(int3472); + skl_int3472_log_sensor_module_name(int3472); ret = acpi_dev_get_resources(int3472->adev, &resource_list, skl_int3472_handle_gpio_resources, -- 2.34.1 0004-MAINT_GIT-platform-x86-int3472-discrete-Use-FIELD_GE.patch000066400000000000000000000043551471702545500337670ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.1.7From 6df3af9f6574234639af2477a1beb020bf6eeb0b Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 16 Jun 2023 19:21:31 +0200 Subject: [PATCH 4/8] MAINT_GIT: platform/x86: int3472: discrete: Use FIELD_GET() on the GPIO _DSM return value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add defines for the various fields encoded in the GPIO _DSM integer return value and then use FIELD_GET() to get field values. 
Suggested-by: Ilpo Järvinen Reviewed-by: Ilpo Järvinen Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20230616172132.37859-6-hdegoede@redhat.com --- drivers/platform/x86/intel/int3472/discrete.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 3b410428cec2..557517f43ede 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -2,6 +2,7 @@ /* Author: Dan Scally */ #include +#include #include #include #include @@ -25,6 +26,10 @@ static const guid_t int3472_gpio_guid = GUID_INIT(0x79234640, 0x9e10, 0x4fea, 0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f); +#define INT3472_GPIO_DSM_TYPE GENMASK(7, 0) +#define INT3472_GPIO_DSM_PIN GENMASK(15, 8) +#define INT3472_GPIO_DSM_SENSOR_ON_VAL GENMASK(31, 24) + /* * 822ace8f-2814-4174-a56b-5f029fe079ee * This _DSM GUID returns a string from the sensor device, which acts as a @@ -174,12 +179,11 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, return 1; } - type = obj->integer.value & 0xff; + type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value); int3472_get_func_and_polarity(type, &func, &polarity); - /* If bits 31-24 of the _DSM entry are all 0 then the signal is inverted */ - active_value = obj->integer.value >> 24; + active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value); if (!active_value) polarity ^= GPIO_ACTIVE_LOW; -- 2.34.1 0005-MAINT_GIT-platform-x86-int3472-discrete-Log-a-warnin.patch000066400000000000000000000041321471702545500342640ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.1.7From f317447e42b2f6c664d4799725beb0d32bbca39b Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 16 Jun 2023 19:21:32 +0200 Subject: [PATCH 5/8] MAINT_GIT: platform/x86: int3472: discrete: Log a warning if the pin-numbers don't match MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The INT3472 discrete code assumes that the ACPI GPIO resources are in the same order as the pin-info _DSM entries. The returned pin-info includes the pin-number in bits 15-8. Add a check that this matches with the ACPI GPIO resource pin-number in case the assumption is not true with some ACPI tables. 
Reviewed-by: Daniel Scally Reviewed-by: Ilpo Järvinen Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20230616172132.37859-7-hdegoede@redhat.com --- drivers/platform/x86/intel/int3472/discrete.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 557517f43ede..e33c2d75975c 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -154,8 +154,8 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, { struct int3472_discrete_device *int3472 = data; struct acpi_resource_gpio *agpio; + u8 active_value, pin, type; union acpi_object *obj; - u8 active_value, type; const char *err_msg; const char *func; u32 polarity; @@ -183,6 +183,12 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, int3472_get_func_and_polarity(type, &func, &polarity); + pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value); + if (pin != agpio->pin_table[0]) + dev_warn(int3472->dev, "%s %s pin number mismatch _DSM %d resource %d\n", + func, agpio->resource_source.string_ptr, pin, + agpio->pin_table[0]); + active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value); if (!active_value) polarity ^= GPIO_ACTIVE_LOW; -- 2.34.1 ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.5/000077500000000000000000000000001471702545500216165ustar00rootroot000000000000000001-platform-x86-int3472-Add-handshake-GPIO-function.patch000066400000000000000000000037641471702545500335200ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/int3472-v6.5From 8b3c29ef041a410e303d553ca019b42c2e2765e6 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Thu, 28 Sep 2023 14:17:25 +0800 Subject: [PATCH] platform/x86: int3472: Add handshake GPIO function Handshake pin is used for Lattice MIPI aggregator to enable the camera sensor. After pulled up, recommend to wail ~250ms to get everything ready. 
Signed-off-by: Hao Yao --- drivers/platform/x86/intel/int3472/common.h | 1 + drivers/platform/x86/intel/int3472/discrete.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 655ae3ec0593..3ad4c72afb45 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -23,6 +23,7 @@ #define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b #define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c #define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d +#define INT3472_GPIO_TYPE_HANDSHAKE 0x12 #define INT3472_PDEV_MAX_NAME_LEN 23 #define INT3472_MAX_SENSOR_GPIOS 3 diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index b644ce65c990..4753161b4080 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -111,6 +111,10 @@ static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polar *func = "power-enable"; *polarity = GPIO_ACTIVE_HIGH; break; + case INT3472_GPIO_TYPE_HANDSHAKE: + *func = "handshake"; + *polarity = GPIO_ACTIVE_HIGH; + break; default: *func = "unknown"; *polarity = GPIO_ACTIVE_HIGH; @@ -201,6 +205,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, switch (type) { case INT3472_GPIO_TYPE_RESET: case INT3472_GPIO_TYPE_POWERDOWN: + case INT3472_GPIO_TYPE_HANDSHAKE: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity); if (ret) err_msg = "Failed to map GPIO pin to sensor\n"; -- 2.34.1 ipu6-drivers-0~git202411190607.0ad49882/patch/ov08x40-v6.5/000077500000000000000000000000001471702545500216345ustar00rootroot000000000000000001-media-i2c-Add-new-sensor-OV08X40-driver.patch000066400000000000000000000274731471702545500317660ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/ov08x40-v6.5From ec2a596e8c7e12a3f655a652bcf6cf84bd318201 Mon Sep 17 00:00:00 2001 From: Serin Yeh Date: Tue, 19 Mar 2024 17:43:56 +0800 Subject: [PATCH] media/i2c:Add new sensor OV08X40 driver Add new sensor OV08X40 driver. 
Add Tline calculation and handshake pin support for sensor Change-Id: I4cf3f683dcedbbd9b882e48d948ab121f9d5d33c Tracked-On: #JILCCI-19 Signed-off-by: Serin Yeh --- drivers/media/i2c/ov08x40.c | 199 +++++++++++++++++++++++++++++++----- 1 file changed, 172 insertions(+), 27 deletions(-) diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c index 77bcdcd082..b9d0b6b4be 100644 --- a/drivers/media/i2c/ov08x40.c +++ b/drivers/media/i2c/ov08x40.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -34,7 +35,7 @@ /* V_TIMING internal */ #define OV08X40_REG_VTS 0x380e -#define OV08X40_VTS_30FPS 0x1388 +#define OV08X40_VTS_30FPS 0x09c4 #define OV08X40_VTS_BIN_30FPS 0x115c #define OV08X40_VTS_MAX 0x7fff @@ -44,8 +45,9 @@ /* Exposure control */ #define OV08X40_REG_EXPOSURE 0x3500 -#define OV08X40_EXPOSURE_MAX_MARGIN 31 -#define OV08X40_EXPOSURE_MIN 1 +#define OV08X40_EXPOSURE_MAX_MARGIN 8 +#define OV08X40_EXPOSURE_BIN_MAX_MARGIN 2 +#define OV08X40_EXPOSURE_MIN 4 #define OV08X40_EXPOSURE_STEP 1 #define OV08X40_EXPOSURE_DEFAULT 0x40 @@ -127,11 +129,17 @@ struct ov08x40_mode { /* V-timing */ u32 vts_def; u32 vts_min; + /* Line Length Pixels */ + u32 llp; /* Index of Link frequency config to be used */ u32 link_freq_index; /* Default register values */ struct ov08x40_reg_list reg_list; + + /* Exposure calculation */ + u16 exposure_margin; + u16 exposure_shift; }; static const struct ov08x40_reg mipi_data_rate_800mbps[] = { @@ -2353,7 +2361,7 @@ static const char * const ov08x40_test_pattern_menu[] = { /* Configurations for supported link frequencies */ #define OV08X40_LINK_FREQ_400MHZ 400000000ULL - +#define OV08X40_SCLK_96MHZ 96000000ULL #define OV08X40_EXT_CLK 19200000 #define OV08X40_DATA_LANES 4 @@ -2391,24 +2399,30 @@ static const struct ov08x40_mode supported_modes[] = { .height = 2416, .vts_def = OV08X40_VTS_30FPS, .vts_min = OV08X40_VTS_30FPS, + .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */ .lanes = 4, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_3856x2416_regs), .regs = mode_3856x2416_regs, }, .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX, + .exposure_shift = 1, + .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN, }, { .width = 1928, .height = 1208, .vts_def = OV08X40_VTS_BIN_30FPS, .vts_min = OV08X40_VTS_BIN_30FPS, + .llp = 0x960, .lanes = 4, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1208_regs), .regs = mode_1928x1208_regs, }, .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX, + .exposure_shift = 0, + .exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN, }, }; @@ -2424,6 +2438,15 @@ struct ov08x40 { struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; + /* GPIO for reset */ + struct gpio_desc *reset; + /* GPIO for Lattice handshake */ + struct gpio_desc *handshake; + /* regulator */ + struct regulator *avdd; + /* Clock provider */ + struct clk *img_clk; + /* Current mode */ const struct ov08x40_mode *cur_mode; @@ -2664,13 +2687,24 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl) struct ov08x40, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd); s64 max; + int exp; + int fll; int ret = 0; /* Propagate change of current control to all related controls */ switch (ctrl->id) { case V4L2_CID_VBLANK: /* Update max exposure while meeting expected vblanking */ - max = ov08x->cur_mode->height + ctrl->val - OV08X40_EXPOSURE_MAX_MARGIN; + // max = ov08x->cur_mode->height + ctrl->val - OV08X40_EXPOSURE_MAX_MARGIN; + /* + * because in normal mode, 1 HTS = 0.5 tline + * fps = sclk / hts / vts + * so the 
vts value needs to be double + */ + max = ((ov08x->cur_mode->height + ctrl->val) << + ov08x->cur_mode->exposure_shift) - + ov08x->cur_mode->exposure_margin; + __v4l2_ctrl_modify_range(ov08x->exposure, ov08x->exposure->minimum, max, ov08x->exposure->step, max); @@ -2694,15 +2728,20 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl) ret = ov08x40_update_digital_gain(ov08x, ctrl->val); break; case V4L2_CID_EXPOSURE: + exp = (ctrl->val << ov08x->cur_mode->exposure_shift) - + ov08x->cur_mode->exposure_margin; + ret = ov08x40_write_reg(ov08x, OV08X40_REG_EXPOSURE, - OV08X40_REG_VALUE_24BIT, - ctrl->val); + OV08X40_REG_VALUE_24BIT, + exp); break; case V4L2_CID_VBLANK: + fll = ((ov08x->cur_mode->height + ctrl->val) << + ov08x->cur_mode->exposure_shift); + ret = ov08x40_write_reg(ov08x, OV08X40_REG_VTS, OV08X40_REG_VALUE_16BIT, - ov08x->cur_mode->height - + ctrl->val); + fll); break; case V4L2_CID_TEST_PATTERN: ret = ov08x40_enable_test_pattern(ov08x, ctrl->val); @@ -2813,6 +2852,7 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd, s64 h_blank; s64 pixel_rate; s64 link_freq; + u64 steps; mutex_lock(&ov08x->mutex); @@ -2840,15 +2880,22 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd, ov08x->cur_mode->height; vblank_min = ov08x->cur_mode->vts_min - ov08x->cur_mode->height; + + /* + * The frame length line should be aligned to a multiple of 4, + * as provided by the sensor vendor, in normal mode. + */ + steps = mode->exposure_shift == 1 ? 4 : 1; + __v4l2_ctrl_modify_range(ov08x->vblank, vblank_min, OV08X40_VTS_MAX - ov08x->cur_mode->height, - 1, + steps, vblank_def); __v4l2_ctrl_s_ctrl(ov08x->vblank, vblank_def); - h_blank = - link_freq_configs[mode->link_freq_index].pixels_per_line - - ov08x->cur_mode->width; + + h_blank = ov08x->cur_mode->llp - ov08x->cur_mode->width; + __v4l2_ctrl_modify_range(ov08x->hblank, h_blank, h_blank, 1, h_blank); } @@ -2949,6 +2996,51 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable) return ret; } +static int ov08x40_power_off(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret = 0; + + gpiod_set_value_cansleep(ov08x40->reset, 1); + gpiod_set_value_cansleep(ov08x40->handshake, 0); + if (ov08x40->avdd) + ret = regulator_disable(ov08x40->avdd); + clk_disable_unprepare(ov08x40->img_clk); + + return ret; +} + +static int ov08x40_power_on(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret; + + ret = clk_prepare_enable(ov08x40->img_clk); + if (ret < 0) { + dev_err(dev, "failed to enable imaging clock: %d", ret); + return ret; + } + if (ov08x40->avdd) { + ret = regulator_enable(ov08x40->avdd); + if (ret < 0) { + dev_err(dev, "failed to enable avdd: %d", ret); + clk_disable_unprepare(ov08x40->img_clk); + return ret; + } + } + gpiod_set_value_cansleep(ov08x40->handshake, 1); + gpiod_set_value_cansleep(ov08x40->reset, 0); + + /* Lattice MIPI aggregator with some version FW needs longer delay + after handshake triggered. We set 25ms as a safe value and wait + for a stable version FW. 
*/ + msleep_interruptible(25); + + return ret; +} + static int __maybe_unused ov08x40_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); @@ -3074,8 +3166,8 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x) OV08X40_VTS_MAX - mode->height, 1, vblank_def); - hblank = link_freq_configs[mode->link_freq_index].pixels_per_line - - mode->width; + hblank = ov08x->cur_mode->llp - ov08x->cur_mode->width; + ov08x->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08x40_ctrl_ops, V4L2_CID_HBLANK, hblank, hblank, 1, hblank); @@ -3141,6 +3233,41 @@ static void ov08x40_free_controls(struct ov08x40 *ov08x) mutex_destroy(&ov08x->mutex); } + +static int ov08x40_get_pm_resources(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret; + + ov08x40->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ov08x40->reset)) + return dev_err_probe(dev, PTR_ERR(ov08x40->reset), + "failed to get reset gpio\n"); + + ov08x40->handshake = devm_gpiod_get_optional(dev, "handshake", + GPIOD_OUT_LOW); + if (IS_ERR(ov08x40->handshake)) + return dev_err_probe(dev, PTR_ERR(ov08x40->handshake), + "failed to get handshake gpio\n"); + + ov08x40->img_clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(ov08x40->img_clk)) + return dev_err_probe(dev, PTR_ERR(ov08x40->img_clk), + "failed to get imaging clock\n"); + + ov08x40->avdd = devm_regulator_get_optional(dev, "avdd"); + if (IS_ERR(ov08x40->avdd)) { + ret = PTR_ERR(ov08x40->avdd); + ov08x40->avdd = NULL; + if (ret != -ENODEV) + return dev_err_probe(dev, ret, + "failed to get avdd regulator\n"); + } + + return 0; +} + static int ov08x40_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { @@ -3152,8 +3279,10 @@ static int ov08x40_check_hwcfg(struct device *dev) int ret; u32 ext_clk; - if (!fwnode) - return -ENXIO; + ep = fwnode_graph_get_next_endpoint(fwnode, NULL); + dev_dbg(dev, "fwnode_graph_get_next_endpoint = %d\n", ep); + if (!ep) + return -EPROBE_DEFER; ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); @@ -3168,10 +3297,6 @@ static int ov08x40_check_hwcfg(struct device *dev) return -EINVAL; } - ep = fwnode_graph_get_next_endpoint(fwnode, NULL); - if (!ep) - return -ENXIO; - ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) @@ -3214,6 +3339,7 @@ static int ov08x40_check_hwcfg(struct device *dev) static int ov08x40_probe(struct i2c_client *client) { struct ov08x40 *ov08x; + bool full_power; int ret; /* Check HW config */ @@ -3229,12 +3355,25 @@ static int ov08x40_probe(struct i2c_client *client) /* Initialize subdev */ v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops); + full_power = acpi_dev_state_d0(&client->dev); + if (full_power) { + dev_err(&client->dev, "start full_power\n"); + ret = ov08x40_get_pm_resources(&client->dev); + if (ret) + return ret; + ret = ov08x40_power_on(&client->dev); + if (ret) { + dev_err_probe(&client->dev, ret, + "failed to power on\n"); + goto probe_error_ret; + } - /* Check module identity */ - ret = ov08x40_identify_module(ov08x); - if (ret) { - dev_err(&client->dev, "failed to find sensor: %d\n", ret); - return ret; + /* Check module identity */ + ret = ov08x40_identify_module(ov08x); + if (ret) { + dev_err(&client->dev, "failed to find sensor: %d\n", ret); + return ret; + } } /* Set default mode to max resolution */ @@ -3266,7 +3405,10 @@ static int ov08x40_probe(struct i2c_client *client) * Device is already turned on by i2c-core 
with ACPI domain PM. * Enable runtime PM and turn off the device. */ - pm_runtime_set_active(&client->dev); + + /* Set the device's state to active if it's in D0 state. */ + if (full_power) + pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); @@ -3278,6 +3420,8 @@ static int ov08x40_probe(struct i2c_client *client) error_handler_free: ov08x40_free_controls(ov08x); +probe_error_ret: + ov08x40_power_off(&client->dev); return ret; } @@ -3320,6 +3464,7 @@ static struct i2c_driver ov08x40_i2c_driver = { module_i2c_driver(ov08x40_i2c_driver); MODULE_AUTHOR("Jason Chen "); +MODULE_AUTHOR("Qingwu Zhang "); MODULE_AUTHOR("Shawn Tu "); MODULE_DESCRIPTION("OmniVision OV08X40 sensor driver"); MODULE_LICENSE("GPL"); -- 2.43.2 ipu6-drivers-0~git202411190607.0ad49882/patch/ov13b10-v6.3/000077500000000000000000000000001471702545500215755ustar00rootroot000000000000000002-media-ov13b10-Add-1364x768-register-settings.patch000066400000000000000000000046221471702545500324700ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/ov13b10-v6.3From 52bf7eabd410f9990f2e132bbc6aa88932d4dc9a Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Thu, 15 Jun 2023 10:58:26 +0800 Subject: [PATCH 2/2] media: ov13b10: Add 1364x768 register settings Signed-off-by: Hao Yao --- drivers/media/i2c/ov13b10.c | 58 ++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c index d7317e1e7f3f..f1f17765236c 100644 --- a/drivers/media/i2c/ov13b10.c +++ b/drivers/media/i2c/ov13b10.c @@ -31,6 +31,7 @@ #define OV13B10_REG_VTS 0x380e #define OV13B10_VTS_30FPS 0x0c7c #define OV13B10_VTS_60FPS 0x063e +#define OV13B10_VTS_120FPS 0x0320 #define OV13B10_VTS_MAX 0x7fff /* HBLANK control - read only */ @@ -461,6 +462,50 @@ static const struct ov13b10_reg mode_2080x1170_regs[] = { {0x5001, 0x0d}, }; +static const struct ov13b10_reg mode_1364x768_120fps_regs[] = { + {0x0305, 0xaf}, + {0x3011, 0x7c}, + {0x3501, 0x03}, + {0x3502, 0x00}, + {0x3662, 0x88}, + {0x3714, 0x28}, + {0x3739, 0x10}, + {0x37c2, 0x14}, + {0x37d9, 0x06}, + {0x37e2, 0x0c}, + {0x37e4, 0x00}, + {0x3800, 0x02}, /* X start 740 */ + {0x3801, 0xe4}, + {0x3802, 0x03}, /* Y start 840 */ + {0x3803, 0x48}, + {0x3804, 0x0d}, /* X end 3499 */ + {0x3805, 0xab}, + {0x3806, 0x09}, /* Y end 2400 */ + {0x3807, 0x60}, + {0x3808, 0x05}, /* X out size 1364 */ + {0x3809, 0x54}, + {0x380a, 0x03}, /* Y out size 768 */ + {0x380b, 0x00}, + {0x380c, 0x04}, + {0x380d, 0x8e}, + {0x380e, 0x03}, + {0x380f, 0x20}, + {0x3811, 0x07}, /* isp x offset 7 */ + {0x3813, 0x07}, /* isp y offset 7 */ + {0x3814, 0x03}, + {0x3816, 0x03}, + {0x3820, 0x8b}, + {0x3c8c, 0x18}, + {0x4008, 0x00}, + {0x4009, 0x05}, + {0x4050, 0x00}, + {0x4051, 0x05}, + {0x4501, 0x08}, + {0x4505, 0x04}, + {0x5000, 0xfd}, + {0x5001, 0x0d}, +}; + static const char * const ov13b10_test_pattern_menu[] = { "Disabled", "Vertical Color Bar Type 1", @@ -561,7 +606,18 @@ static const struct ov13b10_mode supported_modes[] = { .regs = mode_2080x1170_regs, }, .link_freq_index = OV13B10_LINK_FREQ_INDEX_0, - } + }, + { + .width = 1364, + .height = 768, + .vts_def = OV13B10_VTS_120FPS, + .vts_min = OV13B10_VTS_120FPS, + .link_freq_index = OV13B10_LINK_FREQ_INDEX_0, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1364x768_120fps_regs), + .regs = mode_1364x768_120fps_regs, + }, + }, }; struct ov13b10 { -- 2.34.1 
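/*
 * Editor's note (illustrative sketch, not part of the patch above): the new
 * 1364x768 ov13b10 mode keeps the same link frequency and shortens VTS to
 * reach 120 fps. Assuming the per-line time stays roughly constant across
 * these modes, frame rate scales inversely with VTS, which is consistent
 * with the constants added here (0x0c7c for 30 fps, 0x063e for 60 fps,
 * 0x0320 for 120 fps). The VTS values come from the patch; the program is
 * only a back-of-the-envelope check, not driver code.
 */
#include <stdio.h>

#define OV13B10_VTS_30FPS  0x0c7c	/* 3196 lines */
#define OV13B10_VTS_60FPS  0x063e	/* 1598 lines */
#define OV13B10_VTS_120FPS 0x0320	/*  800 lines */

int main(void)
{
	const double fps_base = 30.0;
	const int vts[] = { OV13B10_VTS_30FPS, OV13B10_VTS_60FPS, OV13B10_VTS_120FPS };

	for (int i = 0; i < 3; i++) {
		/* fps ~= 30 * VTS_30FPS / VTS, with the line time held fixed */
		double fps = fps_base * OV13B10_VTS_30FPS / vts[i];
		printf("VTS=0x%04x -> ~%.1f fps\n", vts[i], fps);
	}
	return 0;
}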
0006-MLIST-media-ov13b10-support-new-ACPI-HID-OVTI13B1.patch000066400000000000000000000016111471702545500327020ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/ov13b10-v6.3From 3c307ed62acf60b55611e7a69eb4c7ec72bf19ed Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Fri, 26 May 2023 18:07:23 +0800 Subject: [PATCH 6/8] MLIST: media: ov13b10: support new ACPI HID 'OVTI13B1' On ACPI systems, the HID of ov13b10 is 'OVTI13B1', add this new HID in acpi IDs table to make driver support it. Signed-off-by: Hao Yao Signed-off-by: Bingbu Cao --- drivers/media/i2c/ov13b10.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c index c1430044fb1e..ede33899248c 100644 --- a/drivers/media/i2c/ov13b10.c +++ b/drivers/media/i2c/ov13b10.c @@ -1484,6 +1484,7 @@ static const struct dev_pm_ops ov13b10_pm_ops = { #ifdef CONFIG_ACPI static const struct acpi_device_id ov13b10_acpi_ids[] = { {"OVTIDB10"}, + {"OVTI13B1"}, { /* sentinel */ } }; -- 2.34.1 0007-MLIST-media-ov13b10-Defer-probe-if-no-endpoint-found.patch000066400000000000000000000027031471702545500342500ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/ov13b10-v6.3From 4118cc61955e7c8d4f88d05d38fe1ea9fa690b8b Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Fri, 26 May 2023 18:07:24 +0800 Subject: [PATCH 7/8] MLIST: media: ov13b10: Defer probe if no endpoint found The ov13b10 need be connected to a CIO2 or IPU device by bridge, sometimes the bridge driver was not probed before ov13b10 driver, then the absence of the fwnode endpoint for this device is expected, so driver return -EPROBE_DEFER in this case to let the probe occur after bridge driver. Signed-off-by: Hao Yao Signed-off-by: Bingbu Cao --- drivers/media/i2c/ov13b10.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c index ede33899248c..2d48c94659a4 100644 --- a/drivers/media/i2c/ov13b10.c +++ b/drivers/media/i2c/ov13b10.c @@ -1331,6 +1331,10 @@ static int ov13b10_check_hwcfg(struct device *dev) if (!fwnode) return -ENXIO; + ep = fwnode_graph_get_next_endpoint(fwnode, NULL); + if (!ep) + return -EPROBE_DEFER; + ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { @@ -1344,10 +1348,6 @@ static int ov13b10_check_hwcfg(struct device *dev) return -EINVAL; } - ep = fwnode_graph_get_next_endpoint(fwnode, NULL); - if (!ep) - return -ENXIO; - ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) -- 2.34.1 0008-MLIST-media-ov13b10-add-PM-control-support-based-on-.patch000066400000000000000000000147741471702545500341720ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/ov13b10-v6.3From 7682c98857e3486cc5a3f20759c072ad425859a7 Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Thu, 15 Jun 2023 13:54:16 +0800 Subject: [PATCH 8/8] MLIST: media: ov13b10: add PM control support based on power resources On ACPI based platforms, the ov13b10 camera sensor need to be powered up by acquire the PM resource from INT3472. INT3472 will register one regulator 'avdd', one reset gpio and clock source for ov13b10. Add 2 power interfaces that are registered as runtime PM callbacks. 
Signed-off-by: Bingbu Cao Signed-off-by: Hao Yao Suggested-by: Hans de Goede Reviewed-by: Hans de Goede --- drivers/media/i2c/ov13b10.c | 120 +++++++++++++++++++++++++++++++++--- 1 file changed, 110 insertions(+), 10 deletions(-) diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c index 2d48c94659a4..a6ac53e4fd89 100644 --- a/drivers/media/i2c/ov13b10.c +++ b/drivers/media/i2c/ov13b10.c @@ -2,6 +2,9 @@ // Copyright (c) 2021 Intel Corporation. #include +#include +#include +#include #include #include #include @@ -573,6 +576,11 @@ struct ov13b10 { struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; + + struct clk *img_clk; + struct regulator *avdd; + struct gpio_desc *reset; + /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; @@ -1051,6 +1059,49 @@ static int ov13b10_identify_module(struct ov13b10 *ov13b) return 0; } +static int ov13b10_power_off(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov13b10 *ov13b10 = to_ov13b10(sd); + + gpiod_set_value_cansleep(ov13b10->reset, 1); + + if (ov13b10->avdd) + regulator_disable(ov13b10->avdd); + + clk_disable_unprepare(ov13b10->img_clk); + + return 0; +} + +static int ov13b10_power_on(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov13b10 *ov13b10 = to_ov13b10(sd); + int ret; + + ret = clk_prepare_enable(ov13b10->img_clk); + if (ret < 0) { + dev_err(dev, "failed to enable imaging clock: %d", ret); + return ret; + } + + if (ov13b10->avdd) { + ret = regulator_enable(ov13b10->avdd); + if (ret < 0) { + dev_err(dev, "failed to enable avdd: %d", ret); + clk_disable_unprepare(ov13b10->img_clk); + return ret; + } + } + + gpiod_set_value_cansleep(ov13b10->reset, 0); + /* 5ms to wait ready after XSHUTDN assert */ + usleep_range(5000, 5500); + + return 0; +} + static int ov13b10_start_streaming(struct ov13b10 *ov13b) { struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd); @@ -1145,7 +1196,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable) return ret; } -static int __maybe_unused ov13b10_suspend(struct device *dev) +static int ov13b10_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov13b10 *ov13b = to_ov13b10(sd); @@ -1153,26 +1204,35 @@ static int __maybe_unused ov13b10_suspend(struct device *dev) if (ov13b->streaming) ov13b10_stop_streaming(ov13b); + ov13b10_power_off(dev); + return 0; } -static int __maybe_unused ov13b10_resume(struct device *dev) +static int ov13b10_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov13b10 *ov13b = to_ov13b10(sd); int ret; + ret = ov13b10_power_on(dev); + if (ret) + goto pm_fail; + if (ov13b->streaming) { ret = ov13b10_start_streaming(ov13b); if (ret) - goto error; + goto stop_streaming; } return 0; -error: +stop_streaming: ov13b10_stop_streaming(ov13b); + ov13b10_power_off(dev); +pm_fail: ov13b->streaming = false; + return ret; } @@ -1317,6 +1377,34 @@ static void ov13b10_free_controls(struct ov13b10 *ov13b) mutex_destroy(&ov13b->mutex); } +static int ov13b10_get_pm_resources(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov13b10 *ov13b = to_ov13b10(sd); + int ret; + + ov13b->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ov13b->reset)) + return dev_err_probe(dev, PTR_ERR(ov13b->reset), + "failed to get reset gpio\n"); + + ov13b->img_clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(ov13b->img_clk)) + return dev_err_probe(dev, 
PTR_ERR(ov13b->img_clk), + "failed to get imaging clock\n"); + + ov13b->avdd = devm_regulator_get_optional(dev, "avdd"); + if (IS_ERR(ov13b->avdd)) { + ret = PTR_ERR(ov13b->avdd); + ov13b->avdd = NULL; + if (ret != -ENODEV) + return dev_err_probe(dev, ret, + "failed to get avdd regulator\n"); + } + + return 0; +} + static int ov13b10_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { @@ -1407,13 +1495,23 @@ static int ov13b10_probe(struct i2c_client *client) /* Initialize subdev */ v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops); + ret = ov13b10_get_pm_resources(&client->dev); + if (ret) + return ret; + full_power = acpi_dev_state_d0(&client->dev); if (full_power) { + ov13b10_power_on(&client->dev); + if (ret) { + dev_err(&client->dev, "failed to power on\n"); + return ret; + } + /* Check module identity */ ret = ov13b10_identify_module(ov13b); if (ret) { dev_err(&client->dev, "failed to find sensor: %d\n", ret); - return ret; + goto error_power_off; } } @@ -1422,7 +1520,7 @@ static int ov13b10_probe(struct i2c_client *client) ret = ov13b10_init_controls(ov13b); if (ret) - return ret; + goto error_power_off; /* Initialize subdev */ ov13b->sd.internal_ops = &ov13b10_internal_ops; @@ -1462,6 +1560,9 @@ static int ov13b10_probe(struct i2c_client *client) ov13b10_free_controls(ov13b); dev_err(&client->dev, "%s failed:%d\n", __func__, ret); +error_power_off: + ov13b10_power_off(&client->dev); + return ret; } @@ -1477,9 +1578,8 @@ static void ov13b10_remove(struct i2c_client *client) pm_runtime_disable(&client->dev); } -static const struct dev_pm_ops ov13b10_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(ov13b10_suspend, ov13b10_resume) -}; +static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend, + ov13b10_resume, NULL); #ifdef CONFIG_ACPI static const struct acpi_device_id ov13b10_acpi_ids[] = { @@ -1494,7 +1594,7 @@ MODULE_DEVICE_TABLE(acpi, ov13b10_acpi_ids); static struct i2c_driver ov13b10_i2c_driver = { .driver = { .name = "ov13b10", - .pm = &ov13b10_pm_ops, + .pm = pm_ptr(&ov13b10_pm_ops), .acpi_match_table = ACPI_PTR(ov13b10_acpi_ids), }, .probe_new = ov13b10_probe, -- 2.34.1 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10/000077500000000000000000000000001471702545500205625ustar00rootroot000000000000000003-media-ipu6-Use-module-parameter-to-set-isys-psys-fre.patch000066400000000000000000000043511471702545500340060ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10From d9fb0461964244c26f8fec257ae1b725f0ef5873 Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Tue, 30 Jul 2024 11:03:10 +0800 Subject: [PATCH 3/5] media: ipu6: Use module parameter to set isys/psys freq Signed-off-by: Hongju Wang Signed-off-by: Dongcheng Yan --- drivers/media/pci/intel/ipu6/ipu6.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index bbd646378ab3..5a683dbdf03a 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -33,6 +33,14 @@ #include "ipu6-platform-isys-csi2-reg.h" #include "ipu6-platform-regs.h" +static unsigned int isys_freq_override; +module_param(isys_freq_override, uint, 0660); +MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); + +static unsigned int psys_freq_override; +module_param(psys_freq_override, uint, 0660); +MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); + #define IPU6_PCI_BAR 0 struct ipu6_cell_program { @@ -387,6 +395,14 @@ ipu6_isys_init(struct 
pci_dev *pdev, struct device *parent, pdata->base = base; pdata->ipdata = ipdata; + /* Override the isys freq */ + if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && + isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { + ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; + dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", + isys_freq_override); + } + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_ISYS_NAME); if (IS_ERR(isys_adev)) { @@ -433,6 +449,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, pdata->base = base; pdata->ipdata = ipdata; + /* Override the psys freq */ + if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && + psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { + ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; + ctrl->qos_floor = psys_freq_override; + dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", + psys_freq_override); + } + psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_PSYS_NAME); if (IS_ERR(psys_adev)) { -- 2.43.0 0004-media-ipu6-Don-t-disable-ATS-when-CPU-is-not-Meteor-.patch000066400000000000000000000026311471702545500331550ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10From ee48e82af9634b4b7da6767a897504331016daf6 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Wed, 18 Sep 2024 14:47:40 +0800 Subject: [PATCH 4/5] media: ipu6: Don't disable ATS when CPU is not Meteor Lake IPU6 on Arrow Lake shares the same PCI ID with Meteor Lake, so we can't use PCI ID to distinguish whether the hardware is Meteor Lake. Use boot_cpu_data.x86_vfm instead. Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu6/ipu6.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index 5a683dbdf03a..00ae55816f9d 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -3,6 +3,8 @@ * Copyright (C) 2013--2024 Intel Corporation */ +#include + #include #include #include @@ -493,8 +495,9 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) int ret; /* disable IPU6 PCI ATS on mtl ES2 */ - if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && - pci_ats_supported(dev)) + if ((boot_cpu_data.x86_vfm == INTEL_METEORLAKE || + boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L) && + boot_cpu_data.x86_stepping == 0x2 && pci_ats_supported(dev)) pci_disable_ats(dev); /* No PCI msi capability for IPU6EP */ -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10/0005-media-ipu-bridge-Support-more-sensors.patch000066400000000000000000000032121471702545500313730ustar00rootroot00000000000000From d054e0f6a17fcff494dffedcece4e3c34978ad6a Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 26 Mar 2024 10:50:41 +0800 Subject: [PATCH 5/5] media: ipu-bridge: Support more sensors Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu-bridge.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index 61750cc98d70..0248d119a597 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -61,10 +61,29 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("INT3537", 1, 437000000), /* Omnivision ov13b10 */ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), + IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), /* GalaxyCore GC0310 */ IPU_SENSOR_CONFIG("INT0310", 0), /* Omnivision ov01a10 */ 
IPU_SENSOR_CONFIG("OVTI01A0", 1, 400000000), + /* Omnivision ov08x40 */ + IPU_SENSOR_CONFIG("OVTI08F4", 1, 400000000), + /* Himax hm11b1 */ + IPU_SENSOR_CONFIG("HIMX11B1", 1, 384000000), + /* Himax hm2170 */ + IPU_SENSOR_CONFIG("HIMX2170", 1, 384000000), + /* Himax hm2172 */ + IPU_SENSOR_CONFIG("HIMX2172", 1, 384000000), + /* Omnivision ov01a1s */ + IPU_SENSOR_CONFIG("OVTI01AS", 1, 400000000), + /* Omnivision ov02c10 */ + IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), + /* Omnivision ov02e10 */ + IPU_SENSOR_CONFIG("OVTI02E1", 1, 360000000), + /* Omnivision ov05c10 */ + IPU_SENSOR_CONFIG("OVTI05C1", 1, 480000000), + /* Omnivision ov08a10 */ + IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000), }; static const struct ipu_property_names prop_names = { -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10/in-tree-build/000077500000000000000000000000001471702545500232225ustar00rootroot000000000000000001-media-ipu6-Workaround-to-build-PSYS.patch000066400000000000000000000013671471702545500331530ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10/in-tree-buildFrom 9204b09b79fd63f6d9c14fff8680d837bfc5824a Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Fri, 13 Sep 2024 18:02:37 +0800 Subject: [PATCH 1/5] media: ipu6: Workaround to build PSYS Signed-off-by: Dongcheng Yan --- drivers/media/pci/intel/ipu6/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/intel/ipu6/Makefile b/drivers/media/pci/intel/ipu6/Makefile index a821b0a1567f..8a4f4cd7610d 100644 --- a/drivers/media/pci/intel/ipu6/Makefile +++ b/drivers/media/pci/intel/ipu6/Makefile @@ -21,3 +21,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ ipu6-isys-dwc-phy.o obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o +obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -- 2.43.0 0002-media-i2c-Add-sensors-config.patch000066400000000000000000000104341471702545500317220ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.10/in-tree-buildFrom d1a3458bab8845555b5cb760cf9db4b3654fa4e7 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Mon, 25 Mar 2024 14:40:09 +0800 Subject: [PATCH 2/5] media: i2c: Add sensors config Signed-off-by: Hao Yao --- drivers/media/i2c/Kconfig | 71 ++++++++++++++++++++++++++++++++++++++ drivers/media/i2c/Makefile | 8 +++++ 2 files changed, 79 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index c6d3ee472d81..5fa7d3e047e4 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -107,6 +107,33 @@ config VIDEO_HI847 To compile this driver as a module, choose M here: the module will be called hi847. +config VIDEO_HM11B1 + tristate "Himax HM11B1 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM11B1 camera. + + To compile this driver as a module, choose M here: the + module will be called hm11b1. + +config VIDEO_HM2170 + tristate "Himax HM2170 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2170 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2170. + +config VIDEO_HM2172 + tristate "Himax HM2172 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2172 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2172. + config VIDEO_IMX208 tristate "Sony IMX208 sensor support" help @@ -315,6 +342,15 @@ config VIDEO_OV01A10 To compile this driver as a module, choose M here: the module will be called ov01a10. 
+config VIDEO_OV01A1S + tristate "OmniVision OV01A1S sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV01A1S camera. + + To compile this driver as a module, choose M here: the + module will be called ov01a1s. + config VIDEO_OV02A10 tristate "OmniVision OV02A10 sensor support" help @@ -324,6 +360,41 @@ config VIDEO_OV02A10 To compile this driver as a module, choose M here: the module will be called ov02a10. +config VIDEO_OV02C10 + tristate "OmniVision OV02C10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV02C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov02c10. + +config VIDEO_OV02E10 + tristate "OmniVision OV02E10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + ov02e10 camera. + +config VIDEO_OV05C10 + tristate "OmniVision OV05C10 sensor support" + depends on ACPI || COMPILE_TEST + select V4L2_CCI_I2C + help + This is a Video4Linux2 sensor driver for the OmniVision + OV05C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov05c10. + +config VIDEO_OV08A10 + tristate "OmniVision OV08A10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV08A10 camera sensor. + + To compile this driver as a module, choose M here: the + module will be called ov08a10. + config VIDEO_OV08D10 tristate "OmniVision OV08D10 sensor support" help diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index dfbe6448b549..2c1765d5650c 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -42,6 +42,9 @@ obj-$(CONFIG_VIDEO_GC2145) += gc2145.o obj-$(CONFIG_VIDEO_HI556) += hi556.o obj-$(CONFIG_VIDEO_HI846) += hi846.o obj-$(CONFIG_VIDEO_HI847) += hi847.o +obj-$(CONFIG_VIDEO_HM11B1) += hm11b1.o +obj-$(CONFIG_VIDEO_HM2170) += hm2170.o +obj-$(CONFIG_VIDEO_HM2172) += hm2172.o obj-$(CONFIG_VIDEO_I2C) += video-i2c.o obj-$(CONFIG_VIDEO_IMX208) += imx208.o obj-$(CONFIG_VIDEO_IMX214) += imx214.o @@ -76,7 +79,12 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o +obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o +obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o +obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o +obj-$(CONFIG_VIDEO_OV05C10) += ov05c10.o +obj-$(CONFIG_VIDEO_OV08A10) += ov08a10.o obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11/000077500000000000000000000000001471702545500205635ustar00rootroot000000000000000003-media-ipu6-Use-module-parameter-to-set-isys-psys-fre.patch000066400000000000000000000043511471702545500340070ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11From cde3265769fe1ad857137367dea4ec16fcb80548 Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Tue, 30 Jul 2024 11:03:10 +0800 Subject: [PATCH 3/5] media: ipu6: Use module parameter to set isys/psys freq Signed-off-by: Hongju Wang Signed-off-by: Dongcheng Yan --- drivers/media/pci/intel/ipu6/ipu6.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index bbd646378ab3..5a683dbdf03a 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -33,6 +33,14 @@ #include 
"ipu6-platform-isys-csi2-reg.h" #include "ipu6-platform-regs.h" +static unsigned int isys_freq_override; +module_param(isys_freq_override, uint, 0660); +MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); + +static unsigned int psys_freq_override; +module_param(psys_freq_override, uint, 0660); +MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); + #define IPU6_PCI_BAR 0 struct ipu6_cell_program { @@ -387,6 +395,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, pdata->base = base; pdata->ipdata = ipdata; + /* Override the isys freq */ + if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && + isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { + ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; + dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", + isys_freq_override); + } + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_ISYS_NAME); if (IS_ERR(isys_adev)) { @@ -433,6 +449,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, pdata->base = base; pdata->ipdata = ipdata; + /* Override the psys freq */ + if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && + psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { + ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; + ctrl->qos_floor = psys_freq_override; + dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", + psys_freq_override); + } + psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_PSYS_NAME); if (IS_ERR(psys_adev)) { -- 2.43.0 0004-media-ipu6-Don-t-disable-ATS-when-CPU-is-not-Meteor-.patch000066400000000000000000000026311471702545500331560ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11From 1b07d0caa6542fe17adf2d9f553e3f72525c5df0 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Wed, 18 Sep 2024 14:47:40 +0800 Subject: [PATCH 4/5] media: ipu6: Don't disable ATS when CPU is not Meteor Lake IPU6 on Arrow Lake shares the same PCI ID with Meteor Lake, so we can't use PCI ID to distinguish whether the hardware is Meteor Lake. Use boot_cpu_data.x86_vfm instead. 
Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu6/ipu6.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index 5a683dbdf03a..00ae55816f9d 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -3,6 +3,8 @@ * Copyright (C) 2013--2024 Intel Corporation */ +#include + #include #include #include @@ -493,8 +495,9 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) int ret; /* disable IPU6 PCI ATS on mtl ES2 */ - if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && - pci_ats_supported(dev)) + if ((boot_cpu_data.x86_vfm == INTEL_METEORLAKE || + boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L) && + boot_cpu_data.x86_stepping == 0x2 && pci_ats_supported(dev)) pci_disable_ats(dev); /* No PCI msi capability for IPU6EP */ -- 2.43.0 0005-media-ipu-bridge-Support-ov05c10-sensor.patch000066400000000000000000000016541471702545500312750ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11From 4777258d86079bc5c1da4a50e2d6547c1fcf277c Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Thu, 10 Oct 2024 15:13:52 +0800 Subject: [PATCH 5/5] media: ipu-bridge: Support ov05c10 sensor Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu-bridge.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index a0e9a71580b5..dd868286c02e 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -72,6 +72,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), /* Omnivision OV02E10 */ IPU_SENSOR_CONFIG("OVTI02E1", 1, 360000000), + /* Omnivision ov05c10 */ + IPU_SENSOR_CONFIG("OVTI05C1", 1, 480000000), /* Omnivision OV08A10 */ IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000), /* Omnivision OV08x40 */ -- 2.43.0 0006-media-ipu-bridge-Add-support-for-additional-link.patch000066400000000000000000000021161471702545500332410ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11From a47f5c632f28e0fc7f0d472b631206d7060576a5 Mon Sep 17 00:00:00 2001 From: Jason Chen Date: Fri, 18 Oct 2024 20:34:50 +0800 Subject: [PATCH 2/2] media: ipu-bridge: Add support for additional link Two additional frequencies can be supported with 2 lanes for the ov08x40 sensor Signed-off-by: Jason Chen --- drivers/media/pci/intel/ipu-bridge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index a0e9a71580b5..91a22dcfd79c 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -75,7 +75,7 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { /* Omnivision OV08A10 */ IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000), /* Omnivision OV08x40 */ - IPU_SENSOR_CONFIG("OVTI08F4", 1, 400000000), + IPU_SENSOR_CONFIG("OVTI08F4", 3, 400000000, 749000000, 800000000), /* Omnivision OV13B10 */ IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), -- 2.34.1 0007-media-ov08x40-Add-support-for-2-4-lanes-support-at-1.patch000066400000000000000000002650571471702545500331730ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11From 152985e4e24d712e02a09d9215478fc449077b84 Mon Sep 17 00:00:00 2001 From: Jason Chen Date: Thu, 24 Oct 2024 19:20:04 +0800 Subject: [PATCH] media: ov08x40: Add support for 
2/4 lanes support at 1500 Mbps Add support for 2/4 lanes with 1500 mbps in the supported modes. Also Add a detecting function that check lanes from SSDB Signed-off-by: Jason Chen --- drivers/media/i2c/ov08x40.c | 4436 ++++++++++++++++++++++++++++++++++- 1 file changed, 4360 insertions(+), 76 deletions(-) diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c index 48df077522ad..25b1d02583f7 100644 --- a/drivers/media/i2c/ov08x40.c +++ b/drivers/media/i2c/ov08x40.c @@ -2,6 +2,7 @@ // Copyright (c) 2022 Intel Corporation. #include +#include #include #include #include @@ -104,8 +105,15 @@ enum { OV08X40_LINK_FREQ_400MHZ_INDEX, + OV08X40_LINK_FREQ_749MHZ_INDEX, + OV08X40_LINK_FREQ_800MHZ_INDEX, }; +enum module_names { + MODULE_OTHERS = 0, + MODULE_BBG802N3, + MODULE_CJFME55, +}; struct ov08x40_reg { u16 address; u8 val; @@ -147,11 +155,2779 @@ struct ov08x40_mode { u16 exposure_shift; }; -static const struct ov08x40_reg mipi_data_rate_800mbps[] = { - {0x0103, 0x01}, - {0x1000, 0x00}, - {0x1601, 0xd0}, - {0x1001, 0x04}, +struct mipi_camera_link_ssdb { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 reserved1[3]; + u8 mclkport; + u8 reserved2[13]; +} __packed; + +static const guid_t cio2_sensor_module_guid = + GUID_INIT(0x822ace8f, 0x2814, 0x4174, + 0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee); + +static const struct ov08x40_reg mipi_data_rate_800mbps[] = { + {0x0103, 0x01}, + {0x1000, 0x00}, + {0x1601, 0xd0}, + {0x1001, 0x04}, + {0x5004, 0x53}, + {0x5110, 0x00}, + {0x5111, 0x14}, + {0x5112, 0x01}, + {0x5113, 0x7b}, + {0x5114, 0x00}, + {0x5152, 0xa3}, + {0x5a52, 0x1f}, + {0x5a1a, 0x0e}, + {0x5a1b, 0x10}, + {0x5a1f, 0x0e}, + {0x5a27, 0x0e}, + {0x6002, 0x2e}, +}; + +static const struct ov08x40_reg mode_3856x2416_regs[] = { + {0x5000, 0x5d}, + {0x5001, 0x20}, + {0x5008, 0xb0}, + {0x50c1, 0x00}, + {0x53c1, 0x00}, + {0x5f40, 0x00}, + {0x5f41, 0x40}, + {0x0300, 0x3a}, + {0x0301, 0xc8}, + {0x0302, 0x31}, + {0x0303, 0x03}, + {0x0304, 0x01}, + {0x0305, 0xa1}, + {0x0306, 0x04}, + {0x0307, 0x01}, + {0x0308, 0x03}, + {0x0309, 0x03}, + {0x0310, 0x0a}, + {0x0311, 0x02}, + {0x0312, 0x01}, + {0x0313, 0x08}, + {0x0314, 0x66}, + {0x0315, 0x00}, + {0x0316, 0x34}, + {0x0320, 0x02}, + {0x0321, 0x03}, + {0x0323, 0x05}, + {0x0324, 0x01}, + {0x0325, 0xb8}, + {0x0326, 0x4a}, + {0x0327, 0x04}, + {0x0329, 0x00}, + {0x032a, 0x05}, + {0x032b, 0x00}, + {0x032c, 0x00}, + {0x032d, 0x00}, + {0x032e, 0x02}, + {0x032f, 0xa0}, + {0x0350, 0x00}, + {0x0360, 0x01}, + {0x1216, 0x60}, + {0x1217, 0x5b}, + {0x1218, 0x00}, + {0x1220, 0x24}, + {0x198a, 0x00}, + {0x198b, 0x01}, + {0x198e, 0x00}, + {0x198f, 0x01}, + {0x3009, 0x04}, + {0x3012, 0x41}, + {0x3015, 0x00}, + {0x3016, 0xb0}, + {0x3017, 0xf0}, + {0x3018, 0xf0}, + {0x3019, 0xd2}, + {0x301a, 0xb0}, + {0x301c, 0x81}, + {0x301d, 0x02}, + {0x301e, 0x80}, + {0x3022, 0xf0}, + {0x3025, 0x89}, + {0x3030, 0x03}, + {0x3044, 0xc2}, + {0x3050, 0x35}, + {0x3051, 0x60}, + {0x3052, 0x25}, + {0x3053, 0x00}, + {0x3054, 0x00}, + {0x3055, 0x02}, + {0x3056, 0x80}, + {0x3057, 0x80}, + {0x3058, 0x80}, + {0x3059, 0x00}, + {0x3107, 0x86}, + {0x3400, 0x1c}, + {0x3401, 0x80}, + {0x3402, 0x8c}, + {0x3419, 0x13}, + {0x341a, 0x89}, + 
{0x341b, 0x30}, + {0x3420, 0x00}, + {0x3421, 0x00}, + {0x3422, 0x00}, + {0x3423, 0x00}, + {0x3424, 0x00}, + {0x3425, 0x00}, + {0x3426, 0x00}, + {0x3427, 0x00}, + {0x3428, 0x0f}, + {0x3429, 0x00}, + {0x342a, 0x00}, + {0x342b, 0x00}, + {0x342c, 0x00}, + {0x342d, 0x00}, + {0x342e, 0x00}, + {0x342f, 0x11}, + {0x3430, 0x11}, + {0x3431, 0x10}, + {0x3432, 0x00}, + {0x3433, 0x00}, + {0x3434, 0x00}, + {0x3435, 0x00}, + {0x3436, 0x00}, + {0x3437, 0x00}, + {0x3442, 0x02}, + {0x3443, 0x02}, + {0x3444, 0x07}, + {0x3450, 0x00}, + {0x3451, 0x00}, + {0x3452, 0x18}, + {0x3453, 0x18}, + {0x3454, 0x00}, + {0x3455, 0x80}, + {0x3456, 0x08}, + {0x3500, 0x00}, + {0x3501, 0x02}, + {0x3502, 0x00}, + {0x3504, 0x4c}, + {0x3506, 0x30}, + {0x3507, 0x00}, + {0x3508, 0x01}, + {0x3509, 0x00}, + {0x350a, 0x01}, + {0x350b, 0x00}, + {0x350c, 0x00}, + {0x3540, 0x00}, + {0x3541, 0x01}, + {0x3542, 0x00}, + {0x3544, 0x4c}, + {0x3546, 0x30}, + {0x3547, 0x00}, + {0x3548, 0x01}, + {0x3549, 0x00}, + {0x354a, 0x01}, + {0x354b, 0x00}, + {0x354c, 0x00}, + {0x3688, 0x02}, + {0x368a, 0x2e}, + {0x368e, 0x71}, + {0x3696, 0xd1}, + {0x3699, 0x00}, + {0x369a, 0x00}, + {0x36a4, 0x00}, + {0x36a6, 0x00}, + {0x3711, 0x00}, + {0x3712, 0x51}, + {0x3713, 0x00}, + {0x3714, 0x24}, + {0x3716, 0x00}, + {0x3718, 0x07}, + {0x371a, 0x1c}, + {0x371b, 0x00}, + {0x3720, 0x08}, + {0x3725, 0x32}, + {0x3727, 0x05}, + {0x3760, 0x02}, + {0x3761, 0x17}, + {0x3762, 0x02}, + {0x3763, 0x02}, + {0x3764, 0x02}, + {0x3765, 0x2c}, + {0x3766, 0x04}, + {0x3767, 0x2c}, + {0x3768, 0x02}, + {0x3769, 0x00}, + {0x376b, 0x20}, + {0x376e, 0x03}, + {0x37b0, 0x00}, + {0x37b1, 0xab}, + {0x37b2, 0x01}, + {0x37b3, 0x82}, + {0x37b4, 0x00}, + {0x37b5, 0xe4}, + {0x37b6, 0x01}, + {0x37b7, 0xee}, + {0x3800, 0x00}, + {0x3801, 0x00}, + {0x3802, 0x00}, + {0x3803, 0x00}, + {0x3804, 0x0f}, + {0x3805, 0x1f}, + {0x3806, 0x09}, + {0x3807, 0x7f}, + {0x3808, 0x0f}, + {0x3809, 0x10}, + {0x380a, 0x09}, + {0x380b, 0x70}, + {0x380c, 0x02}, + {0x380d, 0x80}, + {0x380e, 0x13}, + {0x380f, 0x88}, + {0x3810, 0x00}, + {0x3811, 0x08}, + {0x3812, 0x00}, + {0x3813, 0x07}, + {0x3814, 0x11}, + {0x3815, 0x11}, + {0x3820, 0x00}, + {0x3821, 0x04}, + {0x3822, 0x00}, + {0x3823, 0x04}, + {0x3828, 0x0f}, + {0x382a, 0x80}, + {0x382e, 0x41}, + {0x3837, 0x08}, + {0x383a, 0x81}, + {0x383b, 0x81}, + {0x383c, 0x11}, + {0x383d, 0x11}, + {0x383e, 0x00}, + {0x383f, 0x38}, + {0x3840, 0x00}, + {0x3847, 0x00}, + {0x384a, 0x00}, + {0x384c, 0x02}, + {0x384d, 0x80}, + {0x3856, 0x50}, + {0x3857, 0x30}, + {0x3858, 0x80}, + {0x3859, 0x40}, + {0x3860, 0x00}, + {0x3888, 0x00}, + {0x3889, 0x00}, + {0x388a, 0x00}, + {0x388b, 0x00}, + {0x388c, 0x00}, + {0x388d, 0x00}, + {0x388e, 0x00}, + {0x388f, 0x00}, + {0x3894, 0x00}, + {0x3895, 0x00}, + {0x3c84, 0x00}, + {0x3d85, 0x8b}, + {0x3daa, 0x80}, + {0x3dab, 0x14}, + {0x3dac, 0x80}, + {0x3dad, 0xc8}, + {0x3dae, 0x81}, + {0x3daf, 0x7b}, + {0x3f00, 0x10}, + {0x3f01, 0x11}, + {0x3f06, 0x0d}, + {0x3f07, 0x0b}, + {0x3f08, 0x0d}, + {0x3f09, 0x0b}, + {0x3f0a, 0x01}, + {0x3f0b, 0x11}, + {0x3f0c, 0x33}, + {0x4001, 0x07}, + {0x4007, 0x20}, + {0x4008, 0x00}, + {0x4009, 0x05}, + {0x400a, 0x00}, + {0x400b, 0x08}, + {0x400c, 0x00}, + {0x400d, 0x08}, + {0x400e, 0x14}, + {0x4010, 0xf4}, + {0x4011, 0x03}, + {0x4012, 0x55}, + {0x4015, 0x00}, + {0x4016, 0x2d}, + {0x4017, 0x00}, + {0x4018, 0x0f}, + {0x401b, 0x08}, + {0x401c, 0x00}, + {0x401d, 0x10}, + {0x401e, 0x02}, + {0x401f, 0x00}, + {0x4050, 0x06}, + {0x4051, 0xff}, + {0x4052, 0xff}, + {0x4053, 0xff}, + {0x4054, 0xff}, + {0x4055, 0xff}, + {0x4056, 0xff}, + {0x4057, 
0x7f}, + {0x4058, 0x00}, + {0x4059, 0x00}, + {0x405a, 0x00}, + {0x405b, 0x00}, + {0x405c, 0x07}, + {0x405d, 0xff}, + {0x405e, 0x07}, + {0x405f, 0xff}, + {0x4080, 0x78}, + {0x4081, 0x78}, + {0x4082, 0x78}, + {0x4083, 0x78}, + {0x4019, 0x00}, + {0x401a, 0x40}, + {0x4020, 0x04}, + {0x4021, 0x00}, + {0x4022, 0x04}, + {0x4023, 0x00}, + {0x4024, 0x04}, + {0x4025, 0x00}, + {0x4026, 0x04}, + {0x4027, 0x00}, + {0x4030, 0x00}, + {0x4031, 0x00}, + {0x4032, 0x00}, + {0x4033, 0x00}, + {0x4034, 0x00}, + {0x4035, 0x00}, + {0x4036, 0x00}, + {0x4037, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x80}, + {0x4042, 0x00}, + {0x4043, 0x80}, + {0x4044, 0x00}, + {0x4045, 0x80}, + {0x4046, 0x00}, + {0x4047, 0x80}, + {0x4060, 0x00}, + {0x4061, 0x00}, + {0x4062, 0x00}, + {0x4063, 0x00}, + {0x4064, 0x00}, + {0x4065, 0x00}, + {0x4066, 0x00}, + {0x4067, 0x00}, + {0x4068, 0x00}, + {0x4069, 0x00}, + {0x406a, 0x00}, + {0x406b, 0x00}, + {0x406c, 0x00}, + {0x406d, 0x00}, + {0x406e, 0x00}, + {0x406f, 0x00}, + {0x4070, 0x00}, + {0x4071, 0x00}, + {0x4072, 0x00}, + {0x4073, 0x00}, + {0x4074, 0x00}, + {0x4075, 0x00}, + {0x4076, 0x00}, + {0x4077, 0x00}, + {0x4078, 0x00}, + {0x4079, 0x00}, + {0x407a, 0x00}, + {0x407b, 0x00}, + {0x407c, 0x00}, + {0x407d, 0x00}, + {0x407e, 0x00}, + {0x407f, 0x00}, + {0x40e0, 0x00}, + {0x40e1, 0x00}, + {0x40e2, 0x00}, + {0x40e3, 0x00}, + {0x40e4, 0x00}, + {0x40e5, 0x00}, + {0x40e6, 0x00}, + {0x40e7, 0x00}, + {0x40e8, 0x00}, + {0x40e9, 0x80}, + {0x40ea, 0x00}, + {0x40eb, 0x80}, + {0x40ec, 0x00}, + {0x40ed, 0x80}, + {0x40ee, 0x00}, + {0x40ef, 0x80}, + {0x40f0, 0x02}, + {0x40f1, 0x04}, + {0x4300, 0x00}, + {0x4301, 0x00}, + {0x4302, 0x00}, + {0x4303, 0x00}, + {0x4304, 0x00}, + {0x4305, 0x00}, + {0x4306, 0x00}, + {0x4307, 0x00}, + {0x4308, 0x00}, + {0x4309, 0x00}, + {0x430a, 0x00}, + {0x430b, 0xff}, + {0x430c, 0xff}, + {0x430d, 0x00}, + {0x430e, 0x00}, + {0x4315, 0x00}, + {0x4316, 0x00}, + {0x4317, 0x00}, + {0x4318, 0x00}, + {0x4319, 0x00}, + {0x431a, 0x00}, + {0x431b, 0x00}, + {0x431c, 0x00}, + {0x4500, 0x07}, + {0x4501, 0x00}, + {0x4502, 0x00}, + {0x4503, 0x0f}, + {0x4504, 0x80}, + {0x4506, 0x01}, + {0x4509, 0x05}, + {0x450c, 0x00}, + {0x450d, 0x20}, + {0x450e, 0x00}, + {0x450f, 0x00}, + {0x4510, 0x00}, + {0x4523, 0x00}, + {0x4526, 0x00}, + {0x4542, 0x00}, + {0x4543, 0x00}, + {0x4544, 0x00}, + {0x4545, 0x00}, + {0x4546, 0x00}, + {0x4547, 0x10}, + {0x4602, 0x00}, + {0x4603, 0x15}, + {0x460b, 0x07}, + {0x4680, 0x11}, + {0x4686, 0x00}, + {0x4687, 0x00}, + {0x4700, 0x00}, + {0x4800, 0x64}, + {0x4806, 0x40}, + {0x480b, 0x10}, + {0x480c, 0x80}, + {0x480f, 0x32}, + {0x4813, 0xe4}, + {0x4837, 0x14}, + {0x4850, 0x42}, + {0x4884, 0x04}, + {0x4c00, 0xf8}, + {0x4c01, 0x44}, + {0x4c03, 0x00}, + {0x4d00, 0x00}, + {0x4d01, 0x16}, + {0x4d04, 0x10}, + {0x4d05, 0x00}, + {0x4d06, 0x0c}, + {0x4d07, 0x00}, + {0x3d84, 0x04}, + {0x3680, 0xa4}, + {0x3682, 0x80}, + {0x3601, 0x40}, + {0x3602, 0x90}, + {0x3608, 0x0a}, + {0x3938, 0x09}, + {0x3a74, 0x84}, + {0x3a99, 0x84}, + {0x3ab9, 0xa6}, + {0x3aba, 0xba}, + {0x3b12, 0x84}, + {0x3b14, 0xbb}, + {0x3b15, 0xbf}, + {0x3a29, 0x26}, + {0x3a1f, 0x8a}, + {0x3a22, 0x91}, + {0x3a25, 0x96}, + {0x3a28, 0xb4}, + {0x3a2b, 0xba}, + {0x3a2e, 0xbf}, + {0x3a31, 0xc1}, + {0x3a20, 0x00}, + {0x3939, 0x9d}, + {0x3902, 0x0e}, + {0x3903, 0x0e}, + {0x3904, 0x0e}, + {0x3905, 0x0e}, + {0x3906, 0x07}, + {0x3907, 0x0d}, + {0x3908, 0x11}, + {0x3909, 0x12}, + {0x360f, 0x99}, + {0x390c, 0x33}, + {0x390d, 0x66}, + {0x390e, 0xaa}, + {0x3911, 0x90}, + {0x3913, 0x90}, + {0x3915, 0x90}, + {0x3917, 0x90}, + {0x3b3f, 0x9d}, + 
{0x3b45, 0x9d}, + {0x3b1b, 0xc9}, + {0x3b21, 0xc9}, + {0x3440, 0xa4}, + {0x3a23, 0x15}, + {0x3a26, 0x1d}, + {0x3a2c, 0x4a}, + {0x3a2f, 0x18}, + {0x3a32, 0x55}, + {0x3b0a, 0x01}, + {0x3b0b, 0x00}, + {0x3b0e, 0x01}, + {0x3b0f, 0x00}, + {0x392c, 0x02}, + {0x392d, 0x02}, + {0x392e, 0x04}, + {0x392f, 0x03}, + {0x3930, 0x08}, + {0x3931, 0x07}, + {0x3932, 0x10}, + {0x3933, 0x0c}, + {0x3609, 0x08}, + {0x3921, 0x0f}, + {0x3928, 0x15}, + {0x3929, 0x2a}, + {0x392a, 0x54}, + {0x392b, 0xa8}, + {0x3426, 0x10}, + {0x3407, 0x01}, + {0x3404, 0x01}, + {0x3500, 0x00}, + {0x3501, 0x10}, + {0x3502, 0x10}, + {0x3508, 0x0f}, + {0x3509, 0x80}, +}; + +static const struct ov08x40_reg mode_1928x1208_regs[] = { + {0x5000, 0x55}, + {0x5001, 0x00}, + {0x5008, 0xb0}, + {0x50c1, 0x00}, + {0x53c1, 0x00}, + {0x5f40, 0x00}, + {0x5f41, 0x40}, + {0x0300, 0x3a}, + {0x0301, 0xc8}, + {0x0302, 0x31}, + {0x0303, 0x03}, + {0x0304, 0x01}, + {0x0305, 0xa1}, + {0x0306, 0x04}, + {0x0307, 0x01}, + {0x0308, 0x03}, + {0x0309, 0x03}, + {0x0310, 0x0a}, + {0x0311, 0x02}, + {0x0312, 0x01}, + {0x0313, 0x08}, + {0x0314, 0x66}, + {0x0315, 0x00}, + {0x0316, 0x34}, + {0x0320, 0x02}, + {0x0321, 0x03}, + {0x0323, 0x05}, + {0x0324, 0x01}, + {0x0325, 0xb8}, + {0x0326, 0x4a}, + {0x0327, 0x04}, + {0x0329, 0x00}, + {0x032a, 0x05}, + {0x032b, 0x00}, + {0x032c, 0x00}, + {0x032d, 0x00}, + {0x032e, 0x02}, + {0x032f, 0xa0}, + {0x0350, 0x00}, + {0x0360, 0x01}, + {0x1216, 0x60}, + {0x1217, 0x5b}, + {0x1218, 0x00}, + {0x1220, 0x24}, + {0x198a, 0x00}, + {0x198b, 0x01}, + {0x198e, 0x00}, + {0x198f, 0x01}, + {0x3009, 0x04}, + {0x3012, 0x41}, + {0x3015, 0x00}, + {0x3016, 0xb0}, + {0x3017, 0xf0}, + {0x3018, 0xf0}, + {0x3019, 0xd2}, + {0x301a, 0xb0}, + {0x301c, 0x81}, + {0x301d, 0x02}, + {0x301e, 0x80}, + {0x3022, 0xf0}, + {0x3025, 0x89}, + {0x3030, 0x03}, + {0x3044, 0xc2}, + {0x3050, 0x35}, + {0x3051, 0x60}, + {0x3052, 0x25}, + {0x3053, 0x00}, + {0x3054, 0x00}, + {0x3055, 0x02}, + {0x3056, 0x80}, + {0x3057, 0x80}, + {0x3058, 0x80}, + {0x3059, 0x00}, + {0x3107, 0x86}, + {0x3400, 0x1c}, + {0x3401, 0x80}, + {0x3402, 0x8c}, + {0x3419, 0x08}, + {0x341a, 0xaf}, + {0x341b, 0x30}, + {0x3420, 0x00}, + {0x3421, 0x00}, + {0x3422, 0x00}, + {0x3423, 0x00}, + {0x3424, 0x00}, + {0x3425, 0x00}, + {0x3426, 0x00}, + {0x3427, 0x00}, + {0x3428, 0x0f}, + {0x3429, 0x00}, + {0x342a, 0x00}, + {0x342b, 0x00}, + {0x342c, 0x00}, + {0x342d, 0x00}, + {0x342e, 0x00}, + {0x342f, 0x11}, + {0x3430, 0x11}, + {0x3431, 0x10}, + {0x3432, 0x00}, + {0x3433, 0x00}, + {0x3434, 0x00}, + {0x3435, 0x00}, + {0x3436, 0x00}, + {0x3437, 0x00}, + {0x3442, 0x02}, + {0x3443, 0x02}, + {0x3444, 0x07}, + {0x3450, 0x00}, + {0x3451, 0x00}, + {0x3452, 0x18}, + {0x3453, 0x18}, + {0x3454, 0x00}, + {0x3455, 0x80}, + {0x3456, 0x08}, + {0x3500, 0x00}, + {0x3501, 0x02}, + {0x3502, 0x00}, + {0x3504, 0x4c}, + {0x3506, 0x30}, + {0x3507, 0x00}, + {0x3508, 0x01}, + {0x3509, 0x00}, + {0x350a, 0x01}, + {0x350b, 0x00}, + {0x350c, 0x00}, + {0x3540, 0x00}, + {0x3541, 0x01}, + {0x3542, 0x00}, + {0x3544, 0x4c}, + {0x3546, 0x30}, + {0x3547, 0x00}, + {0x3548, 0x01}, + {0x3549, 0x00}, + {0x354a, 0x01}, + {0x354b, 0x00}, + {0x354c, 0x00}, + {0x3688, 0x02}, + {0x368a, 0x2e}, + {0x368e, 0x71}, + {0x3696, 0xd1}, + {0x3699, 0x00}, + {0x369a, 0x00}, + {0x36a4, 0x00}, + {0x36a6, 0x00}, + {0x3711, 0x00}, + {0x3712, 0x50}, + {0x3713, 0x00}, + {0x3714, 0x21}, + {0x3716, 0x00}, + {0x3718, 0x07}, + {0x371a, 0x1c}, + {0x371b, 0x00}, + {0x3720, 0x08}, + {0x3725, 0x32}, + {0x3727, 0x05}, + {0x3760, 0x02}, + {0x3761, 0x28}, + {0x3762, 0x02}, + {0x3763, 0x02}, 
+ {0x3764, 0x02}, + {0x3765, 0x2c}, + {0x3766, 0x04}, + {0x3767, 0x2c}, + {0x3768, 0x02}, + {0x3769, 0x00}, + {0x376b, 0x20}, + {0x376e, 0x07}, + {0x37b0, 0x01}, + {0x37b1, 0x0f}, + {0x37b2, 0x01}, + {0x37b3, 0xd6}, + {0x37b4, 0x01}, + {0x37b5, 0x48}, + {0x37b6, 0x02}, + {0x37b7, 0x40}, + {0x3800, 0x00}, + {0x3801, 0x00}, + {0x3802, 0x00}, + {0x3803, 0x00}, + {0x3804, 0x0f}, + {0x3805, 0x1f}, + {0x3806, 0x09}, + {0x3807, 0x7f}, + {0x3808, 0x07}, + {0x3809, 0x88}, + {0x380a, 0x04}, + {0x380b, 0xb8}, + {0x380c, 0x02}, + {0x380d, 0xd0}, + {0x380e, 0x11}, + {0x380f, 0x5c}, + {0x3810, 0x00}, + {0x3811, 0x04}, + {0x3812, 0x00}, + {0x3813, 0x03}, + {0x3814, 0x11}, + {0x3815, 0x11}, + {0x3820, 0x02}, + {0x3821, 0x14}, + {0x3822, 0x00}, + {0x3823, 0x04}, + {0x3828, 0x0f}, + {0x382a, 0x80}, + {0x382e, 0x41}, + {0x3837, 0x08}, + {0x383a, 0x81}, + {0x383b, 0x81}, + {0x383c, 0x11}, + {0x383d, 0x11}, + {0x383e, 0x00}, + {0x383f, 0x38}, + {0x3840, 0x00}, + {0x3847, 0x00}, + {0x384a, 0x00}, + {0x384c, 0x02}, + {0x384d, 0xd0}, + {0x3856, 0x50}, + {0x3857, 0x30}, + {0x3858, 0x80}, + {0x3859, 0x40}, + {0x3860, 0x00}, + {0x3888, 0x00}, + {0x3889, 0x00}, + {0x388a, 0x00}, + {0x388b, 0x00}, + {0x388c, 0x00}, + {0x388d, 0x00}, + {0x388e, 0x00}, + {0x388f, 0x00}, + {0x3894, 0x00}, + {0x3895, 0x00}, + {0x3c84, 0x00}, + {0x3d85, 0x8b}, + {0x3daa, 0x80}, + {0x3dab, 0x14}, + {0x3dac, 0x80}, + {0x3dad, 0xc8}, + {0x3dae, 0x81}, + {0x3daf, 0x7b}, + {0x3f00, 0x10}, + {0x3f01, 0x11}, + {0x3f06, 0x0d}, + {0x3f07, 0x0b}, + {0x3f08, 0x0d}, + {0x3f09, 0x0b}, + {0x3f0a, 0x01}, + {0x3f0b, 0x11}, + {0x3f0c, 0x33}, + {0x4001, 0x07}, + {0x4007, 0x20}, + {0x4008, 0x00}, + {0x4009, 0x05}, + {0x400a, 0x00}, + {0x400b, 0x04}, + {0x400c, 0x00}, + {0x400d, 0x04}, + {0x400e, 0x14}, + {0x4010, 0xf4}, + {0x4011, 0x03}, + {0x4012, 0x55}, + {0x4015, 0x00}, + {0x4016, 0x27}, + {0x4017, 0x00}, + {0x4018, 0x0f}, + {0x401b, 0x08}, + {0x401c, 0x00}, + {0x401d, 0x10}, + {0x401e, 0x02}, + {0x401f, 0x00}, + {0x4050, 0x06}, + {0x4051, 0xff}, + {0x4052, 0xff}, + {0x4053, 0xff}, + {0x4054, 0xff}, + {0x4055, 0xff}, + {0x4056, 0xff}, + {0x4057, 0x7f}, + {0x4058, 0x00}, + {0x4059, 0x00}, + {0x405a, 0x00}, + {0x405b, 0x00}, + {0x405c, 0x07}, + {0x405d, 0xff}, + {0x405e, 0x07}, + {0x405f, 0xff}, + {0x4080, 0x78}, + {0x4081, 0x78}, + {0x4082, 0x78}, + {0x4083, 0x78}, + {0x4019, 0x00}, + {0x401a, 0x40}, + {0x4020, 0x04}, + {0x4021, 0x00}, + {0x4022, 0x04}, + {0x4023, 0x00}, + {0x4024, 0x04}, + {0x4025, 0x00}, + {0x4026, 0x04}, + {0x4027, 0x00}, + {0x4030, 0x00}, + {0x4031, 0x00}, + {0x4032, 0x00}, + {0x4033, 0x00}, + {0x4034, 0x00}, + {0x4035, 0x00}, + {0x4036, 0x00}, + {0x4037, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x80}, + {0x4042, 0x00}, + {0x4043, 0x80}, + {0x4044, 0x00}, + {0x4045, 0x80}, + {0x4046, 0x00}, + {0x4047, 0x80}, + {0x4060, 0x00}, + {0x4061, 0x00}, + {0x4062, 0x00}, + {0x4063, 0x00}, + {0x4064, 0x00}, + {0x4065, 0x00}, + {0x4066, 0x00}, + {0x4067, 0x00}, + {0x4068, 0x00}, + {0x4069, 0x00}, + {0x406a, 0x00}, + {0x406b, 0x00}, + {0x406c, 0x00}, + {0x406d, 0x00}, + {0x406e, 0x00}, + {0x406f, 0x00}, + {0x4070, 0x00}, + {0x4071, 0x00}, + {0x4072, 0x00}, + {0x4073, 0x00}, + {0x4074, 0x00}, + {0x4075, 0x00}, + {0x4076, 0x00}, + {0x4077, 0x00}, + {0x4078, 0x00}, + {0x4079, 0x00}, + {0x407a, 0x00}, + {0x407b, 0x00}, + {0x407c, 0x00}, + {0x407d, 0x00}, + {0x407e, 0x00}, + {0x407f, 0x00}, + {0x40e0, 0x00}, + {0x40e1, 0x00}, + {0x40e2, 0x00}, + {0x40e3, 0x00}, + {0x40e4, 0x00}, + {0x40e5, 0x00}, + {0x40e6, 0x00}, + {0x40e7, 0x00}, + {0x40e8, 0x00}, + 
{0x40e9, 0x80}, + {0x40ea, 0x00}, + {0x40eb, 0x80}, + {0x40ec, 0x00}, + {0x40ed, 0x80}, + {0x40ee, 0x00}, + {0x40ef, 0x80}, + {0x40f0, 0x02}, + {0x40f1, 0x04}, + {0x4300, 0x00}, + {0x4301, 0x00}, + {0x4302, 0x00}, + {0x4303, 0x00}, + {0x4304, 0x00}, + {0x4305, 0x00}, + {0x4306, 0x00}, + {0x4307, 0x00}, + {0x4308, 0x00}, + {0x4309, 0x00}, + {0x430a, 0x00}, + {0x430b, 0xff}, + {0x430c, 0xff}, + {0x430d, 0x00}, + {0x430e, 0x00}, + {0x4315, 0x00}, + {0x4316, 0x00}, + {0x4317, 0x00}, + {0x4318, 0x00}, + {0x4319, 0x00}, + {0x431a, 0x00}, + {0x431b, 0x00}, + {0x431c, 0x00}, + {0x4500, 0x07}, + {0x4501, 0x10}, + {0x4502, 0x00}, + {0x4503, 0x0f}, + {0x4504, 0x80}, + {0x4506, 0x01}, + {0x4509, 0x05}, + {0x450c, 0x00}, + {0x450d, 0x20}, + {0x450e, 0x00}, + {0x450f, 0x00}, + {0x4510, 0x00}, + {0x4523, 0x00}, + {0x4526, 0x00}, + {0x4542, 0x00}, + {0x4543, 0x00}, + {0x4544, 0x00}, + {0x4545, 0x00}, + {0x4546, 0x00}, + {0x4547, 0x10}, + {0x4602, 0x00}, + {0x4603, 0x15}, + {0x460b, 0x07}, + {0x4680, 0x11}, + {0x4686, 0x00}, + {0x4687, 0x00}, + {0x4700, 0x00}, + {0x4800, 0x64}, + {0x4806, 0x40}, + {0x480b, 0x10}, + {0x480c, 0x80}, + {0x480f, 0x32}, + {0x4813, 0xe4}, + {0x4837, 0x14}, + {0x4850, 0x42}, + {0x4884, 0x04}, + {0x4c00, 0xf8}, + {0x4c01, 0x44}, + {0x4c03, 0x00}, + {0x4d00, 0x00}, + {0x4d01, 0x16}, + {0x4d04, 0x10}, + {0x4d05, 0x00}, + {0x4d06, 0x0c}, + {0x4d07, 0x00}, + {0x3d84, 0x04}, + {0x3680, 0xa4}, + {0x3682, 0x80}, + {0x3601, 0x40}, + {0x3602, 0x90}, + {0x3608, 0x0a}, + {0x3938, 0x09}, + {0x3a74, 0x84}, + {0x3a99, 0x84}, + {0x3ab9, 0xa6}, + {0x3aba, 0xba}, + {0x3b12, 0x84}, + {0x3b14, 0xbb}, + {0x3b15, 0xbf}, + {0x3a29, 0x26}, + {0x3a1f, 0x8a}, + {0x3a22, 0x91}, + {0x3a25, 0x96}, + {0x3a28, 0xb4}, + {0x3a2b, 0xba}, + {0x3a2e, 0xbf}, + {0x3a31, 0xc1}, + {0x3a20, 0x05}, + {0x3939, 0x6b}, + {0x3902, 0x10}, + {0x3903, 0x10}, + {0x3904, 0x10}, + {0x3905, 0x10}, + {0x3906, 0x01}, + {0x3907, 0x0b}, + {0x3908, 0x10}, + {0x3909, 0x13}, + {0x360f, 0x99}, + {0x390b, 0x11}, + {0x390c, 0x21}, + {0x390d, 0x32}, + {0x390e, 0x76}, + {0x3911, 0x90}, + {0x3913, 0x90}, + {0x3b3f, 0x9d}, + {0x3b45, 0x9d}, + {0x3b1b, 0xc9}, + {0x3b21, 0xc9}, + {0x3a1a, 0x1c}, + {0x3a23, 0x15}, + {0x3a26, 0x17}, + {0x3a2c, 0x50}, + {0x3a2f, 0x18}, + {0x3a32, 0x4f}, + {0x3ace, 0x01}, + {0x3ad2, 0x01}, + {0x3ad6, 0x01}, + {0x3ada, 0x01}, + {0x3ade, 0x01}, + {0x3ae2, 0x01}, + {0x3aee, 0x01}, + {0x3af2, 0x01}, + {0x3af6, 0x01}, + {0x3afa, 0x01}, + {0x3afe, 0x01}, + {0x3b02, 0x01}, + {0x3b06, 0x01}, + {0x3b0a, 0x01}, + {0x3b0b, 0x00}, + {0x3b0e, 0x01}, + {0x3b0f, 0x00}, + {0x392c, 0x02}, + {0x392d, 0x01}, + {0x392e, 0x04}, + {0x392f, 0x03}, + {0x3930, 0x09}, + {0x3931, 0x07}, + {0x3932, 0x10}, + {0x3933, 0x0d}, + {0x3609, 0x08}, + {0x3921, 0x0f}, + {0x3928, 0x15}, + {0x3929, 0x2a}, + {0x392a, 0x52}, + {0x392b, 0xa3}, + {0x340b, 0x1b}, + {0x3426, 0x10}, + {0x3407, 0x01}, + {0x3404, 0x01}, + {0x3500, 0x00}, + {0x3501, 0x08}, + {0x3502, 0x10}, + {0x3508, 0x04}, + {0x3509, 0x00}, +}; + +// OV08X 1C 3856x2176 30fps 800mbps 4lane +static const struct ov08x40_reg lane_4_mode_3856x2176_regs_800mbps[] = { + {0x5000, 0x5d}, + {0x5001, 0x20}, + {0x5004, 0x53}, + {0x5110, 0x00}, + {0x5111, 0x14}, + {0x5112, 0x01}, + {0x5113, 0x7b}, + {0x5114, 0x00}, + {0x5152, 0xa3}, + {0x5a52, 0x1f}, + {0x5a1a, 0x0e}, + {0x5a1b, 0x10}, + {0x5a1f, 0x0e}, + {0x5a27, 0x0e}, + {0x6002, 0x2e}, + {0x5008, 0xb0}, + {0x50c1, 0x00}, + {0x53c1, 0x00}, + {0x5f40, 0x00}, + {0x5f41, 0x40}, + {0x0300, 0x3a}, + {0x0301, 0xc8}, + {0x0302, 0x31}, + {0x0303, 0x03}, + {0x0304, 
0x01}, + {0x0305, 0xa1}, + {0x0306, 0x04}, + {0x0307, 0x01}, + {0x0308, 0x03}, + {0x0309, 0x03}, + {0x0310, 0x0a}, + {0x0311, 0x02}, + {0x0312, 0x01}, + {0x0313, 0x08}, + {0x0314, 0x66}, + {0x0315, 0x00}, + {0x0316, 0x34}, + {0x0320, 0x02}, + {0x0321, 0x03}, + {0x0323, 0x05}, + {0x0324, 0x01}, + {0x0325, 0xb8}, + {0x0326, 0x4a}, + {0x0327, 0x04}, + {0x0329, 0x00}, + {0x032a, 0x05}, + {0x032b, 0x00}, + {0x032c, 0x00}, + {0x032d, 0x00}, + {0x032e, 0x02}, + {0x032f, 0xa0}, + {0x0350, 0x00}, + {0x0360, 0x01}, + {0x1216, 0x60}, + {0x1217, 0x5b}, + {0x1218, 0x00}, + {0x1220, 0x24}, + {0x198a, 0x00}, + {0x198b, 0x01}, + {0x198e, 0x00}, + {0x198f, 0x01}, + {0x3009, 0x04}, + {0x3012, 0x41}, + {0x3015, 0x00}, + {0x3016, 0xb0}, + {0x3017, 0xf0}, + {0x3018, 0xf0}, + {0x3019, 0xd2}, + {0x301a, 0xb0}, + {0x301c, 0x81}, + {0x301d, 0x02}, + {0x301e, 0x80}, + {0x3022, 0xf0}, + {0x3025, 0x89}, + {0x3030, 0x03}, + {0x3044, 0xc2}, + {0x3050, 0x35}, + {0x3051, 0x60}, + {0x3052, 0x25}, + {0x3053, 0x00}, + {0x3054, 0x00}, + {0x3055, 0x02}, + {0x3056, 0x80}, + {0x3057, 0x80}, + {0x3058, 0x80}, + {0x3059, 0x00}, + {0x3107, 0x86}, + {0x3400, 0x1c}, + {0x3401, 0x80}, + {0x3402, 0x8c}, + {0x3419, 0x13}, + {0x341a, 0x89}, + {0x341b, 0x30}, + {0x3420, 0x00}, + {0x3421, 0x00}, + {0x3422, 0x00}, + {0x3423, 0x00}, + {0x3424, 0x00}, + {0x3425, 0x00}, + {0x3426, 0x00}, + {0x3427, 0x00}, + {0x3428, 0x0f}, + {0x3429, 0x00}, + {0x342a, 0x00}, + {0x342b, 0x00}, + {0x342c, 0x00}, + {0x342d, 0x00}, + {0x342e, 0x00}, + {0x342f, 0x11}, + {0x3430, 0x11}, + {0x3431, 0x10}, + {0x3432, 0x00}, + {0x3433, 0x00}, + {0x3434, 0x00}, + {0x3435, 0x00}, + {0x3436, 0x00}, + {0x3437, 0x00}, + {0x3442, 0x02}, + {0x3443, 0x02}, + {0x3444, 0x07}, + {0x3450, 0x00}, + {0x3451, 0x00}, + {0x3452, 0x18}, + {0x3453, 0x18}, + {0x3454, 0x00}, + {0x3455, 0x80}, + {0x3456, 0x08}, + {0x3500, 0x00}, + {0x3501, 0x02}, + {0x3502, 0x00}, + {0x3504, 0x4c}, + {0x3506, 0x30}, + {0x3507, 0x00}, + {0x3508, 0x01}, + {0x3509, 0x00}, + {0x350a, 0x01}, + {0x350b, 0x00}, + {0x350c, 0x00}, + {0x3540, 0x00}, + {0x3541, 0x01}, + {0x3542, 0x00}, + {0x3544, 0x4c}, + {0x3546, 0x30}, + {0x3547, 0x00}, + {0x3548, 0x01}, + {0x3549, 0x00}, + {0x354a, 0x01}, + {0x354b, 0x00}, + {0x354c, 0x00}, + {0x3688, 0x02}, + {0x368a, 0x2e}, + {0x368e, 0x71}, + {0x3696, 0xd1}, + {0x3699, 0x00}, + {0x369a, 0x00}, + {0x36a4, 0x00}, + {0x36a6, 0x00}, + {0x3711, 0x00}, + {0x3712, 0x51}, + {0x3713, 0x00}, + {0x3714, 0x24}, + {0x3716, 0x00}, + {0x3718, 0x07}, + {0x371a, 0x1c}, + {0x371b, 0x00}, + {0x3720, 0x08}, + {0x3725, 0x32}, + {0x3727, 0x05}, + {0x3760, 0x02}, + {0x3761, 0x17}, + {0x3762, 0x02}, + {0x3763, 0x02}, + {0x3764, 0x02}, + {0x3765, 0x2c}, + {0x3766, 0x04}, + {0x3767, 0x2c}, + {0x3768, 0x02}, + {0x3769, 0x00}, + {0x376b, 0x20}, + {0x376e, 0x03}, + {0x37b0, 0x00}, + {0x37b1, 0xab}, + {0x37b2, 0x01}, + {0x37b3, 0x82}, + {0x37b4, 0x00}, + {0x37b5, 0xe4}, + {0x37b6, 0x01}, + {0x37b7, 0xee}, + {0x3814, 0x11}, + {0x3815, 0x11}, + {0x3820, 0x00}, + {0x3821, 0x04}, + {0x3822, 0x00}, + {0x3823, 0x04}, + {0x3828, 0x0f}, + {0x382a, 0x80}, + {0x382e, 0x41}, + {0x3837, 0x08}, + {0x383a, 0x81}, + {0x383b, 0x81}, + {0x383c, 0x11}, + {0x383d, 0x11}, + {0x383e, 0x00}, + {0x383f, 0x38}, + {0x3840, 0x00}, + {0x3847, 0x00}, + {0x384a, 0x00}, + {0x384c, 0x02}, + {0x384d, 0x80}, + {0x3856, 0x50}, + {0x3857, 0x30}, + {0x3858, 0x80}, + {0x3859, 0x40}, + {0x3860, 0x00}, + {0x3888, 0x00}, + {0x3889, 0x00}, + {0x388a, 0x00}, + {0x388b, 0x00}, + {0x388c, 0x00}, + {0x388d, 0x00}, + {0x388e, 0x00}, + {0x388f, 0x00}, + 
{0x3894, 0x00}, + {0x3895, 0x00}, + {0x3c84, 0x00}, + {0x3d85, 0x8b}, + {0x3daa, 0x80}, + {0x3dab, 0x14}, + {0x3dac, 0x80}, + {0x3dad, 0xc8}, + {0x3dae, 0x81}, + {0x3daf, 0x7b}, + {0x3f00, 0x10}, + {0x3f01, 0x11}, + {0x3f06, 0x0d}, + {0x3f07, 0x0b}, + {0x3f08, 0x0d}, + {0x3f09, 0x0b}, + {0x3f0a, 0x01}, + {0x3f0b, 0x11}, + {0x3f0c, 0x33}, + {0x4001, 0x07}, + {0x4007, 0x20}, + {0x4008, 0x00}, + {0x4009, 0x05}, + {0x400a, 0x00}, + {0x400b, 0x08}, + {0x400c, 0x00}, + {0x400d, 0x08}, + {0x400e, 0x14}, + {0x4010, 0xf4}, + {0x4011, 0x03}, + {0x4012, 0x55}, + {0x4015, 0x00}, + {0x4016, 0x2d}, + {0x4017, 0x00}, + {0x4018, 0x0f}, + {0x401b, 0x08}, + {0x401c, 0x00}, + {0x401d, 0x10}, + {0x401e, 0x02}, + {0x401f, 0x00}, + {0x4050, 0x06}, + {0x4051, 0xff}, + {0x4052, 0xff}, + {0x4053, 0xff}, + {0x4054, 0xff}, + {0x4055, 0xff}, + {0x4056, 0xff}, + {0x4057, 0x7f}, + {0x4058, 0x00}, + {0x4059, 0x00}, + {0x405a, 0x00}, + {0x405b, 0x00}, + {0x405c, 0x07}, + {0x405d, 0xff}, + {0x405e, 0x07}, + {0x405f, 0xff}, + {0x4080, 0x78}, + {0x4081, 0x78}, + {0x4082, 0x78}, + {0x4083, 0x78}, + {0x4019, 0x00}, + {0x401a, 0x40}, + {0x4020, 0x04}, + {0x4021, 0x00}, + {0x4022, 0x04}, + {0x4023, 0x00}, + {0x4024, 0x04}, + {0x4025, 0x00}, + {0x4026, 0x04}, + {0x4027, 0x00}, + {0x4030, 0x00}, + {0x4031, 0x00}, + {0x4032, 0x00}, + {0x4033, 0x00}, + {0x4034, 0x00}, + {0x4035, 0x00}, + {0x4036, 0x00}, + {0x4037, 0x00}, + {0x4040, 0x00}, + {0x4041, 0x80}, + {0x4042, 0x00}, + {0x4043, 0x80}, + {0x4044, 0x00}, + {0x4045, 0x80}, + {0x4046, 0x00}, + {0x4047, 0x80}, + {0x4060, 0x00}, + {0x4061, 0x00}, + {0x4062, 0x00}, + {0x4063, 0x00}, + {0x4064, 0x00}, + {0x4065, 0x00}, + {0x4066, 0x00}, + {0x4067, 0x00}, + {0x4068, 0x00}, + {0x4069, 0x00}, + {0x406a, 0x00}, + {0x406b, 0x00}, + {0x406c, 0x00}, + {0x406d, 0x00}, + {0x406e, 0x00}, + {0x406f, 0x00}, + {0x4070, 0x00}, + {0x4071, 0x00}, + {0x4072, 0x00}, + {0x4073, 0x00}, + {0x4074, 0x00}, + {0x4075, 0x00}, + {0x4076, 0x00}, + {0x4077, 0x00}, + {0x4078, 0x00}, + {0x4079, 0x00}, + {0x407a, 0x00}, + {0x407b, 0x00}, + {0x407c, 0x00}, + {0x407d, 0x00}, + {0x407e, 0x00}, + {0x407f, 0x00}, + {0x40e0, 0x00}, + {0x40e1, 0x00}, + {0x40e2, 0x00}, + {0x40e3, 0x00}, + {0x40e4, 0x00}, + {0x40e5, 0x00}, + {0x40e6, 0x00}, + {0x40e7, 0x00}, + {0x40e8, 0x00}, + {0x40e9, 0x80}, + {0x40ea, 0x00}, + {0x40eb, 0x80}, + {0x40ec, 0x00}, + {0x40ed, 0x80}, + {0x40ee, 0x00}, + {0x40ef, 0x80}, + {0x40f0, 0x02}, + {0x40f1, 0x04}, + {0x4300, 0x00}, + {0x4301, 0x00}, + {0x4302, 0x00}, + {0x4303, 0x00}, + {0x4304, 0x00}, + {0x4305, 0x00}, + {0x4306, 0x00}, + {0x4307, 0x00}, + {0x4308, 0x00}, + {0x4309, 0x00}, + {0x430a, 0x00}, + {0x430b, 0xff}, + {0x430c, 0xff}, + {0x430d, 0x00}, + {0x430e, 0x00}, + {0x4315, 0x00}, + {0x4316, 0x00}, + {0x4317, 0x00}, + {0x4318, 0x00}, + {0x4319, 0x00}, + {0x431a, 0x00}, + {0x431b, 0x00}, + {0x431c, 0x00}, + {0x4500, 0x07}, + {0x4501, 0x00}, + {0x4502, 0x00}, + {0x4503, 0x0f}, + {0x4504, 0x80}, + {0x4506, 0x01}, + {0x4509, 0x05}, + {0x450c, 0x00}, + {0x450d, 0x20}, + {0x450e, 0x00}, + {0x450f, 0x00}, + {0x4510, 0x00}, + {0x4523, 0x00}, + {0x4526, 0x00}, + {0x4542, 0x00}, + {0x4543, 0x00}, + {0x4544, 0x00}, + {0x4545, 0x00}, + {0x4546, 0x00}, + {0x4547, 0x10}, + {0x4602, 0x00}, + {0x4603, 0x15}, + {0x460b, 0x07}, + {0x4680, 0x11}, + {0x4686, 0x00}, + {0x4687, 0x00}, + {0x4700, 0x00}, + {0x4800, 0x64}, + {0x4806, 0x40}, + {0x480b, 0x10}, + {0x480c, 0x80}, + {0x480f, 0x32}, + {0x4813, 0xe4}, + {0x4837, 0x14}, + {0x4850, 0x42}, + {0x4884, 0x04}, + {0x4c00, 0xf8}, + {0x4c01, 0x44}, + {0x4c03, 
0x00}, + {0x4d00, 0x00}, + {0x4d01, 0x16}, + {0x4d04, 0x10}, + {0x4d05, 0x00}, + {0x4d06, 0x0c}, + {0x4d07, 0x00}, + {0x3d84, 0x04}, + {0x3680, 0xa4}, + {0x3682, 0x80}, + {0x3601, 0x40}, + {0x3602, 0x90}, + {0x3608, 0x0a}, + {0x3938, 0x09}, + {0x3a74, 0x84}, + {0x3a99, 0x84}, + {0x3ab9, 0xa6}, + {0x3aba, 0xba}, + {0x3b12, 0x84}, + {0x3b14, 0xbb}, + {0x3b15, 0xbf}, + {0x3a29, 0x26}, + {0x3a1f, 0x8a}, + {0x3a22, 0x91}, + {0x3a25, 0x96}, + {0x3a28, 0xb4}, + {0x3a2b, 0xba}, + {0x3a2e, 0xbf}, + {0x3a31, 0xc1}, + {0x3a20, 0x00}, + {0x3939, 0x9d}, + {0x3902, 0x0e}, + {0x3903, 0x0e}, + {0x3904, 0x0e}, + {0x3905, 0x0e}, + {0x3906, 0x07}, + {0x3907, 0x0d}, + {0x3908, 0x11}, + {0x3909, 0x12}, + {0x360f, 0x99}, + {0x390c, 0x33}, + {0x390d, 0x66}, + {0x390e, 0xaa}, + {0x3911, 0x90}, + {0x3913, 0x90}, + {0x3915, 0x90}, + {0x3917, 0x90}, + {0x3b3f, 0x9d}, + {0x3b45, 0x9d}, + {0x3b1b, 0xc9}, + {0x3b21, 0xc9}, + {0x3440, 0xa4}, + {0x3a23, 0x15}, + {0x3a26, 0x1d}, + {0x3a2c, 0x4a}, + {0x3a2f, 0x18}, + {0x3a32, 0x55}, + {0x3b0a, 0x01}, + {0x3b0b, 0x00}, + {0x3b0e, 0x01}, + {0x3b0f, 0x00}, + {0x392c, 0x02}, + {0x392d, 0x02}, + {0x392e, 0x04}, + {0x392f, 0x03}, + {0x3930, 0x08}, + {0x3931, 0x07}, + {0x3932, 0x10}, + {0x3933, 0x0c}, + {0x3609, 0x08}, + {0x3921, 0x0f}, + {0x3928, 0x15}, + {0x3929, 0x2a}, + {0x392a, 0x54}, + {0x392b, 0xa8}, + {0x3426, 0x10}, + {0x3407, 0x01}, + {0x3404, 0x01}, + {0x380c, 0x02}, + {0x380d, 0x80}, + {0x380e, 0x13}, + {0x380f, 0x88}, + {0x3800, 0x00}, + {0x3801, 0x00}, + {0x3802, 0x00}, + {0x3803, 0x70}, + {0x3804, 0x0f}, + {0x3805, 0x1f}, + {0x3806, 0x09}, + {0x3807, 0x0f}, + {0x3808, 0x0f}, + {0x3809, 0x10}, + {0x380a, 0x08}, + {0x380b, 0x80}, + {0x3810, 0x00}, + {0x3811, 0x08}, + {0x3812, 0x00}, + {0x3813, 0x10}, + {0x3500, 0x00}, + {0x3501, 0x10}, + {0x3502, 0x10}, + {0x3508, 0x0f}, + {0x3509, 0x80}, + {0x5a80, 0x75}, + {0x5a81, 0x75}, + {0x5a82, 0x75}, + {0x5a83, 0x75}, + {0x5a84, 0x75}, + {0x5a85, 0x75}, + {0x5a86, 0x75}, + {0x5a87, 0x75}, + {0x5a88, 0x75}, + {0x5a89, 0x75}, + {0x5a8a, 0x75}, + {0x5a8b, 0x75}, + {0x5a8c, 0x75}, + {0x5a8d, 0x75}, + {0x5a8e, 0x75}, + {0x5a8f, 0x75}, + {0x5a90, 0x75}, + {0x5a91, 0x75}, + {0x5a92, 0x75}, + {0x5a93, 0x75}, + {0x5a94, 0x75}, + {0x5a95, 0x75}, + {0x5a96, 0x75}, + {0x5a97, 0x75}, + {0x5a98, 0x75}, + {0x5a99, 0x75}, + {0x5a9a, 0x75}, + {0x5a9b, 0x75}, + {0x5a9c, 0x75}, + {0x5a9d, 0x75}, + {0x5a9e, 0x75}, + {0x5a9f, 0x75}, + {0x5aa0, 0x75}, + {0x5aa1, 0x75}, + {0x5aa2, 0x75}, + {0x5aa3, 0x75}, + {0x5aa4, 0x75}, + {0x5aa5, 0x75}, + {0x5aa6, 0x75}, + {0x5aa7, 0x75}, + {0x5aa8, 0x75}, + {0x5aa9, 0x75}, + {0x5aaa, 0x75}, + {0x5aab, 0x75}, + {0x5aac, 0x75}, + {0x5aad, 0x75}, + {0x5aae, 0x75}, + {0x5aaf, 0x75}, + {0x5ab0, 0x75}, + {0x5ab1, 0x75}, + {0x5ab2, 0x75}, + {0x5ab3, 0x75}, + {0x5ab4, 0x75}, + {0x5ab5, 0x75}, + {0x5ab6, 0x75}, + {0x5ab7, 0x75}, + {0x5ab8, 0x75}, + {0x5ab9, 0x75}, + {0x5aba, 0x75}, + {0x5abb, 0x75}, + {0x5abc, 0x75}, + {0x5abd, 0x75}, + {0x5abe, 0x75}, + {0x5abf, 0x75}, + {0x5ac0, 0x75}, + {0x5ac1, 0x75}, + {0x5ac2, 0x75}, + {0x5ac3, 0x75}, + {0x5ac4, 0x75}, + {0x5ac5, 0x75}, + {0x5ac6, 0x75}, + {0x5ac7, 0x75}, + {0x5ac8, 0x75}, + {0x5ac9, 0x75}, + {0x5aca, 0x75}, + {0x5acb, 0x75}, + {0x5acc, 0x75}, + {0x5acd, 0x75}, + {0x5ace, 0x75}, + {0x5acf, 0x75}, + {0x5ad0, 0x75}, + {0x5ad1, 0x75}, + {0x5ad2, 0x75}, + {0x5ad3, 0x75}, + {0x5ad4, 0x75}, + {0x5ad5, 0x75}, + {0x5ad6, 0x75}, + {0x5ad7, 0x75}, + {0x5ad8, 0x75}, + {0x5ad9, 0x75}, + {0x5ada, 0x75}, + {0x5adb, 0x75}, + {0x5adc, 0x75}, + {0x5add, 0x75}, + {0x5ade, 0x75}, + 
{0x5adf, 0x75}, + {0x5ae0, 0x75}, + {0x5ae1, 0x75}, + {0x5ae2, 0x75}, + {0x5ae3, 0x75}, + {0x5ae4, 0x75}, + {0x5ae5, 0x75}, + {0x5ae6, 0x75}, + {0x5ae7, 0x75}, + {0x5ae8, 0x75}, + {0x5ae9, 0x75}, + {0x5aea, 0x75}, + {0x5aeb, 0x75}, + {0x5aec, 0x75}, + {0x5aed, 0x75}, + {0x5aee, 0x75}, + {0x5aef, 0x75}, + {0x5af0, 0x75}, + {0x5af1, 0x75}, + {0x5af2, 0x75}, + {0x5af3, 0x75}, + {0x5af4, 0x75}, + {0x5af5, 0x75}, + {0x5af6, 0x75}, + {0x5af7, 0x75}, + {0x5af8, 0x75}, + {0x5af9, 0x75}, + {0x5afa, 0x75}, + {0x5afb, 0x75}, + {0x5afc, 0x75}, + {0x5afd, 0x75}, + {0x5afe, 0x75}, + {0x5aff, 0x75}, + {0x5b00, 0x75}, + {0x5b01, 0x75}, + {0x5b02, 0x75}, + {0x5b03, 0x75}, + {0x5b04, 0x75}, + {0x5b05, 0x75}, + {0x5b06, 0x75}, + {0x5b07, 0x75}, + {0x5b08, 0x75}, + {0x5b09, 0x75}, + {0x5b0a, 0x75}, + {0x5b0b, 0x75}, + {0x5b0c, 0x75}, + {0x5b0d, 0x75}, + {0x5b0e, 0x75}, + {0x5b0f, 0x75}, + {0x5b10, 0x75}, + {0x5b11, 0x75}, + {0x5b12, 0x75}, + {0x5b13, 0x75}, + {0x5b14, 0x75}, + {0x5b15, 0x75}, + {0x5b16, 0x75}, + {0x5b17, 0x75}, + {0x5b18, 0x75}, + {0x5b19, 0x75}, + {0x5b1a, 0x75}, + {0x5b1b, 0x75}, + {0x5b1c, 0x75}, + {0x5b1d, 0x75}, + {0x5b1e, 0x75}, + {0x5b1f, 0x75}, + {0x5b20, 0x75}, + {0x5b21, 0x75}, + {0x5b22, 0x75}, + {0x5b23, 0x75}, + {0x5b24, 0x75}, + {0x5b25, 0x75}, + {0x5b26, 0x75}, + {0x5b27, 0x75}, + {0x5b28, 0x75}, + {0x5b29, 0x75}, + {0x5b2a, 0x75}, + {0x5b2b, 0x75}, + {0x5b2c, 0x75}, + {0x5b2d, 0x75}, + {0x5b2e, 0x75}, + {0x5b2f, 0x75}, + {0x5b30, 0x75}, + {0x5b31, 0x75}, + {0x5b32, 0x75}, + {0x5b33, 0x75}, + {0x5b34, 0x75}, + {0x5b35, 0x75}, + {0x5b36, 0x75}, + {0x5b37, 0x75}, + {0x5b38, 0x75}, + {0x5b39, 0x75}, + {0x5b3a, 0x75}, + {0x5b3b, 0x75}, + {0x5b3c, 0x75}, + {0x5b3d, 0x75}, + {0x5b3e, 0x75}, + {0x5b3f, 0x75}, + {0x5b40, 0x75}, + {0x5b41, 0x75}, + {0x5b42, 0x75}, + {0x5b43, 0x75}, + {0x5b44, 0x75}, + {0x5b45, 0x75}, + {0x5b46, 0x75}, + {0x5b47, 0x75}, + {0x5b48, 0x75}, + {0x5b49, 0x75}, + {0x5b4a, 0x75}, + {0x5b4b, 0x75}, + {0x5b4c, 0x75}, + {0x5b4d, 0x75}, + {0x5b4e, 0x75}, + {0x5b4f, 0x75}, + {0x5b50, 0x75}, + {0x5b51, 0x75}, + {0x5b52, 0x75}, + {0x5b53, 0x75}, + {0x5b54, 0x75}, + {0x5b55, 0x75}, + {0x5b56, 0x75}, + {0x5b57, 0x75}, + {0x5b58, 0x75}, + {0x5b59, 0x75}, + {0x5b5a, 0x75}, + {0x5b5b, 0x75}, + {0x5b5c, 0x75}, + {0x5b5d, 0x75}, + {0x5b5e, 0x75}, + {0x5b5f, 0x75}, + {0x5b60, 0x75}, + {0x5b61, 0x75}, + {0x5b62, 0x75}, + {0x5b63, 0x75}, + {0x5b64, 0x75}, + {0x5b65, 0x75}, + {0x5b66, 0x75}, + {0x5b67, 0x75}, + {0x5b68, 0x75}, + {0x5b69, 0x75}, + {0x5b6a, 0x75}, + {0x5b6b, 0x75}, + {0x5b6c, 0x75}, + {0x5b6d, 0x75}, + {0x5b6e, 0x75}, + {0x5b6f, 0x75}, + {0x5b70, 0x75}, + {0x5b71, 0x75}, + {0x5b72, 0x75}, + {0x5b73, 0x75}, + {0x5b74, 0x75}, + {0x5b75, 0x75}, + {0x5b76, 0x75}, + {0x5b77, 0x75}, + {0x5b78, 0x75}, + {0x5b79, 0x75}, + {0x5b7a, 0x75}, + {0x5b7b, 0x75}, + {0x5b7c, 0x75}, + {0x5b7d, 0x75}, + {0x5b7e, 0x75}, + {0x5b7f, 0x75}, + {0x5b80, 0x75}, + {0x5b81, 0x75}, + {0x5b82, 0x75}, + {0x5b83, 0x75}, + {0x5b84, 0x75}, + {0x5b85, 0x75}, + {0x5b86, 0x75}, + {0x5b87, 0x75}, + {0x5b88, 0x75}, + {0x5b89, 0x75}, + {0x5b8a, 0x75}, + {0x5b8b, 0x75}, + {0x5b8c, 0x75}, + {0x5b8d, 0x75}, + {0x5b8e, 0x75}, + {0x5b8f, 0x75}, + {0x5b90, 0x75}, + {0x5b91, 0x75}, + {0x5b92, 0x75}, + {0x5b93, 0x75}, + {0x5b94, 0x75}, + {0x5b95, 0x75}, + {0x5b96, 0x75}, + {0x5b97, 0x75}, + {0x5b98, 0x75}, + {0x5b99, 0x75}, + {0x5b9a, 0x75}, + {0x5b9b, 0x75}, + {0x5b9c, 0x75}, + {0x5b9d, 0x75}, + {0x5b9e, 0x75}, + {0x5b9f, 0x75}, + {0x5bc0, 0x75}, + {0x5bc1, 0x75}, + {0x5bc2, 0x75}, + {0x5bc3, 0x75}, + {0x5bc4, 
0x75}, + {0x5bc5, 0x75}, + {0x5bc6, 0x75}, + {0x5bc7, 0x75}, + {0x5bc8, 0x75}, + {0x5bc9, 0x75}, + {0x5bca, 0x75}, + {0x5bcb, 0x75}, + {0x5bcc, 0x75}, + {0x5bcd, 0x75}, + {0x5bce, 0x75}, + {0x5bcf, 0x75}, + {0x5bd0, 0x75}, + {0x5bd1, 0x75}, + {0x5bd2, 0x75}, + {0x5bd3, 0x75}, + {0x5bd4, 0x75}, + {0x5bd5, 0x75}, + {0x5bd6, 0x75}, + {0x5bd7, 0x75}, + {0x5bd8, 0x75}, + {0x5bd9, 0x75}, + {0x5bda, 0x75}, + {0x5bdb, 0x75}, + {0x5bdc, 0x75}, + {0x5bdd, 0x75}, + {0x5bde, 0x75}, + {0x5bdf, 0x75}, + {0x5be0, 0x75}, + {0x5be1, 0x75}, + {0x5be2, 0x75}, + {0x5be3, 0x75}, + {0x5be4, 0x75}, + {0x5be5, 0x75}, + {0x5be6, 0x75}, + {0x5be7, 0x75}, + {0x5be8, 0x75}, + {0x5be9, 0x75}, + {0x5bea, 0x75}, + {0x5beb, 0x75}, + {0x5bec, 0x75}, + {0x5bed, 0x75}, + {0x5bee, 0x75}, + {0x5bef, 0x75}, + {0x5bf0, 0x75}, + {0x5bf1, 0x75}, + {0x5bf2, 0x75}, + {0x5bf3, 0x75}, + {0x5bf4, 0x75}, + {0x5bf5, 0x75}, + {0x5bf6, 0x75}, + {0x5bf7, 0x75}, + {0x5bf8, 0x75}, + {0x5bf9, 0x75}, + {0x5bfa, 0x75}, + {0x5bfb, 0x75}, + {0x5bfc, 0x75}, + {0x5bfd, 0x75}, + {0x5bfe, 0x75}, + {0x5bff, 0x75}, + {0x5c00, 0x75}, + {0x5c01, 0x75}, + {0x5c02, 0x75}, + {0x5c03, 0x75}, + {0x5c04, 0x75}, + {0x5c05, 0x75}, + {0x5c06, 0x75}, + {0x5c07, 0x75}, + {0x5c08, 0x75}, + {0x5c09, 0x75}, + {0x5c0a, 0x75}, + {0x5c0b, 0x75}, + {0x5c0c, 0x75}, + {0x5c0d, 0x75}, + {0x5c0e, 0x75}, + {0x5c0f, 0x75}, + {0x5c10, 0x75}, + {0x5c11, 0x75}, + {0x5c12, 0x75}, + {0x5c13, 0x75}, + {0x5c14, 0x75}, + {0x5c15, 0x75}, + {0x5c16, 0x75}, + {0x5c17, 0x75}, + {0x5c18, 0x75}, + {0x5c19, 0x75}, + {0x5c1a, 0x75}, + {0x5c1b, 0x75}, + {0x5c1c, 0x75}, + {0x5c1d, 0x75}, + {0x5c1e, 0x75}, + {0x5c1f, 0x75}, + {0x5c20, 0x75}, + {0x5c21, 0x75}, + {0x5c22, 0x75}, + {0x5c23, 0x75}, + {0x5c24, 0x75}, + {0x5c25, 0x75}, + {0x5c26, 0x75}, + {0x5c27, 0x75}, + {0x5c28, 0x75}, + {0x5c29, 0x75}, + {0x5c2a, 0x75}, + {0x5c2b, 0x75}, + {0x5c2c, 0x75}, + {0x5c2d, 0x75}, + {0x5c2e, 0x75}, + {0x5c2f, 0x75}, + {0x5c30, 0x75}, + {0x5c31, 0x75}, + {0x5c32, 0x75}, + {0x5c33, 0x75}, + {0x5c34, 0x75}, + {0x5c35, 0x75}, + {0x5c36, 0x75}, + {0x5c37, 0x75}, + {0x5c38, 0x75}, + {0x5c39, 0x75}, + {0x5c3a, 0x75}, + {0x5c3b, 0x75}, + {0x5c3c, 0x75}, + {0x5c3d, 0x75}, + {0x5c3e, 0x75}, + {0x5c3f, 0x75}, + {0x5c40, 0x75}, + {0x5c41, 0x75}, + {0x5c42, 0x75}, + {0x5c43, 0x75}, + {0x5c44, 0x75}, + {0x5c45, 0x75}, + {0x5c46, 0x75}, + {0x5c47, 0x75}, + {0x5c48, 0x75}, + {0x5c49, 0x75}, + {0x5c4a, 0x75}, + {0x5c4b, 0x75}, + {0x5c4c, 0x75}, + {0x5c4d, 0x75}, + {0x5c4e, 0x75}, + {0x5c4f, 0x75}, + {0x5c50, 0x75}, + {0x5c51, 0x75}, + {0x5c52, 0x75}, + {0x5c53, 0x75}, + {0x5c54, 0x75}, + {0x5c55, 0x75}, + {0x5c56, 0x75}, + {0x5c57, 0x75}, + {0x5c58, 0x75}, + {0x5c59, 0x75}, + {0x5c5a, 0x75}, + {0x5c5b, 0x75}, + {0x5c5c, 0x75}, + {0x5c5d, 0x75}, + {0x5c5e, 0x75}, + {0x5c5f, 0x75}, + {0x5c60, 0x75}, + {0x5c61, 0x75}, + {0x5c62, 0x75}, + {0x5c63, 0x75}, + {0x5c64, 0x75}, + {0x5c65, 0x75}, + {0x5c66, 0x75}, + {0x5c67, 0x75}, + {0x5c68, 0x75}, + {0x5c69, 0x75}, + {0x5c6a, 0x75}, + {0x5c6b, 0x75}, + {0x5c6c, 0x75}, + {0x5c6d, 0x75}, + {0x5c6e, 0x75}, + {0x5c6f, 0x75}, + {0x5c70, 0x75}, + {0x5c71, 0x75}, + {0x5c72, 0x75}, + {0x5c73, 0x75}, + {0x5c74, 0x75}, + {0x5c75, 0x75}, + {0x5c76, 0x75}, + {0x5c77, 0x75}, + {0x5c78, 0x75}, + {0x5c79, 0x75}, + {0x5c7a, 0x75}, + {0x5c7b, 0x75}, + {0x5c7c, 0x75}, + {0x5c7d, 0x75}, + {0x5c7e, 0x75}, + {0x5c7f, 0x75}, + {0x5c80, 0x75}, + {0x5c81, 0x75}, + {0x5c82, 0x75}, + {0x5c83, 0x75}, + {0x5c84, 0x75}, + {0x5c85, 0x75}, + {0x5c86, 0x75}, + {0x5c87, 0x75}, + {0x5c88, 0x75}, + {0x5c89, 0x75}, + 
{0x5c8a, 0x75}, + {0x5c8b, 0x75}, + {0x5c8c, 0x75}, + {0x5c8d, 0x75}, + {0x5c8e, 0x75}, + {0x5c8f, 0x75}, + {0x5c90, 0x75}, + {0x5c91, 0x75}, + {0x5c92, 0x75}, + {0x5c93, 0x75}, + {0x5c94, 0x75}, + {0x5c95, 0x75}, + {0x5c96, 0x75}, + {0x5c97, 0x75}, + {0x5c98, 0x75}, + {0x5c99, 0x75}, + {0x5c9a, 0x75}, + {0x5c9b, 0x75}, + {0x5c9c, 0x75}, + {0x5c9d, 0x75}, + {0x5c9e, 0x75}, + {0x5c9f, 0x75}, + {0x5ca0, 0x75}, + {0x5ca1, 0x75}, + {0x5ca2, 0x75}, + {0x5ca3, 0x75}, + {0x5ca4, 0x75}, + {0x5ca5, 0x75}, + {0x5ca6, 0x75}, + {0x5ca7, 0x75}, + {0x5ca8, 0x75}, + {0x5ca9, 0x75}, + {0x5caa, 0x75}, + {0x5cab, 0x75}, + {0x5cac, 0x75}, + {0x5cad, 0x75}, + {0x5cae, 0x75}, + {0x5caf, 0x75}, + {0x5cb0, 0x75}, + {0x5cb1, 0x75}, + {0x5cb2, 0x75}, + {0x5cb3, 0x75}, + {0x5cb4, 0x75}, + {0x5cb5, 0x75}, + {0x5cb6, 0x75}, + {0x5cb7, 0x75}, + {0x5cb8, 0x75}, + {0x5cb9, 0x75}, + {0x5cba, 0x75}, + {0x5cbb, 0x75}, + {0x5cbc, 0x75}, + {0x5cbd, 0x75}, + {0x5cbe, 0x75}, + {0x5cbf, 0x75}, + {0x5cc0, 0x75}, + {0x5cc1, 0x75}, + {0x5cc2, 0x75}, + {0x5cc3, 0x75}, + {0x5cc4, 0x75}, + {0x5cc5, 0x75}, + {0x5cc6, 0x75}, + {0x5cc7, 0x75}, + {0x5cc8, 0x75}, + {0x5cc9, 0x75}, + {0x5cca, 0x75}, + {0x5ccb, 0x75}, + {0x5ccc, 0x75}, + {0x5ccd, 0x75}, + {0x5cce, 0x75}, + {0x5ccf, 0x75}, + {0x5cd0, 0x75}, + {0x5cd1, 0x75}, + {0x5cd2, 0x75}, + {0x5cd3, 0x75}, + {0x5cd4, 0x75}, + {0x5cd5, 0x75}, + {0x5cd6, 0x75}, + {0x5cd7, 0x75}, + {0x5cd8, 0x75}, + {0x5cd9, 0x75}, + {0x5cda, 0x75}, + {0x5cdb, 0x75}, + {0x5cdc, 0x75}, + {0x5cdd, 0x75}, + {0x5cde, 0x75}, + {0x5cdf, 0x75}, + {0x5ce0, 0x75}, + {0x5ce1, 0x75}, + {0x5ce2, 0x75}, + {0x5ce3, 0x75}, + {0x5ce4, 0x75}, + {0x5ce5, 0x75}, + {0x5ce6, 0x75}, + {0x5ce7, 0x75}, + {0x5ce8, 0x75}, + {0x5ce9, 0x75}, + {0x5cea, 0x75}, + {0x5ceb, 0x75}, + {0x5cec, 0x75}, + {0x5ced, 0x75}, + {0x5cee, 0x75}, + {0x5cef, 0x75}, + {0x5cf0, 0x75}, + {0x5cf1, 0x75}, + {0x5cf2, 0x75}, + {0x5cf3, 0x75}, + {0x5cf4, 0x75}, + {0x5cf5, 0x75}, + {0x5cf6, 0x75}, + {0x5cf7, 0x75}, + {0x5cf8, 0x75}, + {0x5cf9, 0x75}, + {0x5cfa, 0x75}, + {0x5cfb, 0x75}, + {0x5cfc, 0x75}, + {0x5cfd, 0x75}, + {0x5cfe, 0x75}, + {0x5cff, 0x75}, + {0x5d00, 0x75}, + {0x5d01, 0x75}, + {0x5d02, 0x75}, + {0x5d03, 0x75}, + {0x5d04, 0x75}, + {0x5d05, 0x75}, + {0x5d06, 0x75}, + {0x5d07, 0x75}, + {0x5d08, 0x75}, + {0x5d09, 0x75}, + {0x5d0a, 0x75}, + {0x5d0b, 0x75}, + {0x5d0c, 0x75}, + {0x5d0d, 0x75}, + {0x5d0e, 0x75}, + {0x5d0f, 0x75}, + {0x5d10, 0x75}, + {0x5d11, 0x75}, + {0x5d12, 0x75}, + {0x5d13, 0x75}, + {0x5d14, 0x75}, + {0x5d15, 0x75}, + {0x5d16, 0x75}, + {0x5d17, 0x75}, + {0x5d18, 0x75}, + {0x5d19, 0x75}, + {0x5d1a, 0x75}, + {0x5d1b, 0x75}, + {0x5d1c, 0x75}, + {0x5d1d, 0x75}, + {0x5d1e, 0x75}, + {0x5d1f, 0x75}, + {0x5d20, 0x75}, + {0x5d21, 0x75}, + {0x5d22, 0x75}, + {0x5d23, 0x75}, + {0x5d24, 0x75}, + {0x5d25, 0x75}, + {0x5d26, 0x75}, + {0x5d27, 0x75}, + {0x5d28, 0x75}, + {0x5d29, 0x75}, + {0x5d2a, 0x75}, + {0x5d2b, 0x75}, + {0x5d2c, 0x75}, + {0x5d2d, 0x75}, + {0x5d2e, 0x75}, + {0x5d2f, 0x75}, + {0x5d30, 0x75}, + {0x5d31, 0x75}, + {0x5d32, 0x75}, + {0x5d33, 0x75}, + {0x5d34, 0x75}, + {0x5d35, 0x75}, + {0x5d36, 0x75}, + {0x5d37, 0x75}, + {0x5d38, 0x75}, + {0x5d39, 0x75}, + {0x5d3a, 0x75}, + {0x5d3b, 0x75}, + {0x5d3c, 0x75}, + {0x5d3d, 0x75}, + {0x5d3e, 0x75}, + {0x5d3f, 0x75}, + {0x5d40, 0x75}, + {0x5d41, 0x75}, + {0x5d42, 0x75}, + {0x5d43, 0x75}, + {0x5d44, 0x75}, + {0x5d45, 0x75}, + {0x5d46, 0x75}, + {0x5d47, 0x75}, + {0x5d48, 0x75}, + {0x5d49, 0x75}, + {0x5d4a, 0x75}, + {0x5d4b, 0x75}, + {0x5d4c, 0x75}, + {0x5d4d, 0x75}, + {0x5d4e, 0x75}, + {0x5d4f, 
0x75}, + {0x5d50, 0x75}, + {0x5d51, 0x75}, + {0x5d52, 0x75}, + {0x5d53, 0x75}, + {0x5d54, 0x75}, + {0x5d55, 0x75}, + {0x5d56, 0x75}, + {0x5d57, 0x75}, + {0x5d58, 0x75}, + {0x5d59, 0x75}, + {0x5d5a, 0x75}, + {0x5d5b, 0x75}, + {0x5d5c, 0x75}, + {0x5d5d, 0x75}, + {0x5d5e, 0x75}, + {0x5d5f, 0x75}, + {0x5d60, 0x75}, + {0x5d61, 0x75}, + {0x5d62, 0x75}, + {0x5d63, 0x75}, + {0x5d64, 0x75}, + {0x5d65, 0x75}, + {0x5d66, 0x75}, + {0x5d67, 0x75}, + {0x5d68, 0x75}, + {0x5d69, 0x75}, + {0x5d6a, 0x75}, + {0x5d6b, 0x75}, + {0x5d6c, 0x75}, + {0x5d6d, 0x75}, + {0x5d6e, 0x75}, + {0x5d6f, 0x75}, + {0x5d70, 0x75}, + {0x5d71, 0x75}, + {0x5d72, 0x75}, + {0x5d73, 0x75}, + {0x5d74, 0x75}, + {0x5d75, 0x75}, + {0x5d76, 0x75}, + {0x5d77, 0x75}, + {0x5d78, 0x75}, + {0x5d79, 0x75}, + {0x5d7a, 0x75}, + {0x5d7b, 0x75}, + {0x5d7c, 0x75}, + {0x5d7d, 0x75}, + {0x5d7e, 0x75}, + {0x5d7f, 0x75}, + {0x5d80, 0x75}, + {0x5d81, 0x75}, + {0x5d82, 0x75}, + {0x5d83, 0x75}, + {0x5d84, 0x75}, + {0x5d85, 0x75}, + {0x5d86, 0x75}, + {0x5d87, 0x75}, + {0x5d88, 0x75}, + {0x5d89, 0x75}, + {0x5d8a, 0x75}, + {0x5d8b, 0x75}, + {0x5d8c, 0x75}, + {0x5d8d, 0x75}, + {0x5d8e, 0x75}, + {0x5d8f, 0x75}, + {0x5d90, 0x75}, + {0x5d91, 0x75}, + {0x5d92, 0x75}, + {0x5d93, 0x75}, + {0x5d94, 0x75}, + {0x5d95, 0x75}, + {0x5d96, 0x75}, + {0x5d97, 0x75}, + {0x5d98, 0x75}, + {0x5d99, 0x75}, + {0x5d9a, 0x75}, + {0x5d9b, 0x75}, + {0x5d9c, 0x75}, + {0x5d9d, 0x75}, + {0x5d9e, 0x75}, + {0x5d9f, 0x75}, + {0x5da0, 0x75}, + {0x5da1, 0x75}, + {0x5da2, 0x75}, + {0x5da3, 0x75}, + {0x5da4, 0x75}, + {0x5da5, 0x75}, + {0x5da6, 0x75}, + {0x5da7, 0x75}, + {0x5da8, 0x75}, + {0x5da9, 0x75}, + {0x5daa, 0x75}, + {0x5dab, 0x75}, + {0x5dac, 0x75}, + {0x5dad, 0x75}, + {0x5dae, 0x75}, + {0x5daf, 0x75}, + {0x5db0, 0x75}, + {0x5db1, 0x75}, + {0x5db2, 0x75}, + {0x5db3, 0x75}, + {0x5db4, 0x75}, + {0x5db5, 0x75}, + {0x5db6, 0x75}, + {0x5db7, 0x75}, + {0x5db8, 0x75}, + {0x5db9, 0x75}, + {0x5dba, 0x75}, + {0x5dbb, 0x75}, + {0x5dbc, 0x75}, + {0x5dbd, 0x75}, + {0x5dbe, 0x75}, + {0x5dbf, 0x75}, + {0x5dc0, 0x75}, + {0x5dc1, 0x75}, + {0x5dc2, 0x75}, + {0x5dc3, 0x75}, + {0x5dc4, 0x75}, + {0x5dc5, 0x75}, + {0x5dc6, 0x75}, + {0x5dc7, 0x75}, + {0x5dc8, 0x75}, + {0x5dc9, 0x75}, + {0x5dca, 0x75}, + {0x5dcb, 0x75}, + {0x5dcc, 0x75}, + {0x5dcd, 0x75}, + {0x5dce, 0x75}, + {0x5dcf, 0x75}, + {0x5dd0, 0x75}, + {0x5dd1, 0x75}, + {0x5dd2, 0x75}, + {0x5dd3, 0x75}, + {0x5dd4, 0x75}, + {0x5dd5, 0x75}, + {0x5dd6, 0x75}, + {0x5dd7, 0x75}, + {0x5dd8, 0x75}, + {0x5dd9, 0x75}, + {0x5dda, 0x75}, + {0x5ddb, 0x75}, + {0x5ddc, 0x75}, + {0x5ddd, 0x75}, + {0x5dde, 0x75}, + {0x5ddf, 0x75}, + {0x5de0, 0x75}, + {0x5de1, 0x75}, + {0x5de2, 0x75}, + {0x5de3, 0x75}, + {0x5de4, 0x75}, + {0x5de5, 0x75}, + {0x5de6, 0x75}, + {0x5de7, 0x75}, + {0x5de8, 0x75}, + {0x5de9, 0x75}, + {0x5dea, 0x75}, + {0x5deb, 0x75}, + {0x5dec, 0x75}, + {0x5ded, 0x75}, + {0x5dee, 0x75}, + {0x5def, 0x75}, + {0x5df0, 0x75}, + {0x5df1, 0x75}, + {0x5df2, 0x75}, + {0x5df3, 0x75}, + {0x5df4, 0x75}, + {0x5df5, 0x75}, + {0x5df6, 0x75}, + {0x5df7, 0x75}, + {0x5df8, 0x75}, + {0x5df9, 0x75}, + {0x5dfa, 0x75}, + {0x5dfb, 0x75}, + {0x5dfc, 0x75}, + {0x5dfd, 0x75}, + {0x5dfe, 0x75}, + {0x5dff, 0x75}, + {0x5e00, 0x75}, + {0x5e01, 0x75}, + {0x5e02, 0x75}, + {0x5e03, 0x75}, + {0x5e04, 0x75}, + {0x5e05, 0x75}, + {0x5e06, 0x75}, + {0x5e07, 0x75}, + {0x5e08, 0x75}, + {0x5e09, 0x75}, + {0x5e0a, 0x75}, + {0x5e0b, 0x75}, + {0x5e0c, 0x75}, + {0x5e0d, 0x75}, + {0x5e0e, 0x75}, + {0x5e0f, 0x75}, + {0x5e10, 0x75}, + {0x5e11, 0x75}, + {0x5e12, 0x75}, + {0x5e13, 0x75}, + {0x5e14, 0x75}, + 
{0x5e15, 0x75}, + {0x5e16, 0x75}, + {0x5e17, 0x75}, + {0x5e18, 0x75}, + {0x5e19, 0x75}, + {0x5e1a, 0x75}, + {0x5e1b, 0x75}, + {0x5e1c, 0x75}, + {0x5e1d, 0x75}, + {0x5e1e, 0x75}, + {0x5e1f, 0x75}, + {0x5e20, 0x75}, + {0x5e21, 0x75}, + {0x5e22, 0x75}, + {0x5e23, 0x75}, + {0x5e24, 0x75}, + {0x5e25, 0x75}, + {0x5e26, 0x75}, + {0x5e27, 0x75}, + {0x5e28, 0x75}, + {0x5e29, 0x75}, + {0x5e2a, 0x75}, + {0x5e2b, 0x75}, + {0x5e2c, 0x75}, + {0x5e2d, 0x75}, + {0x5e2e, 0x75}, + {0x5e2f, 0x75}, + {0x5e30, 0x75}, + {0x5e31, 0x75}, + {0x5e32, 0x75}, + {0x5e33, 0x75}, + {0x5e34, 0x75}, + {0x5e35, 0x75}, + {0x5e36, 0x75}, + {0x5e37, 0x75}, + {0x5e38, 0x75}, + {0x5e39, 0x75}, + {0x5e3a, 0x75}, + {0x5e3b, 0x75}, + {0x5e3c, 0x75}, + {0x5e3d, 0x75}, + {0x5e3e, 0x75}, + {0x5e3f, 0x75}, + {0x5e40, 0x75}, + {0x5e41, 0x75}, + {0x5e42, 0x75}, + {0x5e43, 0x75}, + {0x5e44, 0x75}, + {0x5e45, 0x75}, + {0x5e46, 0x75}, + {0x5e47, 0x75}, + {0x5e48, 0x75}, + {0x5e49, 0x75}, + {0x5e4a, 0x75}, + {0x5e4b, 0x75}, + {0x5e4c, 0x75}, + {0x5e4d, 0x75}, + {0x5e4e, 0x75}, + {0x5e4f, 0x75}, + {0x5e50, 0x75}, + {0x5e51, 0x75}, + {0x5e52, 0x75}, + {0x5e53, 0x75}, + {0x5e54, 0x75}, + {0x5e55, 0x75}, + {0x5e56, 0x75}, + {0x5e57, 0x75}, + {0x5e58, 0x75}, + {0x5e59, 0x75}, + {0x5e5a, 0x75}, + {0x5e5b, 0x75}, + {0x5e5c, 0x75}, + {0x5e5d, 0x75}, + {0x5e5e, 0x75}, + {0x5e5f, 0x75}, + {0x5e60, 0x75}, + {0x5e61, 0x75}, + {0x5e62, 0x75}, + {0x5e63, 0x75}, + {0x5e64, 0x75}, + {0x5e65, 0x75}, + {0x5e66, 0x75}, + {0x5e67, 0x75}, + {0x5e68, 0x75}, + {0x5e69, 0x75}, + {0x5e6a, 0x75}, + {0x5e6b, 0x75}, + {0x5e6c, 0x75}, + {0x5e6d, 0x75}, + {0x5e6e, 0x75}, + {0x5e6f, 0x75}, + {0x5e70, 0x75}, + {0x5e71, 0x75}, + {0x5e72, 0x75}, + {0x5e73, 0x75}, + {0x5e74, 0x75}, + {0x5e75, 0x75}, + {0x5e76, 0x75}, + {0x5e77, 0x75}, + {0x5e78, 0x75}, + {0x5e79, 0x75}, + {0x5e7a, 0x75}, + {0x5e7b, 0x75}, + {0x5e7c, 0x75}, + {0x5e7d, 0x75}, + {0x5e7e, 0x75}, + {0x5e7f, 0x75}, + {0x5e80, 0x75}, + {0x5e81, 0x75}, + {0x5e82, 0x75}, + {0x5e83, 0x75}, + {0x5e84, 0x75}, + {0x5e85, 0x75}, + {0x5e86, 0x75}, + {0x5e87, 0x75}, + {0x5e88, 0x75}, + {0x5e89, 0x75}, + {0x5e8a, 0x75}, + {0x5e8b, 0x75}, + {0x5e8c, 0x75}, + {0x5e8d, 0x75}, + {0x5e8e, 0x75}, + {0x5e8f, 0x75}, + {0x5e90, 0x75}, + {0x5e91, 0x75}, + {0x5e92, 0x75}, + {0x5e93, 0x75}, + {0x5e94, 0x75}, + {0x5e95, 0x75}, + {0x5e96, 0x75}, + {0x5e97, 0x75}, + {0x5e98, 0x75}, + {0x5e99, 0x75}, + {0x5e9a, 0x75}, + {0x5e9b, 0x75}, + {0x5e9c, 0x75}, + {0x5e9d, 0x75}, + {0x5e9e, 0x75}, + {0x5e9f, 0x75}, + {0x5ea0, 0x75}, + {0x5ea1, 0x75}, + {0x5ea2, 0x75}, + {0x5ea3, 0x75}, + {0x5ea4, 0x75}, + {0x5ea5, 0x75}, + {0x5ea6, 0x75}, + {0x5ea7, 0x75}, + {0x5ea8, 0x75}, + {0x5ea9, 0x75}, + {0x5eaa, 0x75}, + {0x5eab, 0x75}, + {0x5eac, 0x75}, + {0x5ead, 0x75}, + {0x5eae, 0x75}, + {0x5eaf, 0x75}, + {0x5eb0, 0x75}, + {0x5eb1, 0x75}, + {0x5eb2, 0x75}, + {0x5eb3, 0x75}, + {0x5eb4, 0x75}, + {0x5eb5, 0x75}, + {0x5eb6, 0x75}, + {0x5eb7, 0x75}, + {0x5eb8, 0x75}, + {0x5eb9, 0x75}, + {0x5eba, 0x75}, + {0x5ebb, 0x75}, + {0x5ebc, 0x75}, + {0x5ebd, 0x75}, + {0x5ebe, 0x75}, + {0x5ebf, 0x75}, + {0x5ec0, 0x75}, + {0x5ec1, 0x75}, + {0x5ec2, 0x75}, + {0x5ec3, 0x75}, + {0x5ec4, 0x75}, + {0x5ec5, 0x75}, + {0x5ec6, 0x75}, + {0x5ec7, 0x75}, + {0x5ec8, 0x75}, + {0x5ec9, 0x75}, + {0x5eca, 0x75}, + {0x5ecb, 0x75}, + {0x5ecc, 0x75}, + {0x5ecd, 0x75}, + {0x5ece, 0x75}, + {0x5ecf, 0x75}, + {0x5ed0, 0x75}, + {0x5ed1, 0x75}, + {0x5ed2, 0x75}, + {0x5ed3, 0x75}, + {0x5ed4, 0x75}, + {0x5ed5, 0x75}, + {0x5ed6, 0x75}, + {0x5ed7, 0x75}, + {0x5ed8, 0x75}, + {0x5ed9, 0x75}, + {0x5eda, 
0x75}, + {0x5edb, 0x75}, + {0x5edc, 0x75}, + {0x5edd, 0x75}, + {0x5ede, 0x75}, + {0x5edf, 0x75}, + {0x5ee0, 0x75}, + {0x5ee1, 0x75}, + {0x5ee2, 0x75}, + {0x5ee3, 0x75}, + {0x5ee4, 0x75}, + {0x5ee5, 0x75}, + {0x5ee6, 0x75}, + {0x5ee7, 0x75}, + {0x5ee8, 0x75}, + {0x5ee9, 0x75}, + {0x5eea, 0x75}, + {0x5eeb, 0x75}, + {0x5eec, 0x75}, + {0x5eed, 0x75}, + {0x5eee, 0x75}, + {0x5eef, 0x75}, + {0x5ef0, 0x75}, + {0x5ef1, 0x75}, + {0x5ef2, 0x75}, + {0x5ef3, 0x75}, + {0x5ef4, 0x75}, + {0x5ef5, 0x75}, + {0x5ef6, 0x75}, + {0x5ef7, 0x75}, + {0x5ef8, 0x75}, + {0x5ef9, 0x75}, + {0x5efa, 0x75}, + {0x5efb, 0x75}, + {0x5efc, 0x75}, + {0x5efd, 0x75}, + {0x5efe, 0x75}, + {0x5eff, 0x75}, + {0x5f00, 0x75}, + {0x5f01, 0x75}, + {0x5f02, 0x75}, + {0x5f03, 0x75}, + {0x5f04, 0x75}, + {0x5f05, 0x75}, + {0x5f06, 0x75}, + {0x5f07, 0x75}, + {0x5f08, 0x75}, + {0x5f09, 0x75}, + {0x5f0a, 0x75}, + {0x5f0b, 0x75}, + {0x5f0c, 0x75}, + {0x5f0d, 0x75}, + {0x5f0e, 0x75}, + {0x5f0f, 0x75}, + {0x5f10, 0x75}, + {0x5f11, 0x75}, + {0x5f12, 0x75}, + {0x5f13, 0x75}, + {0x5f14, 0x75}, + {0x5f15, 0x75}, + {0x5f16, 0x75}, + {0x5f17, 0x75}, + {0x5f18, 0x75}, + {0x5f19, 0x75}, + {0x5f1a, 0x75}, + {0x5f1b, 0x75}, + {0x5f1c, 0x75}, + {0x5f1d, 0x75}, + {0x5f1e, 0x75}, + {0x5f1f, 0x75}, + {0x3813, 0x0f}, //10; +}; + +//OV08X 1C 3856x2176_DPHY150M-2L +static const struct ov08x40_reg lane_2_mode_3856x2176_regs_1500mbps[] = { + {0x5000, 0x5d}, + {0x5001, 0x20}, {0x5004, 0x53}, {0x5110, 0x00}, {0x5111, 0x14}, @@ -165,33 +2941,28 @@ static const struct ov08x40_reg mipi_data_rate_800mbps[] = { {0x5a1f, 0x0e}, {0x5a27, 0x0e}, {0x6002, 0x2e}, -}; - -static const struct ov08x40_reg mode_3856x2416_regs[] = { - {0x5000, 0x5d}, - {0x5001, 0x20}, {0x5008, 0xb0}, {0x50c1, 0x00}, {0x53c1, 0x00}, {0x5f40, 0x00}, {0x5f41, 0x40}, {0x0300, 0x3a}, - {0x0301, 0xc8}, + {0x0301, 0x88}, {0x0302, 0x31}, - {0x0303, 0x03}, + {0x0303, 0x05}, {0x0304, 0x01}, - {0x0305, 0xa1}, + {0x0305, 0x38}, {0x0306, 0x04}, - {0x0307, 0x01}, + {0x0307, 0x00}, {0x0308, 0x03}, - {0x0309, 0x03}, + {0x0309, 0x02}, {0x0310, 0x0a}, {0x0311, 0x02}, {0x0312, 0x01}, {0x0313, 0x08}, - {0x0314, 0x66}, + {0x0314, 0x00}, {0x0315, 0x00}, - {0x0316, 0x34}, + {0x0316, 0x2c}, {0x0320, 0x02}, {0x0321, 0x03}, {0x0323, 0x05}, @@ -217,7 +2988,7 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = { {0x198e, 0x00}, {0x198f, 0x01}, {0x3009, 0x04}, - {0x3012, 0x41}, + {0x3012, 0x21}, {0x3015, 0x00}, {0x3016, 0xb0}, {0x3017, 0xf0}, @@ -245,8 +3016,8 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = { {0x3400, 0x1c}, {0x3401, 0x80}, {0x3402, 0x8c}, - {0x3419, 0x13}, - {0x341a, 0x89}, + {0x3419, 0x12}, + {0x341a, 0x99}, {0x341b, 0x30}, {0x3420, 0x00}, {0x3421, 0x00}, @@ -346,23 +3117,23 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = { {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, - {0x3803, 0x00}, + {0x3803, 0x70}, {0x3804, 0x0f}, {0x3805, 0x1f}, {0x3806, 0x09}, - {0x3807, 0x7f}, + {0x3807, 0x0f}, {0x3808, 0x0f}, {0x3809, 0x10}, - {0x380a, 0x09}, - {0x380b, 0x70}, + {0x380a, 0x08}, + {0x380b, 0x80}, {0x380c, 0x02}, - {0x380d, 0x80}, - {0x380e, 0x13}, - {0x380f, 0x88}, + {0x380d, 0xa0}, + {0x380e, 0x12}, + {0x380f, 0x98}, {0x3810, 0x00}, {0x3811, 0x08}, {0x3812, 0x00}, - {0x3813, 0x07}, + {0x3813, 0x10}, {0x3814, 0x11}, {0x3815, 0x11}, {0x3820, 0x00}, @@ -383,7 +3154,7 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = { {0x3847, 0x00}, {0x384a, 0x00}, {0x384c, 0x02}, - {0x384d, 0x80}, + {0x384d, 0xa0}, {0x3856, 0x50}, {0x3857, 0x30}, {0x3858, 0x80}, @@ -589,8 +3360,8 @@ static 
const struct ov08x40_reg mode_3856x2416_regs[] = { {0x480c, 0x80}, {0x480f, 0x32}, {0x4813, 0xe4}, - {0x4837, 0x14}, - {0x4850, 0x42}, + {0x4837, 0x0a}, + {0x4850, 0x47}, {0x4884, 0x04}, {0x4c00, 0xf8}, {0x4c01, 0x44}, @@ -677,33 +3448,1200 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = { {0x3502, 0x10}, {0x3508, 0x0f}, {0x3509, 0x80}, + {0x5a80, 0x75}, + {0x5a81, 0x75}, + {0x5a82, 0x75}, + {0x5a83, 0x75}, + {0x5a84, 0x75}, + {0x5a85, 0x75}, + {0x5a86, 0x75}, + {0x5a87, 0x75}, + {0x5a88, 0x75}, + {0x5a89, 0x75}, + {0x5a8a, 0x75}, + {0x5a8b, 0x75}, + {0x5a8c, 0x75}, + {0x5a8d, 0x75}, + {0x5a8e, 0x75}, + {0x5a8f, 0x75}, + {0x5a90, 0x75}, + {0x5a91, 0x75}, + {0x5a92, 0x75}, + {0x5a93, 0x75}, + {0x5a94, 0x75}, + {0x5a95, 0x75}, + {0x5a96, 0x75}, + {0x5a97, 0x75}, + {0x5a98, 0x75}, + {0x5a99, 0x75}, + {0x5a9a, 0x75}, + {0x5a9b, 0x75}, + {0x5a9c, 0x75}, + {0x5a9d, 0x75}, + {0x5a9e, 0x75}, + {0x5a9f, 0x75}, + {0x5aa0, 0x75}, + {0x5aa1, 0x75}, + {0x5aa2, 0x75}, + {0x5aa3, 0x75}, + {0x5aa4, 0x75}, + {0x5aa5, 0x75}, + {0x5aa6, 0x75}, + {0x5aa7, 0x75}, + {0x5aa8, 0x75}, + {0x5aa9, 0x75}, + {0x5aaa, 0x75}, + {0x5aab, 0x75}, + {0x5aac, 0x75}, + {0x5aad, 0x75}, + {0x5aae, 0x75}, + {0x5aaf, 0x75}, + {0x5ab0, 0x75}, + {0x5ab1, 0x75}, + {0x5ab2, 0x75}, + {0x5ab3, 0x75}, + {0x5ab4, 0x75}, + {0x5ab5, 0x75}, + {0x5ab6, 0x75}, + {0x5ab7, 0x75}, + {0x5ab8, 0x75}, + {0x5ab9, 0x75}, + {0x5aba, 0x75}, + {0x5abb, 0x75}, + {0x5abc, 0x75}, + {0x5abd, 0x75}, + {0x5abe, 0x75}, + {0x5abf, 0x75}, + {0x5ac0, 0x75}, + {0x5ac1, 0x75}, + {0x5ac2, 0x75}, + {0x5ac3, 0x75}, + {0x5ac4, 0x75}, + {0x5ac5, 0x75}, + {0x5ac6, 0x75}, + {0x5ac7, 0x75}, + {0x5ac8, 0x75}, + {0x5ac9, 0x75}, + {0x5aca, 0x75}, + {0x5acb, 0x75}, + {0x5acc, 0x75}, + {0x5acd, 0x75}, + {0x5ace, 0x75}, + {0x5acf, 0x75}, + {0x5ad0, 0x75}, + {0x5ad1, 0x75}, + {0x5ad2, 0x75}, + {0x5ad3, 0x75}, + {0x5ad4, 0x75}, + {0x5ad5, 0x75}, + {0x5ad6, 0x75}, + {0x5ad7, 0x75}, + {0x5ad8, 0x75}, + {0x5ad9, 0x75}, + {0x5ada, 0x75}, + {0x5adb, 0x75}, + {0x5adc, 0x75}, + {0x5add, 0x75}, + {0x5ade, 0x75}, + {0x5adf, 0x75}, + {0x5ae0, 0x75}, + {0x5ae1, 0x75}, + {0x5ae2, 0x75}, + {0x5ae3, 0x75}, + {0x5ae4, 0x75}, + {0x5ae5, 0x75}, + {0x5ae6, 0x75}, + {0x5ae7, 0x75}, + {0x5ae8, 0x75}, + {0x5ae9, 0x75}, + {0x5aea, 0x75}, + {0x5aeb, 0x75}, + {0x5aec, 0x75}, + {0x5aed, 0x75}, + {0x5aee, 0x75}, + {0x5aef, 0x75}, + {0x5af0, 0x75}, + {0x5af1, 0x75}, + {0x5af2, 0x75}, + {0x5af3, 0x75}, + {0x5af4, 0x75}, + {0x5af5, 0x75}, + {0x5af6, 0x75}, + {0x5af7, 0x75}, + {0x5af8, 0x75}, + {0x5af9, 0x75}, + {0x5afa, 0x75}, + {0x5afb, 0x75}, + {0x5afc, 0x75}, + {0x5afd, 0x75}, + {0x5afe, 0x75}, + {0x5aff, 0x75}, + {0x5b00, 0x75}, + {0x5b01, 0x75}, + {0x5b02, 0x75}, + {0x5b03, 0x75}, + {0x5b04, 0x75}, + {0x5b05, 0x75}, + {0x5b06, 0x75}, + {0x5b07, 0x75}, + {0x5b08, 0x75}, + {0x5b09, 0x75}, + {0x5b0a, 0x75}, + {0x5b0b, 0x75}, + {0x5b0c, 0x75}, + {0x5b0d, 0x75}, + {0x5b0e, 0x75}, + {0x5b0f, 0x75}, + {0x5b10, 0x75}, + {0x5b11, 0x75}, + {0x5b12, 0x75}, + {0x5b13, 0x75}, + {0x5b14, 0x75}, + {0x5b15, 0x75}, + {0x5b16, 0x75}, + {0x5b17, 0x75}, + {0x5b18, 0x75}, + {0x5b19, 0x75}, + {0x5b1a, 0x75}, + {0x5b1b, 0x75}, + {0x5b1c, 0x75}, + {0x5b1d, 0x75}, + {0x5b1e, 0x75}, + {0x5b1f, 0x75}, + {0x5b20, 0x75}, + {0x5b21, 0x75}, + {0x5b22, 0x75}, + {0x5b23, 0x75}, + {0x5b24, 0x75}, + {0x5b25, 0x75}, + {0x5b26, 0x75}, + {0x5b27, 0x75}, + {0x5b28, 0x75}, + {0x5b29, 0x75}, + {0x5b2a, 0x75}, + {0x5b2b, 0x75}, + {0x5b2c, 0x75}, + {0x5b2d, 0x75}, + {0x5b2e, 0x75}, + {0x5b2f, 0x75}, + {0x5b30, 0x75}, + {0x5b31, 0x75}, 
+ {0x5b32, 0x75}, + {0x5b33, 0x75}, + {0x5b34, 0x75}, + {0x5b35, 0x75}, + {0x5b36, 0x75}, + {0x5b37, 0x75}, + {0x5b38, 0x75}, + {0x5b39, 0x75}, + {0x5b3a, 0x75}, + {0x5b3b, 0x75}, + {0x5b3c, 0x75}, + {0x5b3d, 0x75}, + {0x5b3e, 0x75}, + {0x5b3f, 0x75}, + {0x5b40, 0x75}, + {0x5b41, 0x75}, + {0x5b42, 0x75}, + {0x5b43, 0x75}, + {0x5b44, 0x75}, + {0x5b45, 0x75}, + {0x5b46, 0x75}, + {0x5b47, 0x75}, + {0x5b48, 0x75}, + {0x5b49, 0x75}, + {0x5b4a, 0x75}, + {0x5b4b, 0x75}, + {0x5b4c, 0x75}, + {0x5b4d, 0x75}, + {0x5b4e, 0x75}, + {0x5b4f, 0x75}, + {0x5b50, 0x75}, + {0x5b51, 0x75}, + {0x5b52, 0x75}, + {0x5b53, 0x75}, + {0x5b54, 0x75}, + {0x5b55, 0x75}, + {0x5b56, 0x75}, + {0x5b57, 0x75}, + {0x5b58, 0x75}, + {0x5b59, 0x75}, + {0x5b5a, 0x75}, + {0x5b5b, 0x75}, + {0x5b5c, 0x75}, + {0x5b5d, 0x75}, + {0x5b5e, 0x75}, + {0x5b5f, 0x75}, + {0x5b60, 0x75}, + {0x5b61, 0x75}, + {0x5b62, 0x75}, + {0x5b63, 0x75}, + {0x5b64, 0x75}, + {0x5b65, 0x75}, + {0x5b66, 0x75}, + {0x5b67, 0x75}, + {0x5b68, 0x75}, + {0x5b69, 0x75}, + {0x5b6a, 0x75}, + {0x5b6b, 0x75}, + {0x5b6c, 0x75}, + {0x5b6d, 0x75}, + {0x5b6e, 0x75}, + {0x5b6f, 0x75}, + {0x5b70, 0x75}, + {0x5b71, 0x75}, + {0x5b72, 0x75}, + {0x5b73, 0x75}, + {0x5b74, 0x75}, + {0x5b75, 0x75}, + {0x5b76, 0x75}, + {0x5b77, 0x75}, + {0x5b78, 0x75}, + {0x5b79, 0x75}, + {0x5b7a, 0x75}, + {0x5b7b, 0x75}, + {0x5b7c, 0x75}, + {0x5b7d, 0x75}, + {0x5b7e, 0x75}, + {0x5b7f, 0x75}, + {0x5b80, 0x75}, + {0x5b81, 0x75}, + {0x5b82, 0x75}, + {0x5b83, 0x75}, + {0x5b84, 0x75}, + {0x5b85, 0x75}, + {0x5b86, 0x75}, + {0x5b87, 0x75}, + {0x5b88, 0x75}, + {0x5b89, 0x75}, + {0x5b8a, 0x75}, + {0x5b8b, 0x75}, + {0x5b8c, 0x75}, + {0x5b8d, 0x75}, + {0x5b8e, 0x75}, + {0x5b8f, 0x75}, + {0x5b90, 0x75}, + {0x5b91, 0x75}, + {0x5b92, 0x75}, + {0x5b93, 0x75}, + {0x5b94, 0x75}, + {0x5b95, 0x75}, + {0x5b96, 0x75}, + {0x5b97, 0x75}, + {0x5b98, 0x75}, + {0x5b99, 0x75}, + {0x5b9a, 0x75}, + {0x5b9b, 0x75}, + {0x5b9c, 0x75}, + {0x5b9d, 0x75}, + {0x5b9e, 0x75}, + {0x5b9f, 0x75}, + {0x5bc0, 0x75}, + {0x5bc1, 0x75}, + {0x5bc2, 0x75}, + {0x5bc3, 0x75}, + {0x5bc4, 0x75}, + {0x5bc5, 0x75}, + {0x5bc6, 0x75}, + {0x5bc7, 0x75}, + {0x5bc8, 0x75}, + {0x5bc9, 0x75}, + {0x5bca, 0x75}, + {0x5bcb, 0x75}, + {0x5bcc, 0x75}, + {0x5bcd, 0x75}, + {0x5bce, 0x75}, + {0x5bcf, 0x75}, + {0x5bd0, 0x75}, + {0x5bd1, 0x75}, + {0x5bd2, 0x75}, + {0x5bd3, 0x75}, + {0x5bd4, 0x75}, + {0x5bd5, 0x75}, + {0x5bd6, 0x75}, + {0x5bd7, 0x75}, + {0x5bd8, 0x75}, + {0x5bd9, 0x75}, + {0x5bda, 0x75}, + {0x5bdb, 0x75}, + {0x5bdc, 0x75}, + {0x5bdd, 0x75}, + {0x5bde, 0x75}, + {0x5bdf, 0x75}, + {0x5be0, 0x75}, + {0x5be1, 0x75}, + {0x5be2, 0x75}, + {0x5be3, 0x75}, + {0x5be4, 0x75}, + {0x5be5, 0x75}, + {0x5be6, 0x75}, + {0x5be7, 0x75}, + {0x5be8, 0x75}, + {0x5be9, 0x75}, + {0x5bea, 0x75}, + {0x5beb, 0x75}, + {0x5bec, 0x75}, + {0x5bed, 0x75}, + {0x5bee, 0x75}, + {0x5bef, 0x75}, + {0x5bf0, 0x75}, + {0x5bf1, 0x75}, + {0x5bf2, 0x75}, + {0x5bf3, 0x75}, + {0x5bf4, 0x75}, + {0x5bf5, 0x75}, + {0x5bf6, 0x75}, + {0x5bf7, 0x75}, + {0x5bf8, 0x75}, + {0x5bf9, 0x75}, + {0x5bfa, 0x75}, + {0x5bfb, 0x75}, + {0x5bfc, 0x75}, + {0x5bfd, 0x75}, + {0x5bfe, 0x75}, + {0x5bff, 0x75}, + {0x5c00, 0x75}, + {0x5c01, 0x75}, + {0x5c02, 0x75}, + {0x5c03, 0x75}, + {0x5c04, 0x75}, + {0x5c05, 0x75}, + {0x5c06, 0x75}, + {0x5c07, 0x75}, + {0x5c08, 0x75}, + {0x5c09, 0x75}, + {0x5c0a, 0x75}, + {0x5c0b, 0x75}, + {0x5c0c, 0x75}, + {0x5c0d, 0x75}, + {0x5c0e, 0x75}, + {0x5c0f, 0x75}, + {0x5c10, 0x75}, + {0x5c11, 0x75}, + {0x5c12, 0x75}, + {0x5c13, 0x75}, + {0x5c14, 0x75}, + {0x5c15, 0x75}, + {0x5c16, 0x75}, + 
{0x5c17, 0x75}, + {0x5c18, 0x75}, + {0x5c19, 0x75}, + {0x5c1a, 0x75}, + {0x5c1b, 0x75}, + {0x5c1c, 0x75}, + {0x5c1d, 0x75}, + {0x5c1e, 0x75}, + {0x5c1f, 0x75}, + {0x5c20, 0x75}, + {0x5c21, 0x75}, + {0x5c22, 0x75}, + {0x5c23, 0x75}, + {0x5c24, 0x75}, + {0x5c25, 0x75}, + {0x5c26, 0x75}, + {0x5c27, 0x75}, + {0x5c28, 0x75}, + {0x5c29, 0x75}, + {0x5c2a, 0x75}, + {0x5c2b, 0x75}, + {0x5c2c, 0x75}, + {0x5c2d, 0x75}, + {0x5c2e, 0x75}, + {0x5c2f, 0x75}, + {0x5c30, 0x75}, + {0x5c31, 0x75}, + {0x5c32, 0x75}, + {0x5c33, 0x75}, + {0x5c34, 0x75}, + {0x5c35, 0x75}, + {0x5c36, 0x75}, + {0x5c37, 0x75}, + {0x5c38, 0x75}, + {0x5c39, 0x75}, + {0x5c3a, 0x75}, + {0x5c3b, 0x75}, + {0x5c3c, 0x75}, + {0x5c3d, 0x75}, + {0x5c3e, 0x75}, + {0x5c3f, 0x75}, + {0x5c40, 0x75}, + {0x5c41, 0x75}, + {0x5c42, 0x75}, + {0x5c43, 0x75}, + {0x5c44, 0x75}, + {0x5c45, 0x75}, + {0x5c46, 0x75}, + {0x5c47, 0x75}, + {0x5c48, 0x75}, + {0x5c49, 0x75}, + {0x5c4a, 0x75}, + {0x5c4b, 0x75}, + {0x5c4c, 0x75}, + {0x5c4d, 0x75}, + {0x5c4e, 0x75}, + {0x5c4f, 0x75}, + {0x5c50, 0x75}, + {0x5c51, 0x75}, + {0x5c52, 0x75}, + {0x5c53, 0x75}, + {0x5c54, 0x75}, + {0x5c55, 0x75}, + {0x5c56, 0x75}, + {0x5c57, 0x75}, + {0x5c58, 0x75}, + {0x5c59, 0x75}, + {0x5c5a, 0x75}, + {0x5c5b, 0x75}, + {0x5c5c, 0x75}, + {0x5c5d, 0x75}, + {0x5c5e, 0x75}, + {0x5c5f, 0x75}, + {0x5c60, 0x75}, + {0x5c61, 0x75}, + {0x5c62, 0x75}, + {0x5c63, 0x75}, + {0x5c64, 0x75}, + {0x5c65, 0x75}, + {0x5c66, 0x75}, + {0x5c67, 0x75}, + {0x5c68, 0x75}, + {0x5c69, 0x75}, + {0x5c6a, 0x75}, + {0x5c6b, 0x75}, + {0x5c6c, 0x75}, + {0x5c6d, 0x75}, + {0x5c6e, 0x75}, + {0x5c6f, 0x75}, + {0x5c70, 0x75}, + {0x5c71, 0x75}, + {0x5c72, 0x75}, + {0x5c73, 0x75}, + {0x5c74, 0x75}, + {0x5c75, 0x75}, + {0x5c76, 0x75}, + {0x5c77, 0x75}, + {0x5c78, 0x75}, + {0x5c79, 0x75}, + {0x5c7a, 0x75}, + {0x5c7b, 0x75}, + {0x5c7c, 0x75}, + {0x5c7d, 0x75}, + {0x5c7e, 0x75}, + {0x5c7f, 0x75}, + {0x5c80, 0x75}, + {0x5c81, 0x75}, + {0x5c82, 0x75}, + {0x5c83, 0x75}, + {0x5c84, 0x75}, + {0x5c85, 0x75}, + {0x5c86, 0x75}, + {0x5c87, 0x75}, + {0x5c88, 0x75}, + {0x5c89, 0x75}, + {0x5c8a, 0x75}, + {0x5c8b, 0x75}, + {0x5c8c, 0x75}, + {0x5c8d, 0x75}, + {0x5c8e, 0x75}, + {0x5c8f, 0x75}, + {0x5c90, 0x75}, + {0x5c91, 0x75}, + {0x5c92, 0x75}, + {0x5c93, 0x75}, + {0x5c94, 0x75}, + {0x5c95, 0x75}, + {0x5c96, 0x75}, + {0x5c97, 0x75}, + {0x5c98, 0x75}, + {0x5c99, 0x75}, + {0x5c9a, 0x75}, + {0x5c9b, 0x75}, + {0x5c9c, 0x75}, + {0x5c9d, 0x75}, + {0x5c9e, 0x75}, + {0x5c9f, 0x75}, + {0x5ca0, 0x75}, + {0x5ca1, 0x75}, + {0x5ca2, 0x75}, + {0x5ca3, 0x75}, + {0x5ca4, 0x75}, + {0x5ca5, 0x75}, + {0x5ca6, 0x75}, + {0x5ca7, 0x75}, + {0x5ca8, 0x75}, + {0x5ca9, 0x75}, + {0x5caa, 0x75}, + {0x5cab, 0x75}, + {0x5cac, 0x75}, + {0x5cad, 0x75}, + {0x5cae, 0x75}, + {0x5caf, 0x75}, + {0x5cb0, 0x75}, + {0x5cb1, 0x75}, + {0x5cb2, 0x75}, + {0x5cb3, 0x75}, + {0x5cb4, 0x75}, + {0x5cb5, 0x75}, + {0x5cb6, 0x75}, + {0x5cb7, 0x75}, + {0x5cb8, 0x75}, + {0x5cb9, 0x75}, + {0x5cba, 0x75}, + {0x5cbb, 0x75}, + {0x5cbc, 0x75}, + {0x5cbd, 0x75}, + {0x5cbe, 0x75}, + {0x5cbf, 0x75}, + {0x5cc0, 0x75}, + {0x5cc1, 0x75}, + {0x5cc2, 0x75}, + {0x5cc3, 0x75}, + {0x5cc4, 0x75}, + {0x5cc5, 0x75}, + {0x5cc6, 0x75}, + {0x5cc7, 0x75}, + {0x5cc8, 0x75}, + {0x5cc9, 0x75}, + {0x5cca, 0x75}, + {0x5ccb, 0x75}, + {0x5ccc, 0x75}, + {0x5ccd, 0x75}, + {0x5cce, 0x75}, + {0x5ccf, 0x75}, + {0x5cd0, 0x75}, + {0x5cd1, 0x75}, + {0x5cd2, 0x75}, + {0x5cd3, 0x75}, + {0x5cd4, 0x75}, + {0x5cd5, 0x75}, + {0x5cd6, 0x75}, + {0x5cd7, 0x75}, + {0x5cd8, 0x75}, + {0x5cd9, 0x75}, + {0x5cda, 0x75}, + {0x5cdb, 0x75}, + {0x5cdc, 
0x75}, + {0x5cdd, 0x75}, + {0x5cde, 0x75}, + {0x5cdf, 0x75}, + {0x5ce0, 0x75}, + {0x5ce1, 0x75}, + {0x5ce2, 0x75}, + {0x5ce3, 0x75}, + {0x5ce4, 0x75}, + {0x5ce5, 0x75}, + {0x5ce6, 0x75}, + {0x5ce7, 0x75}, + {0x5ce8, 0x75}, + {0x5ce9, 0x75}, + {0x5cea, 0x75}, + {0x5ceb, 0x75}, + {0x5cec, 0x75}, + {0x5ced, 0x75}, + {0x5cee, 0x75}, + {0x5cef, 0x75}, + {0x5cf0, 0x75}, + {0x5cf1, 0x75}, + {0x5cf2, 0x75}, + {0x5cf3, 0x75}, + {0x5cf4, 0x75}, + {0x5cf5, 0x75}, + {0x5cf6, 0x75}, + {0x5cf7, 0x75}, + {0x5cf8, 0x75}, + {0x5cf9, 0x75}, + {0x5cfa, 0x75}, + {0x5cfb, 0x75}, + {0x5cfc, 0x75}, + {0x5cfd, 0x75}, + {0x5cfe, 0x75}, + {0x5cff, 0x75}, + {0x5d00, 0x75}, + {0x5d01, 0x75}, + {0x5d02, 0x75}, + {0x5d03, 0x75}, + {0x5d04, 0x75}, + {0x5d05, 0x75}, + {0x5d06, 0x75}, + {0x5d07, 0x75}, + {0x5d08, 0x75}, + {0x5d09, 0x75}, + {0x5d0a, 0x75}, + {0x5d0b, 0x75}, + {0x5d0c, 0x75}, + {0x5d0d, 0x75}, + {0x5d0e, 0x75}, + {0x5d0f, 0x75}, + {0x5d10, 0x75}, + {0x5d11, 0x75}, + {0x5d12, 0x75}, + {0x5d13, 0x75}, + {0x5d14, 0x75}, + {0x5d15, 0x75}, + {0x5d16, 0x75}, + {0x5d17, 0x75}, + {0x5d18, 0x75}, + {0x5d19, 0x75}, + {0x5d1a, 0x75}, + {0x5d1b, 0x75}, + {0x5d1c, 0x75}, + {0x5d1d, 0x75}, + {0x5d1e, 0x75}, + {0x5d1f, 0x75}, + {0x5d20, 0x75}, + {0x5d21, 0x75}, + {0x5d22, 0x75}, + {0x5d23, 0x75}, + {0x5d24, 0x75}, + {0x5d25, 0x75}, + {0x5d26, 0x75}, + {0x5d27, 0x75}, + {0x5d28, 0x75}, + {0x5d29, 0x75}, + {0x5d2a, 0x75}, + {0x5d2b, 0x75}, + {0x5d2c, 0x75}, + {0x5d2d, 0x75}, + {0x5d2e, 0x75}, + {0x5d2f, 0x75}, + {0x5d30, 0x75}, + {0x5d31, 0x75}, + {0x5d32, 0x75}, + {0x5d33, 0x75}, + {0x5d34, 0x75}, + {0x5d35, 0x75}, + {0x5d36, 0x75}, + {0x5d37, 0x75}, + {0x5d38, 0x75}, + {0x5d39, 0x75}, + {0x5d3a, 0x75}, + {0x5d3b, 0x75}, + {0x5d3c, 0x75}, + {0x5d3d, 0x75}, + {0x5d3e, 0x75}, + {0x5d3f, 0x75}, + {0x5d40, 0x75}, + {0x5d41, 0x75}, + {0x5d42, 0x75}, + {0x5d43, 0x75}, + {0x5d44, 0x75}, + {0x5d45, 0x75}, + {0x5d46, 0x75}, + {0x5d47, 0x75}, + {0x5d48, 0x75}, + {0x5d49, 0x75}, + {0x5d4a, 0x75}, + {0x5d4b, 0x75}, + {0x5d4c, 0x75}, + {0x5d4d, 0x75}, + {0x5d4e, 0x75}, + {0x5d4f, 0x75}, + {0x5d50, 0x75}, + {0x5d51, 0x75}, + {0x5d52, 0x75}, + {0x5d53, 0x75}, + {0x5d54, 0x75}, + {0x5d55, 0x75}, + {0x5d56, 0x75}, + {0x5d57, 0x75}, + {0x5d58, 0x75}, + {0x5d59, 0x75}, + {0x5d5a, 0x75}, + {0x5d5b, 0x75}, + {0x5d5c, 0x75}, + {0x5d5d, 0x75}, + {0x5d5e, 0x75}, + {0x5d5f, 0x75}, + {0x5d60, 0x75}, + {0x5d61, 0x75}, + {0x5d62, 0x75}, + {0x5d63, 0x75}, + {0x5d64, 0x75}, + {0x5d65, 0x75}, + {0x5d66, 0x75}, + {0x5d67, 0x75}, + {0x5d68, 0x75}, + {0x5d69, 0x75}, + {0x5d6a, 0x75}, + {0x5d6b, 0x75}, + {0x5d6c, 0x75}, + {0x5d6d, 0x75}, + {0x5d6e, 0x75}, + {0x5d6f, 0x75}, + {0x5d70, 0x75}, + {0x5d71, 0x75}, + {0x5d72, 0x75}, + {0x5d73, 0x75}, + {0x5d74, 0x75}, + {0x5d75, 0x75}, + {0x5d76, 0x75}, + {0x5d77, 0x75}, + {0x5d78, 0x75}, + {0x5d79, 0x75}, + {0x5d7a, 0x75}, + {0x5d7b, 0x75}, + {0x5d7c, 0x75}, + {0x5d7d, 0x75}, + {0x5d7e, 0x75}, + {0x5d7f, 0x75}, + {0x5d80, 0x75}, + {0x5d81, 0x75}, + {0x5d82, 0x75}, + {0x5d83, 0x75}, + {0x5d84, 0x75}, + {0x5d85, 0x75}, + {0x5d86, 0x75}, + {0x5d87, 0x75}, + {0x5d88, 0x75}, + {0x5d89, 0x75}, + {0x5d8a, 0x75}, + {0x5d8b, 0x75}, + {0x5d8c, 0x75}, + {0x5d8d, 0x75}, + {0x5d8e, 0x75}, + {0x5d8f, 0x75}, + {0x5d90, 0x75}, + {0x5d91, 0x75}, + {0x5d92, 0x75}, + {0x5d93, 0x75}, + {0x5d94, 0x75}, + {0x5d95, 0x75}, + {0x5d96, 0x75}, + {0x5d97, 0x75}, + {0x5d98, 0x75}, + {0x5d99, 0x75}, + {0x5d9a, 0x75}, + {0x5d9b, 0x75}, + {0x5d9c, 0x75}, + {0x5d9d, 0x75}, + {0x5d9e, 0x75}, + {0x5d9f, 0x75}, + {0x5da0, 0x75}, + {0x5da1, 0x75}, + 
{0x5da2, 0x75}, + {0x5da3, 0x75}, + {0x5da4, 0x75}, + {0x5da5, 0x75}, + {0x5da6, 0x75}, + {0x5da7, 0x75}, + {0x5da8, 0x75}, + {0x5da9, 0x75}, + {0x5daa, 0x75}, + {0x5dab, 0x75}, + {0x5dac, 0x75}, + {0x5dad, 0x75}, + {0x5dae, 0x75}, + {0x5daf, 0x75}, + {0x5db0, 0x75}, + {0x5db1, 0x75}, + {0x5db2, 0x75}, + {0x5db3, 0x75}, + {0x5db4, 0x75}, + {0x5db5, 0x75}, + {0x5db6, 0x75}, + {0x5db7, 0x75}, + {0x5db8, 0x75}, + {0x5db9, 0x75}, + {0x5dba, 0x75}, + {0x5dbb, 0x75}, + {0x5dbc, 0x75}, + {0x5dbd, 0x75}, + {0x5dbe, 0x75}, + {0x5dbf, 0x75}, + {0x5dc0, 0x75}, + {0x5dc1, 0x75}, + {0x5dc2, 0x75}, + {0x5dc3, 0x75}, + {0x5dc4, 0x75}, + {0x5dc5, 0x75}, + {0x5dc6, 0x75}, + {0x5dc7, 0x75}, + {0x5dc8, 0x75}, + {0x5dc9, 0x75}, + {0x5dca, 0x75}, + {0x5dcb, 0x75}, + {0x5dcc, 0x75}, + {0x5dcd, 0x75}, + {0x5dce, 0x75}, + {0x5dcf, 0x75}, + {0x5dd0, 0x75}, + {0x5dd1, 0x75}, + {0x5dd2, 0x75}, + {0x5dd3, 0x75}, + {0x5dd4, 0x75}, + {0x5dd5, 0x75}, + {0x5dd6, 0x75}, + {0x5dd7, 0x75}, + {0x5dd8, 0x75}, + {0x5dd9, 0x75}, + {0x5dda, 0x75}, + {0x5ddb, 0x75}, + {0x5ddc, 0x75}, + {0x5ddd, 0x75}, + {0x5dde, 0x75}, + {0x5ddf, 0x75}, + {0x5de0, 0x75}, + {0x5de1, 0x75}, + {0x5de2, 0x75}, + {0x5de3, 0x75}, + {0x5de4, 0x75}, + {0x5de5, 0x75}, + {0x5de6, 0x75}, + {0x5de7, 0x75}, + {0x5de8, 0x75}, + {0x5de9, 0x75}, + {0x5dea, 0x75}, + {0x5deb, 0x75}, + {0x5dec, 0x75}, + {0x5ded, 0x75}, + {0x5dee, 0x75}, + {0x5def, 0x75}, + {0x5df0, 0x75}, + {0x5df1, 0x75}, + {0x5df2, 0x75}, + {0x5df3, 0x75}, + {0x5df4, 0x75}, + {0x5df5, 0x75}, + {0x5df6, 0x75}, + {0x5df7, 0x75}, + {0x5df8, 0x75}, + {0x5df9, 0x75}, + {0x5dfa, 0x75}, + {0x5dfb, 0x75}, + {0x5dfc, 0x75}, + {0x5dfd, 0x75}, + {0x5dfe, 0x75}, + {0x5dff, 0x75}, + {0x5e00, 0x75}, + {0x5e01, 0x75}, + {0x5e02, 0x75}, + {0x5e03, 0x75}, + {0x5e04, 0x75}, + {0x5e05, 0x75}, + {0x5e06, 0x75}, + {0x5e07, 0x75}, + {0x5e08, 0x75}, + {0x5e09, 0x75}, + {0x5e0a, 0x75}, + {0x5e0b, 0x75}, + {0x5e0c, 0x75}, + {0x5e0d, 0x75}, + {0x5e0e, 0x75}, + {0x5e0f, 0x75}, + {0x5e10, 0x75}, + {0x5e11, 0x75}, + {0x5e12, 0x75}, + {0x5e13, 0x75}, + {0x5e14, 0x75}, + {0x5e15, 0x75}, + {0x5e16, 0x75}, + {0x5e17, 0x75}, + {0x5e18, 0x75}, + {0x5e19, 0x75}, + {0x5e1a, 0x75}, + {0x5e1b, 0x75}, + {0x5e1c, 0x75}, + {0x5e1d, 0x75}, + {0x5e1e, 0x75}, + {0x5e1f, 0x75}, + {0x5e20, 0x75}, + {0x5e21, 0x75}, + {0x5e22, 0x75}, + {0x5e23, 0x75}, + {0x5e24, 0x75}, + {0x5e25, 0x75}, + {0x5e26, 0x75}, + {0x5e27, 0x75}, + {0x5e28, 0x75}, + {0x5e29, 0x75}, + {0x5e2a, 0x75}, + {0x5e2b, 0x75}, + {0x5e2c, 0x75}, + {0x5e2d, 0x75}, + {0x5e2e, 0x75}, + {0x5e2f, 0x75}, + {0x5e30, 0x75}, + {0x5e31, 0x75}, + {0x5e32, 0x75}, + {0x5e33, 0x75}, + {0x5e34, 0x75}, + {0x5e35, 0x75}, + {0x5e36, 0x75}, + {0x5e37, 0x75}, + {0x5e38, 0x75}, + {0x5e39, 0x75}, + {0x5e3a, 0x75}, + {0x5e3b, 0x75}, + {0x5e3c, 0x75}, + {0x5e3d, 0x75}, + {0x5e3e, 0x75}, + {0x5e3f, 0x75}, + {0x5e40, 0x75}, + {0x5e41, 0x75}, + {0x5e42, 0x75}, + {0x5e43, 0x75}, + {0x5e44, 0x75}, + {0x5e45, 0x75}, + {0x5e46, 0x75}, + {0x5e47, 0x75}, + {0x5e48, 0x75}, + {0x5e49, 0x75}, + {0x5e4a, 0x75}, + {0x5e4b, 0x75}, + {0x5e4c, 0x75}, + {0x5e4d, 0x75}, + {0x5e4e, 0x75}, + {0x5e4f, 0x75}, + {0x5e50, 0x75}, + {0x5e51, 0x75}, + {0x5e52, 0x75}, + {0x5e53, 0x75}, + {0x5e54, 0x75}, + {0x5e55, 0x75}, + {0x5e56, 0x75}, + {0x5e57, 0x75}, + {0x5e58, 0x75}, + {0x5e59, 0x75}, + {0x5e5a, 0x75}, + {0x5e5b, 0x75}, + {0x5e5c, 0x75}, + {0x5e5d, 0x75}, + {0x5e5e, 0x75}, + {0x5e5f, 0x75}, + {0x5e60, 0x75}, + {0x5e61, 0x75}, + {0x5e62, 0x75}, + {0x5e63, 0x75}, + {0x5e64, 0x75}, + {0x5e65, 0x75}, + {0x5e66, 0x75}, + {0x5e67, 
0x75}, + {0x5e68, 0x75}, + {0x5e69, 0x75}, + {0x5e6a, 0x75}, + {0x5e6b, 0x75}, + {0x5e6c, 0x75}, + {0x5e6d, 0x75}, + {0x5e6e, 0x75}, + {0x5e6f, 0x75}, + {0x5e70, 0x75}, + {0x5e71, 0x75}, + {0x5e72, 0x75}, + {0x5e73, 0x75}, + {0x5e74, 0x75}, + {0x5e75, 0x75}, + {0x5e76, 0x75}, + {0x5e77, 0x75}, + {0x5e78, 0x75}, + {0x5e79, 0x75}, + {0x5e7a, 0x75}, + {0x5e7b, 0x75}, + {0x5e7c, 0x75}, + {0x5e7d, 0x75}, + {0x5e7e, 0x75}, + {0x5e7f, 0x75}, + {0x5e80, 0x75}, + {0x5e81, 0x75}, + {0x5e82, 0x75}, + {0x5e83, 0x75}, + {0x5e84, 0x75}, + {0x5e85, 0x75}, + {0x5e86, 0x75}, + {0x5e87, 0x75}, + {0x5e88, 0x75}, + {0x5e89, 0x75}, + {0x5e8a, 0x75}, + {0x5e8b, 0x75}, + {0x5e8c, 0x75}, + {0x5e8d, 0x75}, + {0x5e8e, 0x75}, + {0x5e8f, 0x75}, + {0x5e90, 0x75}, + {0x5e91, 0x75}, + {0x5e92, 0x75}, + {0x5e93, 0x75}, + {0x5e94, 0x75}, + {0x5e95, 0x75}, + {0x5e96, 0x75}, + {0x5e97, 0x75}, + {0x5e98, 0x75}, + {0x5e99, 0x75}, + {0x5e9a, 0x75}, + {0x5e9b, 0x75}, + {0x5e9c, 0x75}, + {0x5e9d, 0x75}, + {0x5e9e, 0x75}, + {0x5e9f, 0x75}, + {0x5ea0, 0x75}, + {0x5ea1, 0x75}, + {0x5ea2, 0x75}, + {0x5ea3, 0x75}, + {0x5ea4, 0x75}, + {0x5ea5, 0x75}, + {0x5ea6, 0x75}, + {0x5ea7, 0x75}, + {0x5ea8, 0x75}, + {0x5ea9, 0x75}, + {0x5eaa, 0x75}, + {0x5eab, 0x75}, + {0x5eac, 0x75}, + {0x5ead, 0x75}, + {0x5eae, 0x75}, + {0x5eaf, 0x75}, + {0x5eb0, 0x75}, + {0x5eb1, 0x75}, + {0x5eb2, 0x75}, + {0x5eb3, 0x75}, + {0x5eb4, 0x75}, + {0x5eb5, 0x75}, + {0x5eb6, 0x75}, + {0x5eb7, 0x75}, + {0x5eb8, 0x75}, + {0x5eb9, 0x75}, + {0x5eba, 0x75}, + {0x5ebb, 0x75}, + {0x5ebc, 0x75}, + {0x5ebd, 0x75}, + {0x5ebe, 0x75}, + {0x5ebf, 0x75}, + {0x5ec0, 0x75}, + {0x5ec1, 0x75}, + {0x5ec2, 0x75}, + {0x5ec3, 0x75}, + {0x5ec4, 0x75}, + {0x5ec5, 0x75}, + {0x5ec6, 0x75}, + {0x5ec7, 0x75}, + {0x5ec8, 0x75}, + {0x5ec9, 0x75}, + {0x5eca, 0x75}, + {0x5ecb, 0x75}, + {0x5ecc, 0x75}, + {0x5ecd, 0x75}, + {0x5ece, 0x75}, + {0x5ecf, 0x75}, + {0x5ed0, 0x75}, + {0x5ed1, 0x75}, + {0x5ed2, 0x75}, + {0x5ed3, 0x75}, + {0x5ed4, 0x75}, + {0x5ed5, 0x75}, + {0x5ed6, 0x75}, + {0x5ed7, 0x75}, + {0x5ed8, 0x75}, + {0x5ed9, 0x75}, + {0x5eda, 0x75}, + {0x5edb, 0x75}, + {0x5edc, 0x75}, + {0x5edd, 0x75}, + {0x5ede, 0x75}, + {0x5edf, 0x75}, + {0x5ee0, 0x75}, + {0x5ee1, 0x75}, + {0x5ee2, 0x75}, + {0x5ee3, 0x75}, + {0x5ee4, 0x75}, + {0x5ee5, 0x75}, + {0x5ee6, 0x75}, + {0x5ee7, 0x75}, + {0x5ee8, 0x75}, + {0x5ee9, 0x75}, + {0x5eea, 0x75}, + {0x5eeb, 0x75}, + {0x5eec, 0x75}, + {0x5eed, 0x75}, + {0x5eee, 0x75}, + {0x5eef, 0x75}, + {0x5ef0, 0x75}, + {0x5ef1, 0x75}, + {0x5ef2, 0x75}, + {0x5ef3, 0x75}, + {0x5ef4, 0x75}, + {0x5ef5, 0x75}, + {0x5ef6, 0x75}, + {0x5ef7, 0x75}, + {0x5ef8, 0x75}, + {0x5ef9, 0x75}, + {0x5efa, 0x75}, + {0x5efb, 0x75}, + {0x5efc, 0x75}, + {0x5efd, 0x75}, + {0x5efe, 0x75}, + {0x5eff, 0x75}, + {0x5f00, 0x75}, + {0x5f01, 0x75}, + {0x5f02, 0x75}, + {0x5f03, 0x75}, + {0x5f04, 0x75}, + {0x5f05, 0x75}, + {0x5f06, 0x75}, + {0x5f07, 0x75}, + {0x5f08, 0x75}, + {0x5f09, 0x75}, + {0x5f0a, 0x75}, + {0x5f0b, 0x75}, + {0x5f0c, 0x75}, + {0x5f0d, 0x75}, + {0x5f0e, 0x75}, + {0x5f0f, 0x75}, + {0x5f10, 0x75}, + {0x5f11, 0x75}, + {0x5f12, 0x75}, + {0x5f13, 0x75}, + {0x5f14, 0x75}, + {0x5f15, 0x75}, + {0x5f16, 0x75}, + {0x5f17, 0x75}, + {0x5f18, 0x75}, + {0x5f19, 0x75}, + {0x5f1a, 0x75}, + {0x5f1b, 0x75}, + {0x5f1c, 0x75}, + {0x5f1d, 0x75}, + {0x5f1e, 0x75}, + {0x5f1f, 0x75}, + {0x3813, 0x0f}, }; -static const struct ov08x40_reg mode_1928x1208_regs[] = { +// OV08X 4C1stg 1928x1088_DPHY150M-2L 30fps +static const struct ov08x40_reg lane_2_mode_1928x1088_regs_1500mbps[] = { {0x5000, 0x55}, {0x5001, 0x00}, + 
{0x5004, 0x53}, + {0x5110, 0x00}, + {0x5111, 0x14}, + {0x5112, 0x01}, + {0x5113, 0x7b}, + {0x5114, 0x00}, + {0x5152, 0xa3}, + {0x5a52, 0x1f}, + {0x5a1a, 0x0e}, + {0x5a1b, 0x10}, + {0x5a1f, 0x0e}, + {0x5a27, 0x0e}, + {0x6002, 0x2e}, {0x5008, 0xb0}, {0x50c1, 0x00}, {0x53c1, 0x00}, {0x5f40, 0x00}, {0x5f41, 0x40}, {0x0300, 0x3a}, - {0x0301, 0xc8}, + {0x0301, 0x88}, {0x0302, 0x31}, - {0x0303, 0x03}, + {0x0303, 0x05}, {0x0304, 0x01}, - {0x0305, 0xa1}, + {0x0305, 0x38}, {0x0306, 0x04}, - {0x0307, 0x01}, + {0x0307, 0x00}, {0x0308, 0x03}, - {0x0309, 0x03}, + {0x0309, 0x02}, {0x0310, 0x0a}, {0x0311, 0x02}, {0x0312, 0x01}, {0x0313, 0x08}, - {0x0314, 0x66}, + {0x0314, 0x00}, {0x0315, 0x00}, - {0x0316, 0x34}, + {0x0316, 0x2c}, {0x0320, 0x02}, {0x0321, 0x03}, {0x0323, 0x05}, @@ -729,7 +4667,7 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x198e, 0x00}, {0x198f, 0x01}, {0x3009, 0x04}, - {0x3012, 0x41}, + {0x3012, 0x21}, {0x3015, 0x00}, {0x3016, 0xb0}, {0x3017, 0xf0}, @@ -754,11 +4692,11 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x3058, 0x80}, {0x3059, 0x00}, {0x3107, 0x86}, - {0x3400, 0x1c}, + {0x3400, 0x30}, {0x3401, 0x80}, {0x3402, 0x8c}, {0x3419, 0x08}, - {0x341a, 0xaf}, + {0x341a, 0x4f}, {0x341b, 0x30}, {0x3420, 0x00}, {0x3421, 0x00}, @@ -858,19 +4796,19 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, - {0x3803, 0x00}, + {0x3803, 0x78}, {0x3804, 0x0f}, {0x3805, 0x1f}, {0x3806, 0x09}, - {0x3807, 0x7f}, + {0x3807, 0x07}, {0x3808, 0x07}, {0x3809, 0x88}, {0x380a, 0x04}, - {0x380b, 0xb8}, + {0x380b, 0x40}, {0x380c, 0x02}, - {0x380d, 0xd0}, - {0x380e, 0x11}, - {0x380f, 0x5c}, + {0x380d, 0xf0}, + {0x380e, 0x08}, + {0x380f, 0x4e}, {0x3810, 0x00}, {0x3811, 0x04}, {0x3812, 0x00}, @@ -880,7 +4818,7 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x3820, 0x02}, {0x3821, 0x14}, {0x3822, 0x00}, - {0x3823, 0x04}, + {0x3823, 0x84}, {0x3828, 0x0f}, {0x382a, 0x80}, {0x382e, 0x41}, @@ -895,7 +4833,7 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x3847, 0x00}, {0x384a, 0x00}, {0x384c, 0x02}, - {0x384d, 0xd0}, + {0x384d, 0xf0}, {0x3856, 0x50}, {0x3857, 0x30}, {0x3858, 0x80}, @@ -909,7 +4847,7 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x388d, 0x00}, {0x388e, 0x00}, {0x388f, 0x00}, - {0x3894, 0x00}, + {0x3894, 0x03}, {0x3895, 0x00}, {0x3c84, 0x00}, {0x3d85, 0x8b}, @@ -1082,7 +5020,7 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x4510, 0x00}, {0x4523, 0x00}, {0x4526, 0x00}, - {0x4542, 0x00}, + {0x4542, 0x01}, {0x4543, 0x00}, {0x4544, 0x00}, {0x4545, 0x00}, @@ -1101,9 +5039,86 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x480c, 0x80}, {0x480f, 0x32}, {0x4813, 0xe4}, - {0x4837, 0x14}, - {0x4850, 0x42}, + {0x4837, 0x0a}, + {0x4850, 0x47}, {0x4884, 0x04}, + {0x4911, 0x00}, + {0x4919, 0x00}, + {0x491a, 0x40}, + {0x4920, 0x04}, + {0x4921, 0x00}, + {0x4922, 0x04}, + {0x4923, 0x00}, + {0x4924, 0x04}, + {0x4925, 0x00}, + {0x4926, 0x04}, + {0x4927, 0x00}, + {0x4930, 0x00}, + {0x4931, 0x00}, + {0x4932, 0x00}, + {0x4933, 0x00}, + {0x4934, 0x00}, + {0x4935, 0x00}, + {0x4936, 0x00}, + {0x4937, 0x00}, + {0x4940, 0x00}, + {0x4941, 0x80}, + {0x4942, 0x00}, + {0x4943, 0x80}, + {0x4944, 0x00}, + {0x4945, 0x80}, + {0x4946, 0x00}, + {0x4947, 0x80}, + {0x4960, 0x00}, + {0x4961, 0x00}, + {0x4962, 0x00}, + {0x4963, 0x00}, + {0x4964, 0x00}, + {0x4965, 0x00}, + {0x4966, 0x00}, + {0x4967, 0x00}, + {0x4968, 0x00}, + {0x4969, 0x00}, + {0x496a, 0x00}, + {0x496b, 0x00}, + 
{0x496c, 0x00}, + {0x496d, 0x00}, + {0x496e, 0x00}, + {0x496f, 0x00}, + {0x4970, 0x00}, + {0x4971, 0x00}, + {0x4972, 0x00}, + {0x4973, 0x00}, + {0x4974, 0x00}, + {0x4975, 0x00}, + {0x4976, 0x00}, + {0x4977, 0x00}, + {0x4978, 0x00}, + {0x4979, 0x00}, + {0x497a, 0x00}, + {0x497b, 0x00}, + {0x497c, 0x00}, + {0x497d, 0x00}, + {0x497e, 0x00}, + {0x497f, 0x00}, + {0x49e0, 0x00}, + {0x49e1, 0x00}, + {0x49e2, 0x00}, + {0x49e3, 0x00}, + {0x49e4, 0x00}, + {0x49e5, 0x00}, + {0x49e6, 0x00}, + {0x49e7, 0x00}, + {0x49e8, 0x00}, + {0x49e9, 0x80}, + {0x49ea, 0x00}, + {0x49eb, 0x80}, + {0x49ec, 0x00}, + {0x49ed, 0x80}, + {0x49ee, 0x00}, + {0x49ef, 0x80}, + {0x49f0, 0x02}, + {0x49f1, 0x04}, {0x4c00, 0xf8}, {0x4c01, 0x44}, {0x4c03, 0x00}, @@ -1198,10 +5213,15 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = { {0x3407, 0x01}, {0x3404, 0x01}, {0x3500, 0x00}, - {0x3501, 0x08}, + {0x3501, 0x01}, {0x3502, 0x10}, - {0x3508, 0x04}, + {0x3508, 0x0f}, {0x3509, 0x00}, + {0x3541, 0x00}, + {0x3542, 0x80}, + {0x3548, 0x0f}, + {0x3549, 0x00}, + {0x3813, 0x03}, }; static const char * const ov08x40_test_pattern_menu[] = { @@ -1212,8 +5232,16 @@ static const char * const ov08x40_test_pattern_menu[] = { "Vertical Color Bar Type 4" }; +static const char * const ov08x40_module_names[] = { + [MODULE_OTHERS] = "", + [MODULE_BBG802N3] = "BBG802N3", + [MODULE_CJFME55] = "CJFME55", +}; + /* Configurations for supported link frequencies */ #define OV08X40_LINK_FREQ_400MHZ 400000000ULL +#define OV08X40_LINK_FREQ_749MHZ 749000000ULL +#define OV08X40_LINK_FREQ_800MHZ 800000000ULL #define OV08X40_SCLK_96MHZ 96000000ULL #define OV08X40_EXT_CLK 19200000 #define OV08X40_DATA_LANES 4 @@ -1233,6 +5261,8 @@ static u64 link_freq_to_pixel_rate(u64 f) /* Menu items for LINK_FREQ V4L2 control */ static const s64 link_freq_menu_items[] = { OV08X40_LINK_FREQ_400MHZ, + OV08X40_LINK_FREQ_749MHZ, + OV08X40_LINK_FREQ_800MHZ, }; /* Link frequency configs */ @@ -1243,6 +5273,18 @@ static const struct ov08x40_link_freq_config link_freq_configs[] = { .regs = mipi_data_rate_800mbps, } }, + [OV08X40_LINK_FREQ_749MHZ_INDEX] = { + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_800mbps), + .regs = mipi_data_rate_800mbps, + } + }, + [OV08X40_LINK_FREQ_800MHZ_INDEX] = { + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_800mbps), + .regs = mipi_data_rate_800mbps, + } + }, }; /* Mode configs */ @@ -1262,6 +5304,21 @@ static const struct ov08x40_mode supported_modes[] = { .exposure_shift = 1, .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN, }, + { + .width = 3856, + .height = 2176, + .vts_def = OV08X40_VTS_30FPS, + .vts_min = OV08X40_VTS_30FPS, + .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */ + .lanes = 4, + .reg_list = { + .num_of_regs = ARRAY_SIZE(lane_4_mode_3856x2176_regs_800mbps), + .regs = lane_4_mode_3856x2176_regs_800mbps, + }, + .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX, + .exposure_shift = 1, + .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN, + }, { .width = 1928, .height = 1208, @@ -1277,6 +5334,36 @@ static const struct ov08x40_mode supported_modes[] = { .exposure_shift = 0, .exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN, }, + { + .width = 3856, + .height = 2176, + .vts_def = OV08X40_VTS_30FPS, + .vts_min = OV08X40_VTS_30FPS, + .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */ + .lanes = 2, + .reg_list = { + .num_of_regs = ARRAY_SIZE(lane_2_mode_3856x2176_regs_1500mbps), + .regs = lane_2_mode_3856x2176_regs_1500mbps, + }, + .link_freq_index = OV08X40_LINK_FREQ_749MHZ_INDEX, + 
.exposure_shift = 1, + .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN, + }, + { + .width = 1928, + .height = 1088, + .vts_def = OV08X40_VTS_BIN_30FPS, + .vts_min = OV08X40_VTS_BIN_30FPS, + .llp = 0x960, + .lanes = 2, + .reg_list = { + .num_of_regs = ARRAY_SIZE(lane_2_mode_1928x1088_regs_1500mbps), + .regs = lane_2_mode_1928x1088_regs_1500mbps, + }, + .link_freq_index = OV08X40_LINK_FREQ_749MHZ_INDEX, + .exposure_shift = 0, + .exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN, + }, }; struct ov08x40 { @@ -1291,9 +5378,20 @@ struct ov08x40 { struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; + struct clk *img_clk; + struct regulator *avdd; + struct gpio_desc *reset; + + struct gpio_desc *handshake; + /* Current mode */ const struct ov08x40_mode *cur_mode; + /* MIPI lanes */ + u8 mipi_lanes; + + u8 nr_of_link_freq; + u8 module_name_index; /* Mutex for serialized access */ struct mutex mutex; @@ -1736,10 +5834,24 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd, if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10) fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; - mode = v4l2_find_nearest_size(supported_modes, + if (ov08x->mipi_lanes == 4) { + if (ov08x->nr_of_link_freq == 1) + mode = &supported_modes[0]; + else if (ov08x->module_name_index == 2) + mode = v4l2_find_nearest_size(supported_modes, + ARRAY_SIZE(supported_modes), + width, height, + fmt->format.width, fmt->format.height); + else + mode = &supported_modes[1]; + } else if (ov08x->mipi_lanes == 2) { + mode = &supported_modes[3]; + } else { + mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), width, height, fmt->format.width, fmt->format.height); + } ov08x40_update_pad_format(mode, fmt); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad); @@ -1813,9 +5925,10 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x) return ret; } + if (ov08x->mipi_lanes == 4 && ov08x->nr_of_link_freq == 1) { /* Use i2c burst to write register on full size registers */ - if (ov08x->cur_mode->exposure_shift == 1) { - ret = ov08x40_burst_fill_regs(ov08x, OV08X40_REG_XTALK_FIRST_A, + if (ov08x->cur_mode->exposure_shift == 1) { + ret = ov08x40_burst_fill_regs(ov08x, OV08X40_REG_XTALK_FIRST_A, OV08X40_REG_XTALK_LAST_A, 0x75); if (ret == 0) ret = ov08x40_burst_fill_regs(ov08x, @@ -1823,6 +5936,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x) OV08X40_REG_XTALK_LAST_B, 0x75); } + } if (ret) { dev_err(&client->dev, "%s failed to set regs\n", __func__); @@ -1883,6 +5997,49 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable) return ret; } +static void ov08x40_read_mipi_lanes(struct ov08x40 *ov08x40) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov08x40->sd); + struct mipi_camera_link_ssdb ssdb = {}; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_device *adev = ACPI_COMPANION(&client->dev); + union acpi_object *obj; + acpi_status status; + + if (!adev) { + dev_info(&client->dev, "Not ACPI device\n"); + return; + } + status = acpi_evaluate_object(adev->handle, "SSDB", NULL, &buffer); + if (ACPI_FAILURE(status)) { + dev_info(&client->dev, "ACPI fail: %d\n", -ENODEV); + return; + } + + obj = buffer.pointer; + if (!obj) { + dev_info(&client->dev, "Couldn't locate ACPI buffer\n"); + return; + } + + if (obj->type != ACPI_TYPE_BUFFER) { + dev_info(&client->dev, "Not an ACPI buffer\n"); + goto out_free_buff; + } + + if (obj->buffer.length > sizeof(ssdb)) { + dev_err(&client->dev, "Given buffer is too small\n"); + goto 
out_free_buff; + } + memcpy(&ssdb, obj->buffer.pointer, obj->buffer.length); + ov08x40->mipi_lanes = ssdb.lanes; + ov08x40->nr_of_link_freq = ssdb.link; + +out_free_buff: + kfree(buffer.pointer); +} + + /* Verify chip ID */ static int ov08x40_identify_module(struct ov08x40 *ov08x) { @@ -1909,6 +6066,50 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x) return 0; } +static int ov08x40_power_off(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + + gpiod_set_value_cansleep(ov08x40->reset, 1); + gpiod_set_value_cansleep(ov08x40->handshake, 0); + + if (ov08x40->avdd) + regulator_disable(ov08x40->avdd); + + clk_disable_unprepare(ov08x40->img_clk); + + return 0; +} + +static int ov08x40_power_on(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret; + + ret = clk_prepare_enable(ov08x40->img_clk); + if (ret < 0) { + dev_err(dev, "failed to enable imaging clock: %d", ret); + return ret; + } + + if (ov08x40->avdd) { + ret = regulator_enable(ov08x40->avdd); + if (ret < 0) { + dev_err(dev, "failed to enable avdd: %d", ret); + clk_disable_unprepare(ov08x40->img_clk); + return ret; + } + } + + gpiod_set_value_cansleep(ov08x40->handshake, 1); + gpiod_set_value_cansleep(ov08x40->reset, 0); + /* 5ms to wait ready after XSHUTDN assert */ + usleep_range(5000, 5500); + return 0; +} + static const struct v4l2_subdev_video_ops ov08x40_video_ops = { .s_stream = ov08x40_set_stream, }; @@ -2049,6 +6250,41 @@ static void ov08x40_free_controls(struct ov08x40 *ov08x) mutex_destroy(&ov08x->mutex); } +static int ov08x40_get_pm_resources(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x = to_ov08x40(sd); + int ret; + + ov08x->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ov08x->reset)) + return dev_err_probe(dev, PTR_ERR(ov08x->reset), + "failed to get reset gpio\n"); + + ov08x->img_clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(ov08x->img_clk)) + return dev_err_probe(dev, PTR_ERR(ov08x->img_clk), + "failed to get imaging clock\n"); + + ov08x->avdd = devm_regulator_get_optional(dev, "avdd"); + if (IS_ERR(ov08x->avdd)) { + ret = PTR_ERR(ov08x->avdd); + ov08x->avdd = NULL; + if (ret != -ENODEV) + return dev_err_probe(dev, ret, + "failed to get avdd regulator\n"); + } + + ov08x->handshake = devm_gpiod_get_optional(dev, "handshake", + GPIOD_OUT_LOW); + if (IS_ERR(ov08x->handshake)) + return dev_err_probe(dev, PTR_ERR(ov08x->handshake), + "failed to get handshake gpio\n"); + + + return 0; +} + static int ov08x40_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { @@ -2063,6 +6299,10 @@ static int ov08x40_check_hwcfg(struct device *dev) if (!fwnode) return -ENXIO; + ep = fwnode_graph_get_next_endpoint(fwnode, NULL); + if (!ep) + return -EPROBE_DEFER; + ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); if (ret) { @@ -2085,13 +6325,6 @@ static int ov08x40_check_hwcfg(struct device *dev) if (ret) return ret; - if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV08X40_DATA_LANES) { - dev_err(dev, "number of CSI2 data lanes %d is not supported", - bus_cfg.bus.mipi_csi2.num_data_lanes); - ret = -EINVAL; - goto out_err; - } - if (!bus_cfg.nr_of_link_frequencies) { dev_err(dev, "no link frequencies defined"); ret = -EINVAL; @@ -2106,10 +6339,12 @@ static int ov08x40_check_hwcfg(struct device *dev) } if (j == bus_cfg.nr_of_link_frequencies) { - dev_err(dev, "no link 
frequency %lld supported", - link_freq_menu_items[i]); - ret = -EINVAL; - goto out_err; + if (bus_cfg.nr_of_link_frequencies == 3) { + dev_err(dev, "no link frequency %lld supported", + link_freq_menu_items[i]); + ret = -EINVAL; + goto out_err; + } } } @@ -2119,12 +6354,45 @@ static int ov08x40_check_hwcfg(struct device *dev) return ret; } +static int ov08x40_read_module_name(struct ov08x40 *ov08x) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd); + struct device *dev = &client->dev; + union acpi_object *obj; + struct acpi_device *adev = ACPI_COMPANION(dev); + int i; + + ov08x->module_name_index = 0; + if (!adev) + return 0; + + obj = acpi_evaluate_dsm_typed(adev->handle, + &cio2_sensor_module_guid, 0x00, + 0x01, NULL, ACPI_TYPE_STRING); + if (!obj) + return 0; + + dev_dbg(dev, "module name: %s", obj->string.pointer); + for (i = 1; i < ARRAY_SIZE(ov08x40_module_names); i++) { + if (!strcmp(ov08x40_module_names[i], obj->string.pointer)) { + ov08x->module_name_index = i; + break; + } + } + ACPI_FREE(obj); + + return 0; +} static int ov08x40_probe(struct i2c_client *client) { struct ov08x40 *ov08x; int ret; bool full_power; + ov08x = devm_kzalloc(&client->dev, sizeof(*ov08x), GFP_KERNEL); + if (!ov08x) + return -ENOMEM; + /* Check HW config */ ret = ov08x40_check_hwcfg(&client->dev); if (ret) { @@ -2132,25 +6400,38 @@ static int ov08x40_probe(struct i2c_client *client) return ret; } - ov08x = devm_kzalloc(&client->dev, sizeof(*ov08x), GFP_KERNEL); - if (!ov08x) - return -ENOMEM; - /* Initialize subdev */ v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops); + ov08x40_get_pm_resources(&client->dev); full_power = acpi_dev_state_d0(&client->dev); if (full_power) { + ret = ov08x40_power_on(&client->dev); + + if (ret) { + dev_err(&client->dev, "failed to power on\n"); + return ret; + } /* Check module identity */ ret = ov08x40_identify_module(ov08x); if (ret) { dev_err(&client->dev, "failed to find sensor: %d\n", ret); - return ret; + goto error_probe_failed; } } + ov08x40_read_module_name(ov08x); + ov08x40_read_mipi_lanes(ov08x); + /* Set default mode to max resolution */ - ov08x->cur_mode = &supported_modes[0]; + if (ov08x->nr_of_link_freq == 1) + ov08x->cur_mode = &supported_modes[0]; + else if (ov08x->module_name_index == 2) + ov08x->cur_mode = &supported_modes[0]; + else + ov08x->cur_mode = &supported_modes[1]; + if (ov08x->mipi_lanes == 2) + ov08x->cur_mode = &supported_modes[3]; ret = ov08x40_init_controls(ov08x); if (ret) @@ -2187,6 +6468,9 @@ static int ov08x40_probe(struct i2c_client *client) error_handler_free: ov08x40_free_controls(ov08x); +error_probe_failed: + ov08x40_power_off(&client->dev); + return ret; } -- 2.34.1 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11/in-tree-build/000077500000000000000000000000001471702545500232235ustar00rootroot000000000000000001-media-ipu6-Workaround-to-build-PSYS.patch000066400000000000000000000013671471702545500331540ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11/in-tree-buildFrom 8ef0a32a11740260fdfbb08e711df8d6ed515b56 Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Fri, 13 Sep 2024 18:02:37 +0800 Subject: [PATCH 1/5] media: ipu6: Workaround to build PSYS Signed-off-by: Dongcheng Yan --- drivers/media/pci/intel/ipu6/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/intel/ipu6/Makefile b/drivers/media/pci/intel/ipu6/Makefile index a821b0a1567f..8a4f4cd7610d 100644 --- a/drivers/media/pci/intel/ipu6/Makefile +++ b/drivers/media/pci/intel/ipu6/Makefile @@ 
-21,3 +21,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ ipu6-isys-dwc-phy.o obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o +obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -- 2.43.0 0002-media-i2c-Add-sensors-config.patch000066400000000000000000000104341471702545500317230ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.11/in-tree-buildFrom 045f5beb34e0d4af6d9b5961830c8551f161ff16 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Mon, 25 Mar 2024 14:40:09 +0800 Subject: [PATCH 2/5] media: i2c: Add sensors config Signed-off-by: Hao Yao --- drivers/media/i2c/Kconfig | 71 ++++++++++++++++++++++++++++++++++++++ drivers/media/i2c/Makefile | 8 +++++ 2 files changed, 79 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 8ba096b8ebca..91728019c09d 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -127,6 +127,33 @@ config VIDEO_HI847 To compile this driver as a module, choose M here: the module will be called hi847. +config VIDEO_HM11B1 + tristate "Himax HM11B1 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM11B1 camera. + + To compile this driver as a module, choose M here: the + module will be called hm11b1. + +config VIDEO_HM2170 + tristate "Himax HM2170 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2170 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2170. + +config VIDEO_HM2172 + tristate "Himax HM2172 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2172 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2172. + config VIDEO_IMX208 tristate "Sony IMX208 sensor support" help @@ -346,6 +373,15 @@ config VIDEO_OV01A10 To compile this driver as a module, choose M here: the module will be called ov01a10. +config VIDEO_OV01A1S + tristate "OmniVision OV01A1S sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV01A1S camera. + + To compile this driver as a module, choose M here: the + module will be called ov01a1s. + config VIDEO_OV02A10 tristate "OmniVision OV02A10 sensor support" help @@ -355,6 +391,41 @@ config VIDEO_OV02A10 To compile this driver as a module, choose M here: the module will be called ov02a10. +config VIDEO_OV02C10 + tristate "OmniVision OV02C10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV02C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov02c10. + +config VIDEO_OV02E10 + tristate "OmniVision OV02E10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + ov02e10 camera. + +config VIDEO_OV05C10 + tristate "OmniVision OV05C10 sensor support" + depends on ACPI || COMPILE_TEST + select V4L2_CCI_I2C + help + This is a Video4Linux2 sensor driver for the OmniVision + OV05C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov05c10. + +config VIDEO_OV08A10 + tristate "OmniVision OV08A10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV08A10 camera sensor. + + To compile this driver as a module, choose M here: the + module will be called ov08a10. 
+ config VIDEO_OV08D10 tristate "OmniVision OV08D10 sensor support" help diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index fbb988bd067a..6c210508427b 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -44,6 +44,9 @@ obj-$(CONFIG_VIDEO_GC2145) += gc2145.o obj-$(CONFIG_VIDEO_HI556) += hi556.o obj-$(CONFIG_VIDEO_HI846) += hi846.o obj-$(CONFIG_VIDEO_HI847) += hi847.o +obj-$(CONFIG_VIDEO_HM11B1) += hm11b1.o +obj-$(CONFIG_VIDEO_HM2170) += hm2170.o +obj-$(CONFIG_VIDEO_HM2172) += hm2172.o obj-$(CONFIG_VIDEO_I2C) += video-i2c.o obj-$(CONFIG_VIDEO_IMX208) += imx208.o obj-$(CONFIG_VIDEO_IMX214) += imx214.o @@ -81,7 +84,12 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o +obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o +obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o +obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o +obj-$(CONFIG_VIDEO_OV05C10) += ov05c10.o +obj-$(CONFIG_VIDEO_OV08A10) += ov08a10.o obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12/000077500000000000000000000000001471702545500205645ustar00rootroot000000000000000003-media-ipu6-Use-module-parameter-to-set-isys-psys-fre.patch000066400000000000000000000043511471702545500340100ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12From cde3265769fe1ad857137367dea4ec16fcb80548 Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Tue, 30 Jul 2024 11:03:10 +0800 Subject: [PATCH 3/5] media: ipu6: Use module parameter to set isys/psys freq Signed-off-by: Hongju Wang Signed-off-by: Dongcheng Yan --- drivers/media/pci/intel/ipu6/ipu6.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index bbd646378ab3..5a683dbdf03a 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -33,6 +33,14 @@ #include "ipu6-platform-isys-csi2-reg.h" #include "ipu6-platform-regs.h" +static unsigned int isys_freq_override; +module_param(isys_freq_override, uint, 0660); +MODULE_PARM_DESC(isys_freq_override, "Override ISYS freq(mhz)"); + +static unsigned int psys_freq_override; +module_param(psys_freq_override, uint, 0660); +MODULE_PARM_DESC(psys_freq_override, "Override PSYS freq(mhz)"); + #define IPU6_PCI_BAR 0 struct ipu6_cell_program { @@ -387,6 +395,14 @@ ipu6_isys_init(struct pci_dev *pdev, struct device *parent, pdata->base = base; pdata->ipdata = ipdata; + /* Override the isys freq */ + if (isys_freq_override >= BUTTRESS_MIN_FORCE_IS_FREQ && + isys_freq_override <= BUTTRESS_MAX_FORCE_IS_FREQ) { + ctrl->ratio = isys_freq_override / BUTTRESS_IS_FREQ_STEP; + dev_dbg(&pdev->dev, "Override the isys freq:%u(mhz)\n", + isys_freq_override); + } + isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_ISYS_NAME); if (IS_ERR(isys_adev)) { @@ -433,6 +449,15 @@ ipu6_psys_init(struct pci_dev *pdev, struct device *parent, pdata->base = base; pdata->ipdata = ipdata; + /* Override the psys freq */ + if (psys_freq_override >= BUTTRESS_MIN_FORCE_PS_FREQ && + psys_freq_override <= BUTTRESS_MAX_FORCE_PS_FREQ) { + ctrl->ratio = psys_freq_override / BUTTRESS_PS_FREQ_STEP; + ctrl->qos_floor = psys_freq_override; + dev_dbg(&pdev->dev, "Override the psys freq:%u(mhz)\n", + psys_freq_override); + } + 
psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl, IPU6_PSYS_NAME); if (IS_ERR(psys_adev)) { -- 2.43.0 0004-media-ipu6-Don-t-disable-ATS-when-CPU-is-not-Meteor-.patch000066400000000000000000000026311471702545500331570ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12From 1b07d0caa6542fe17adf2d9f553e3f72525c5df0 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Wed, 18 Sep 2024 14:47:40 +0800 Subject: [PATCH 4/5] media: ipu6: Don't disable ATS when CPU is not Meteor Lake IPU6 on Arrow Lake shares the same PCI ID with Meteor Lake, so we can't use PCI ID to distinguish whether the hardware is Meteor Lake. Use boot_cpu_data.x86_vfm instead. Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu6/ipu6.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index 5a683dbdf03a..00ae55816f9d 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -3,6 +3,8 @@ * Copyright (C) 2013--2024 Intel Corporation */ +#include + #include #include #include @@ -493,8 +495,9 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver) int ret; /* disable IPU6 PCI ATS on mtl ES2 */ - if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 && - pci_ats_supported(dev)) + if ((boot_cpu_data.x86_vfm == INTEL_METEORLAKE || + boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L) && + boot_cpu_data.x86_stepping == 0x2 && pci_ats_supported(dev)) pci_disable_ats(dev); /* No PCI msi capability for IPU6EP */ -- 2.43.0 0005-media-ipu-bridge-Support-ov05c10-sensor.patch000066400000000000000000000016541471702545500312760ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12From 4777258d86079bc5c1da4a50e2d6547c1fcf277c Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Thu, 10 Oct 2024 15:13:52 +0800 Subject: [PATCH 5/5] media: ipu-bridge: Support ov05c10 sensor Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu-bridge.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index a0e9a71580b5..dd868286c02e 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -72,6 +72,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), /* Omnivision OV02E10 */ IPU_SENSOR_CONFIG("OVTI02E1", 1, 360000000), + /* Omnivision ov05c10 */ + IPU_SENSOR_CONFIG("OVTI05C1", 1, 480000000), /* Omnivision OV08A10 */ IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000), /* Omnivision OV08x40 */ -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12/in-tree-build/000077500000000000000000000000001471702545500232245ustar00rootroot000000000000000001-media-ipu6-Workaround-to-build-PSYS.patch000066400000000000000000000013671471702545500331550ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12/in-tree-buildFrom 8ef0a32a11740260fdfbb08e711df8d6ed515b56 Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Fri, 13 Sep 2024 18:02:37 +0800 Subject: [PATCH 1/5] media: ipu6: Workaround to build PSYS Signed-off-by: Dongcheng Yan --- drivers/media/pci/intel/ipu6/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/intel/ipu6/Makefile b/drivers/media/pci/intel/ipu6/Makefile index a821b0a1567f..8a4f4cd7610d 100644 --- a/drivers/media/pci/intel/ipu6/Makefile +++ b/drivers/media/pci/intel/ipu6/Makefile @@ -21,3 +21,4 @@ intel-ipu6-isys-y := ipu6-isys.o \ 
ipu6-isys-dwc-phy.o obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o +obj-$(CONFIG_VIDEO_INTEL_IPU6) += psys/ -- 2.43.0 0002-media-i2c-Add-sensors-config.patch000066400000000000000000000104341471702545500317240ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.12/in-tree-buildFrom 045f5beb34e0d4af6d9b5961830c8551f161ff16 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Mon, 25 Mar 2024 14:40:09 +0800 Subject: [PATCH 2/5] media: i2c: Add sensors config Signed-off-by: Hao Yao --- drivers/media/i2c/Kconfig | 71 ++++++++++++++++++++++++++++++++++++++ drivers/media/i2c/Makefile | 8 +++++ 2 files changed, 79 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 8ba096b8ebca..91728019c09d 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -127,6 +127,33 @@ config VIDEO_HI847 To compile this driver as a module, choose M here: the module will be called hi847. +config VIDEO_HM11B1 + tristate "Himax HM11B1 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM11B1 camera. + + To compile this driver as a module, choose M here: the + module will be called hm11b1. + +config VIDEO_HM2170 + tristate "Himax HM2170 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2170 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2170. + +config VIDEO_HM2172 + tristate "Himax HM2172 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2172 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2172. + config VIDEO_IMX208 tristate "Sony IMX208 sensor support" help @@ -346,6 +373,15 @@ config VIDEO_OV01A10 To compile this driver as a module, choose M here: the module will be called ov01a10. +config VIDEO_OV01A1S + tristate "OmniVision OV01A1S sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV01A1S camera. + + To compile this driver as a module, choose M here: the + module will be called ov01a1s. + config VIDEO_OV02A10 tristate "OmniVision OV02A10 sensor support" help @@ -355,6 +391,41 @@ config VIDEO_OV02A10 To compile this driver as a module, choose M here: the module will be called ov02a10. +config VIDEO_OV02C10 + tristate "OmniVision OV02C10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV02C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov02c10. + +config VIDEO_OV02E10 + tristate "OmniVision OV02E10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + ov02e10 camera. + +config VIDEO_OV05C10 + tristate "OmniVision OV05C10 sensor support" + depends on ACPI || COMPILE_TEST + select V4L2_CCI_I2C + help + This is a Video4Linux2 sensor driver for the OmniVision + OV05C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov05c10. + +config VIDEO_OV08A10 + tristate "OmniVision OV08A10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV08A10 camera sensor. + + To compile this driver as a module, choose M here: the + module will be called ov08a10. 
+ config VIDEO_OV08D10 tristate "OmniVision OV08D10 sensor support" help diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index fbb988bd067a..6c210508427b 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -44,6 +44,9 @@ obj-$(CONFIG_VIDEO_GC2145) += gc2145.o obj-$(CONFIG_VIDEO_HI556) += hi556.o obj-$(CONFIG_VIDEO_HI846) += hi846.o obj-$(CONFIG_VIDEO_HI847) += hi847.o +obj-$(CONFIG_VIDEO_HM11B1) += hm11b1.o +obj-$(CONFIG_VIDEO_HM2170) += hm2170.o +obj-$(CONFIG_VIDEO_HM2172) += hm2172.o obj-$(CONFIG_VIDEO_I2C) += video-i2c.o obj-$(CONFIG_VIDEO_IMX208) += imx208.o obj-$(CONFIG_VIDEO_IMX214) += imx214.o @@ -81,7 +84,12 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o +obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o +obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o +obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o +obj-$(CONFIG_VIDEO_OV05C10) += ov05c10.o +obj-$(CONFIG_VIDEO_OV08A10) += ov08a10.o obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8/000077500000000000000000000000001471702545500205115ustar00rootroot000000000000000001-media-ov08x40-Add-Tline-calculation-and-handshake-pi.patch000066400000000000000000000271471471702545500333760ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From a5624eebbdeb756d5d4b9fec9be10edaa3c3fd02 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 26 Mar 2024 11:17:06 +0800 Subject: [PATCH 01/12] media: ov08x40: Add Tline calculation and handshake pin support Signed-off-by: Hao Yao --- drivers/media/i2c/ov08x40.c | 203 ++++++++++++++++++++++++++++++------ 1 file changed, 173 insertions(+), 30 deletions(-) diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c index abbb0b774d43..aac42ad2ae3a 100644 --- a/drivers/media/i2c/ov08x40.c +++ b/drivers/media/i2c/ov08x40.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -34,7 +35,7 @@ /* V_TIMING internal */ #define OV08X40_REG_VTS 0x380e -#define OV08X40_VTS_30FPS 0x1388 +#define OV08X40_VTS_30FPS 0x09c4 #define OV08X40_VTS_BIN_30FPS 0x115c #define OV08X40_VTS_MAX 0x7fff @@ -44,8 +45,9 @@ /* Exposure control */ #define OV08X40_REG_EXPOSURE 0x3500 -#define OV08X40_EXPOSURE_MAX_MARGIN 31 -#define OV08X40_EXPOSURE_MIN 1 +#define OV08X40_EXPOSURE_MAX_MARGIN 8 +#define OV08X40_EXPOSURE_BIN_MAX_MARGIN 2 +#define OV08X40_EXPOSURE_MIN 4 #define OV08X40_EXPOSURE_STEP 1 #define OV08X40_EXPOSURE_DEFAULT 0x40 @@ -125,14 +127,17 @@ struct ov08x40_mode { /* V-timing */ u32 vts_def; u32 vts_min; - - /* HTS */ - u32 hts; + /* Line Length Pixels */ + u32 llp; /* Index of Link frequency config to be used */ u32 link_freq_index; /* Default register values */ struct ov08x40_reg_list reg_list; + + /* Exposure calculation */ + u16 exposure_margin; + u16 exposure_shift; }; static const struct ov08x40_reg mipi_data_rate_800mbps[] = { @@ -2354,7 +2359,7 @@ static const char * const ov08x40_test_pattern_menu[] = { /* Configurations for supported link frequencies */ #define OV08X40_LINK_FREQ_400MHZ 400000000ULL - +#define OV08X40_SCLK_96MHZ 96000000ULL #define OV08X40_EXT_CLK 19200000 #define OV08X40_DATA_LANES 4 @@ -2392,26 +2397,30 @@ static const struct ov08x40_mode supported_modes[] = { .height = 2416, .vts_def = OV08X40_VTS_30FPS, .vts_min = 
OV08X40_VTS_30FPS, - .hts = 640, + .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */ .lanes = 4, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_3856x2416_regs), .regs = mode_3856x2416_regs, }, .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX, + .exposure_shift = 1, + .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN, }, { .width = 1928, .height = 1208, .vts_def = OV08X40_VTS_BIN_30FPS, .vts_min = OV08X40_VTS_BIN_30FPS, - .hts = 720, + .llp = 0x960, .lanes = 4, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1928x1208_regs), .regs = mode_1928x1208_regs, }, .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX, + .exposure_shift = 0, + .exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN, }, }; @@ -2427,6 +2436,15 @@ struct ov08x40 { struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; + /* GPIO for reset */ + struct gpio_desc *reset; + /* GPIO for Lattice handshake */ + struct gpio_desc *handshake; + /* regulator */ + struct regulator *avdd; + /* Clock provider */ + struct clk *img_clk; + /* Current mode */ const struct ov08x40_mode *cur_mode; @@ -2664,13 +2682,24 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl) struct ov08x40, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd); s64 max; + int exp; + int fll; int ret = 0; /* Propagate change of current control to all related controls */ switch (ctrl->id) { case V4L2_CID_VBLANK: /* Update max exposure while meeting expected vblanking */ - max = ov08x->cur_mode->height + ctrl->val - OV08X40_EXPOSURE_MAX_MARGIN; + // max = ov08x->cur_mode->height + ctrl->val - OV08X40_EXPOSURE_MAX_MARGIN; + /* + * because in normal mode, 1 HTS = 0.5 tline + * fps = sclk / hts / vts + * so the vts value needs to be double + */ + max = ((ov08x->cur_mode->height + ctrl->val) << + ov08x->cur_mode->exposure_shift) - + ov08x->cur_mode->exposure_margin; + __v4l2_ctrl_modify_range(ov08x->exposure, ov08x->exposure->minimum, max, ov08x->exposure->step, max); @@ -2694,15 +2723,20 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl) ret = ov08x40_update_digital_gain(ov08x, ctrl->val); break; case V4L2_CID_EXPOSURE: + exp = (ctrl->val << ov08x->cur_mode->exposure_shift) - + ov08x->cur_mode->exposure_margin; + ret = ov08x40_write_reg(ov08x, OV08X40_REG_EXPOSURE, - OV08X40_REG_VALUE_24BIT, - ctrl->val); + OV08X40_REG_VALUE_24BIT, + exp); break; case V4L2_CID_VBLANK: + fll = ((ov08x->cur_mode->height + ctrl->val) << + ov08x->cur_mode->exposure_shift); + ret = ov08x40_write_reg(ov08x, OV08X40_REG_VTS, OV08X40_REG_VALUE_16BIT, - ov08x->cur_mode->height - + ctrl->val); + fll); break; case V4L2_CID_TEST_PATTERN: ret = ov08x40_enable_test_pattern(ov08x, ctrl->val); @@ -2812,6 +2846,7 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd, s64 h_blank; s64 pixel_rate; s64 link_freq; + u64 steps; mutex_lock(&ov08x->mutex); @@ -2839,13 +2874,22 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd, ov08x->cur_mode->height; vblank_min = ov08x->cur_mode->vts_min - ov08x->cur_mode->height; + + /* + * The frame length line should be aligned to a multiple of 4, + * as provided by the sensor vendor, in normal mode. + */ + steps = mode->exposure_shift == 1 ? 
4 : 1; + __v4l2_ctrl_modify_range(ov08x->vblank, vblank_min, OV08X40_VTS_MAX - ov08x->cur_mode->height, - 1, + steps, vblank_def); __v4l2_ctrl_s_ctrl(ov08x->vblank, vblank_def); - h_blank = ov08x->cur_mode->hts; + + h_blank = ov08x->cur_mode->llp - ov08x->cur_mode->width; + __v4l2_ctrl_modify_range(ov08x->hblank, h_blank, h_blank, 1, h_blank); } @@ -2941,6 +2985,51 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable) return ret; } +static int ov08x40_power_off(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret = 0; + + gpiod_set_value_cansleep(ov08x40->reset, 1); + gpiod_set_value_cansleep(ov08x40->handshake, 0); + if (ov08x40->avdd) + ret = regulator_disable(ov08x40->avdd); + clk_disable_unprepare(ov08x40->img_clk); + + return ret; +} + +static int ov08x40_power_on(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret; + + ret = clk_prepare_enable(ov08x40->img_clk); + if (ret < 0) { + dev_err(dev, "failed to enable imaging clock: %d", ret); + return ret; + } + if (ov08x40->avdd) { + ret = regulator_enable(ov08x40->avdd); + if (ret < 0) { + dev_err(dev, "failed to enable avdd: %d", ret); + clk_disable_unprepare(ov08x40->img_clk); + return ret; + } + } + gpiod_set_value_cansleep(ov08x40->handshake, 1); + gpiod_set_value_cansleep(ov08x40->reset, 0); + + /* Lattice MIPI aggregator with some version FW needs longer delay + after handshake triggered. We set 25ms as a safe value and wait + for a stable version FW. */ + msleep_interruptible(25); + + return ret; +} + /* Verify chip ID */ static int ov08x40_identify_module(struct ov08x40 *ov08x) { @@ -3035,7 +3124,8 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x) OV08X40_VTS_MAX - mode->height, 1, vblank_def); - hblank = ov08x->cur_mode->hts; + hblank = ov08x->cur_mode->llp - ov08x->cur_mode->width; + ov08x->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08x40_ctrl_ops, V4L2_CID_HBLANK, hblank, hblank, 1, hblank); @@ -3101,6 +3191,41 @@ static void ov08x40_free_controls(struct ov08x40 *ov08x) mutex_destroy(&ov08x->mutex); } + +static int ov08x40_get_pm_resources(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov08x40 *ov08x40 = to_ov08x40(sd); + int ret; + + ov08x40->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ov08x40->reset)) + return dev_err_probe(dev, PTR_ERR(ov08x40->reset), + "failed to get reset gpio\n"); + + ov08x40->handshake = devm_gpiod_get_optional(dev, "handshake", + GPIOD_OUT_LOW); + if (IS_ERR(ov08x40->handshake)) + return dev_err_probe(dev, PTR_ERR(ov08x40->handshake), + "failed to get handshake gpio\n"); + + ov08x40->img_clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(ov08x40->img_clk)) + return dev_err_probe(dev, PTR_ERR(ov08x40->img_clk), + "failed to get imaging clock\n"); + + ov08x40->avdd = devm_regulator_get_optional(dev, "avdd"); + if (IS_ERR(ov08x40->avdd)) { + ret = PTR_ERR(ov08x40->avdd); + ov08x40->avdd = NULL; + if (ret != -ENODEV) + return dev_err_probe(dev, ret, + "failed to get avdd regulator\n"); + } + + return 0; +} + static int ov08x40_check_hwcfg(struct device *dev) { struct v4l2_fwnode_endpoint bus_cfg = { @@ -3112,8 +3237,10 @@ static int ov08x40_check_hwcfg(struct device *dev) int ret; u32 ext_clk; - if (!fwnode) - return -ENXIO; + ep = fwnode_graph_get_next_endpoint(fwnode, NULL); + dev_dbg(dev, "fwnode_graph_get_next_endpoint = %d\n", ep); + if (!ep) + return -EPROBE_DEFER; 
ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency", &ext_clk); @@ -3128,10 +3255,6 @@ static int ov08x40_check_hwcfg(struct device *dev) return -EINVAL; } - ep = fwnode_graph_get_next_endpoint(fwnode, NULL); - if (!ep) - return -ENXIO; - ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); fwnode_handle_put(ep); if (ret) @@ -3174,6 +3297,7 @@ static int ov08x40_check_hwcfg(struct device *dev) static int ov08x40_probe(struct i2c_client *client) { struct ov08x40 *ov08x; + bool full_power; int ret; /* Check HW config */ @@ -3189,12 +3313,25 @@ static int ov08x40_probe(struct i2c_client *client) /* Initialize subdev */ v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops); + full_power = acpi_dev_state_d0(&client->dev); + if (full_power) { + dev_err(&client->dev, "start full_power\n"); + ret = ov08x40_get_pm_resources(&client->dev); + if (ret) + return ret; + ret = ov08x40_power_on(&client->dev); + if (ret) { + dev_err_probe(&client->dev, ret, + "failed to power on\n"); + goto probe_error_ret; + } - /* Check module identity */ - ret = ov08x40_identify_module(ov08x); - if (ret) { - dev_err(&client->dev, "failed to find sensor: %d\n", ret); - return ret; + /* Check module identity */ + ret = ov08x40_identify_module(ov08x); + if (ret) { + dev_err(&client->dev, "failed to find sensor: %d\n", ret); + return ret; + } } /* Set default mode to max resolution */ @@ -3226,7 +3363,10 @@ static int ov08x40_probe(struct i2c_client *client) * Device is already turned on by i2c-core with ACPI domain PM. * Enable runtime PM and turn off the device. */ - pm_runtime_set_active(&client->dev); + + /* Set the device's state to active if it's in D0 state. */ + if (full_power) + pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); @@ -3238,6 +3378,8 @@ static int ov08x40_probe(struct i2c_client *client) error_handler_free: ov08x40_free_controls(ov08x); +probe_error_ret: + ov08x40_power_off(&client->dev); return ret; } @@ -3275,6 +3417,7 @@ static struct i2c_driver ov08x40_i2c_driver = { module_i2c_driver(ov08x40_i2c_driver); MODULE_AUTHOR("Jason Chen "); -MODULE_AUTHOR("Shawn Tu"); +MODULE_AUTHOR("Qingwu Zhang "); +MODULE_AUTHOR("Shawn Tu "); MODULE_DESCRIPTION("OmniVision OV08X40 sensor driver"); MODULE_LICENSE("GPL"); -- 2.43.0 0002-media-Add-IPU6-and-supported-sensors-config.patch000066400000000000000000000113151471702545500320410ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From 768db18beee57328ca99ebaf465ad8d763167405 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Mon, 25 Mar 2024 14:40:09 +0800 Subject: [PATCH 02/12] media: Add IPU6 and supported sensors config Signed-off-by: Hao Yao --- drivers/media/i2c/Kconfig | 60 ++++++++++++++++++++++++++++++++ drivers/media/i2c/Makefile | 7 ++++ drivers/media/pci/intel/Kconfig | 1 + drivers/media/pci/intel/Makefile | 1 + 4 files changed, 69 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 4c3435921f19..6b5c33bcec7f 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -107,6 +107,33 @@ config VIDEO_HI847 To compile this driver as a module, choose M here: the module will be called hi847. +config VIDEO_HM11B1 + tristate "Himax HM11B1 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM11B1 camera. + + To compile this driver as a module, choose M here: the + module will be called hm11b1. 
+ +config VIDEO_HM2170 + tristate "Himax HM2170 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2170 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2170. + +config VIDEO_HM2172 + tristate "Himax HM2172 sensor support" + help + This is a Video4Linux2 sensor driver for the Himax + HM2172 camera. + + To compile this driver as a module, choose M here: the + module will be called hm2172. + config VIDEO_IMX208 tristate "Sony IMX208 sensor support" help @@ -313,6 +340,15 @@ config VIDEO_OV01A10 To compile this driver as a module, choose M here: the module will be called ov01a10. +config VIDEO_OV01A1S + tristate "OmniVision OV01A1S sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV01A1S camera. + + To compile this driver as a module, choose M here: the + module will be called ov01a1s. + config VIDEO_OV02A10 tristate "OmniVision OV02A10 sensor support" help @@ -322,6 +358,30 @@ config VIDEO_OV02A10 To compile this driver as a module, choose M here: the module will be called ov02a10. +config VIDEO_OV02C10 + tristate "OmniVision OV02C10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV02C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov02c10. + +config VIDEO_OV02E10 + tristate "OmniVision OV02E10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + ov02e10 camera. + +config VIDEO_OV08A10 + tristate "OmniVision OV08A10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV08A10 camera sensor. + + To compile this driver as a module, choose M here: the + module will be called ov08a10. + config VIDEO_OV08D10 tristate "OmniVision OV08D10 sensor support" help diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index dfbe6448b549..32dd623c6c81 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -42,6 +42,9 @@ obj-$(CONFIG_VIDEO_GC2145) += gc2145.o obj-$(CONFIG_VIDEO_HI556) += hi556.o obj-$(CONFIG_VIDEO_HI846) += hi846.o obj-$(CONFIG_VIDEO_HI847) += hi847.o +obj-$(CONFIG_VIDEO_HM11B1) += hm11b1.o +obj-$(CONFIG_VIDEO_HM2170) += hm2170.o +obj-$(CONFIG_VIDEO_HM2172) += hm2172.o obj-$(CONFIG_VIDEO_I2C) += video-i2c.o obj-$(CONFIG_VIDEO_IMX208) += imx208.o obj-$(CONFIG_VIDEO_IMX214) += imx214.o @@ -76,7 +79,11 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o +obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o +obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o +obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o +obj-$(CONFIG_VIDEO_OV08A10) += ov08a10.o obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig index ee4684159d3d..04cb3d253486 100644 --- a/drivers/media/pci/intel/Kconfig +++ b/drivers/media/pci/intel/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/ipu6/Kconfig" source "drivers/media/pci/intel/ivsc/Kconfig" config IPU_BRIDGE diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index f199a97e1d78..0ac987a40da5 100644 --- a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -4,4 +4,5 @@ # 
obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o obj-y += ipu3/ +obj-y += ipu6/ obj-y += ivsc/ -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8/0003-media-ipu-bridge-Support-more-sensors.patch000066400000000000000000000031011471702545500313150ustar00rootroot00000000000000From 4a50a9f94b16b4f88193d9d015114f16b3ea3a1f Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 26 Mar 2024 10:50:41 +0800 Subject: [PATCH 03/12] media: ipu-bridge: Support more sensors Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu-bridge.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index f980e3125a7b..2ac59e02b839 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -58,8 +58,27 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("INT3537", 1, 437000000), /* Omnivision ov13b10 */ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), + IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), /* GalaxyCore GC0310 */ IPU_SENSOR_CONFIG("INT0310", 0), + /* Omnivision ov01a10 */ + IPU_SENSOR_CONFIG("OVTI01A0", 1, 400000000), + /* Omnivision ov08x40 */ + IPU_SENSOR_CONFIG("OVTI08F4", 1, 400000000), + /* Himax hm11b1 */ + IPU_SENSOR_CONFIG("HIMX11B1", 1, 384000000), + /* Himax hm2170 */ + IPU_SENSOR_CONFIG("HIMX2170", 1, 384000000), + /* Himax hm2172 */ + IPU_SENSOR_CONFIG("HIMX2172", 1, 384000000), + /* Omnivision ov01a1s */ + IPU_SENSOR_CONFIG("OVTI01AS", 1, 400000000), + /* Omnivision ov02c10 */ + IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), + /* Omnivision ov02e10 */ + IPU_SENSOR_CONFIG("OVTI02E1", 1, 360000000), + /* Omnivision ov08a10 */ + IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000), }; static const struct ipu_property_names prop_names = { -- 2.43.0 0004-ACPI-scan-Defer-enumeration-of-devices-with-a-_DEP-p.patch000066400000000000000000000035621471702545500332620ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From 9e155aa642bd0f1187229952fb496026e6fcfee2 Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Wed, 7 Feb 2024 08:59:08 +0800 Subject: [PATCH 04/12] ACPI: scan: Defer enumeration of devices with a _DEP pointing to IVSC device Inside IVSC, switching ownership requires an interface with two different hardware modules, ACE and CSI. The software interface to these modules is based on Intel MEI framework. Usually mei client devices are dynamically created, so the info of consumers depending on mei client devices is not present in the firmware tables. This causes problems with the probe ordering with respect to drivers for consumers of these MEI client devices. But on these camera sensor devices, the ACPI nodes describing the sensors all have a _DEP dependency on the matching MEI bus ACPI device, so adding IVSC MEI bus ACPI device to acpi_honor_dep_ids allows solving the probe-ordering problem by deferring the enumeration of ACPI-devices which have a _DEP dependency on an IVSC mei bus ACPI device. Add INTC10CF, the HID of IVSC MEI bus ACPI device on MTL platform, to acpi_honor_dep_ids. Signed-off-by: Wentong Wu Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/scan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index e6ed1ba91e5c..f32a2c738c8b 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -798,6 +798,7 @@ static const char * const acpi_honor_dep_ids[] = { "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */ "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ + "INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */ NULL }; -- 2.43.0 0005-mei-vsc-Unregister-interrupt-handler-for-system-susp.patch000066400000000000000000000137021471702545500342730ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From afead1790dd3613ca675365447ce09cae75392b7 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Wed, 3 Apr 2024 13:13:41 +0800 Subject: [PATCH 05/12] mei: vsc: Unregister interrupt handler for system suspend Unregister the MEI VSC interrupt handler before system suspend and re-register it at system resume time. This mirrors implementation of other MEI devices. This patch fixes the bug that causes continuous stream of MEI VSC errors after system resume. Fixes: 386a766c4169 ("mei: Add MEI hardware support for IVSC device") Cc: stable@vger.kernel.org # for 6.8 Reported-by: Dominik Brodowski Signed-off-by: Wentong Wu Signed-off-by: Sakari Ailus Acked-by: Tomas Winkler Link: https://lore.kernel.org/r/20240403051341.3534650-2-wentong.wu@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/platform-vsc.c | 17 ++++++- drivers/misc/mei/vsc-tp.c | 84 +++++++++++++++++++++++---------- drivers/misc/mei/vsc-tp.h | 3 ++ 3 files changed, 78 insertions(+), 26 deletions(-) diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index 8d303c6c0000..8db0fcf24e70 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -402,25 +402,40 @@ static int mei_vsc_remove(struct platform_device *pdev) static int mei_vsc_suspend(struct device *dev) { struct mei_device *mei_dev = dev_get_drvdata(dev); + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); mei_stop(mei_dev); + mei_disable_interrupts(mei_dev); + + vsc_tp_free_irq(hw->tp); + return 0; } static int mei_vsc_resume(struct device *dev) { struct mei_device *mei_dev = dev_get_drvdata(dev); + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); int ret; - ret = mei_restart(mei_dev); + ret = vsc_tp_request_irq(hw->tp); if (ret) return ret; + ret = mei_restart(mei_dev); + if (ret) + goto err_free; + /* start timer if stopped in suspend */ schedule_delayed_work(&mei_dev->timer_work, HZ); return 0; + +err_free: + vsc_tp_free_irq(hw->tp); + + return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index 55f7db490d3b..dcf4d9bd3293 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -93,6 +93,27 @@ static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = { {} }; +static irqreturn_t vsc_tp_isr(int irq, void *data) +{ + struct vsc_tp *tp = data; + + atomic_inc(&tp->assert_cnt); + + wake_up(&tp->xfer_wait); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t vsc_tp_thread_isr(int irq, void *data) +{ + struct vsc_tp *tp = data; + + if (tp->event_notify) + tp->event_notify(tp->event_notify_context); + + return IRQ_HANDLED; +} + /* wakeup 
firmware and wait for response */ static int vsc_tp_wakeup_request(struct vsc_tp *tp) { @@ -380,6 +401,37 @@ int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, } EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP); +/** + * vsc_tp_request_irq - request irq for vsc_tp device + * @tp: vsc_tp device handle + */ +int vsc_tp_request_irq(struct vsc_tp *tp) +{ + struct spi_device *spi = tp->spi; + struct device *dev = &spi->dev; + int ret; + + irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); + ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(dev), tp); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP); + +/** + * vsc_tp_free_irq - free irq for vsc_tp device + * @tp: vsc_tp device handle + */ +void vsc_tp_free_irq(struct vsc_tp *tp) +{ + free_irq(tp->spi->irq, tp); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP); + /** * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt * @tp: vsc_tp device handle @@ -410,27 +462,6 @@ void vsc_tp_intr_disable(struct vsc_tp *tp) } EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP); -static irqreturn_t vsc_tp_isr(int irq, void *data) -{ - struct vsc_tp *tp = data; - - atomic_inc(&tp->assert_cnt); - - wake_up(&tp->xfer_wait); - - return IRQ_WAKE_THREAD; -} - -static irqreturn_t vsc_tp_thread_isr(int irq, void *data) -{ - struct vsc_tp *tp = data; - - if (tp->event_notify) - tp->event_notify(tp->event_notify_context); - - return IRQ_HANDLED; -} - static int vsc_tp_match_any(struct acpi_device *adev, void *data) { struct acpi_device **__adev = data; @@ -482,10 +513,9 @@ static int vsc_tp_probe(struct spi_device *spi) tp->spi = spi; irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); - ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr, - vsc_tp_thread_isr, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - dev_name(dev), tp); + ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(dev), tp); if (ret) return ret; @@ -519,6 +549,8 @@ static int vsc_tp_probe(struct spi_device *spi) err_destroy_lock: mutex_destroy(&tp->mutex); + free_irq(spi->irq, tp); + return ret; } @@ -529,6 +561,8 @@ static void vsc_tp_remove(struct spi_device *spi) platform_device_unregister(tp->pdev); mutex_destroy(&tp->mutex); + + free_irq(spi->irq, tp); } static const struct acpi_device_id vsc_tp_acpi_ids[] = { diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h index f9513ddc3e40..14ca195cbddc 100644 --- a/drivers/misc/mei/vsc-tp.h +++ b/drivers/misc/mei/vsc-tp.h @@ -37,6 +37,9 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen, int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, void *context); +int vsc_tp_request_irq(struct vsc_tp *tp); +void vsc_tp_free_irq(struct vsc_tp *tp); + void vsc_tp_intr_enable(struct vsc_tp *tp); void vsc_tp_intr_disable(struct vsc_tp *tp); void vsc_tp_intr_synchronize(struct vsc_tp *tp); -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8/0006-mei-vsc-reset-ivsc-during-shutdown.patch000066400000000000000000000037231471702545500307060ustar00rootroot00000000000000From a0816e198d3bb80809e0f1d895b4526ba1050bf3 Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Fri, 21 Jun 2024 11:02:52 +0800 Subject: [PATCH 06/12] mei: vsc: reset ivsc during shutdown During system shutdown, reset ivsc to have chipset in valid state. 
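As a rough illustration of the mechanism this patch relies on (not part of the patch itself, all names below are placeholders): an SPI driver can register a .shutdown callback that the SPI core invokes at reboot/power-off, which is where vsc-tp gets the chance to put the chipset back into a known state.

/*
 * Illustrative sketch only; placeholder names throughout. Shows the
 * generic spi_driver .shutdown hook used to quiesce a device at
 * system shutdown.
 */
#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
        /* normal bring-up would go here */
        return 0;
}

static void example_shutdown(struct spi_device *spi)
{
        /* quiesce the device: mask IRQs, assert reset, etc. */
}

static struct spi_driver example_spi_driver = {
        .probe    = example_probe,
        .shutdown = example_shutdown,
        .driver = {
                .name = "example-spi",
        },
};
module_spi_driver(example_spi_driver);

MODULE_DESCRIPTION("Sketch of an SPI driver with a shutdown hook");
MODULE_LICENSE("GPL");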
Signed-off-by: Wentong Wu --- drivers/misc/mei/vsc-tp.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index dcf4d9bd3293..eae5370b8316 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -328,12 +328,12 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) return ret; } - ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len); + ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len); if (ret) return ret; if (ibuf) - cpu_to_be32_array(ibuf, tp->rx_buf, words); + be32_to_cpu_array(ibuf, tp->rx_buf, words); return ret; } @@ -346,6 +346,8 @@ void vsc_tp_reset(struct vsc_tp *tp) { disable_irq(tp->spi->irq); + gpiod_set_value_cansleep(tp->resetfw, 1); + msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS); /* toggle reset pin */ gpiod_set_value_cansleep(tp->resetfw, 0); msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS); @@ -554,6 +556,19 @@ static int vsc_tp_probe(struct spi_device *spi) return ret; } +static void vsc_tp_shutdown(struct spi_device *spi) +{ + struct vsc_tp *tp = spi_get_drvdata(spi); + + platform_device_unregister(tp->pdev); + + mutex_destroy(&tp->mutex); + + vsc_tp_reset(tp); + + free_irq(spi->irq, tp); +} + static void vsc_tp_remove(struct spi_device *spi) { struct vsc_tp *tp = spi_get_drvdata(spi); @@ -577,6 +592,7 @@ MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids); static struct spi_driver vsc_tp_driver = { .probe = vsc_tp_probe, .remove = vsc_tp_remove, + .shutdown = vsc_tp_shutdown, .driver = { .name = "vsc-tp", .acpi_match_table = vsc_tp_acpi_ids, -- 2.43.0 0007-mei-vsc-Don-t-stop-restart-mei-device-during-system-.patch000066400000000000000000000055661471702545500337340ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From fb88323ce3ebd94952c479fc80faadf0fe7fc27a Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Mon, 27 May 2024 20:38:35 +0800 Subject: [PATCH 07/12] mei: vsc: Don't stop/restart mei device during system suspend/resume The dynamically created mei client device (mei csi) is used as one V4L2 sub device of the whole video pipeline, and the V4L2 connection graph is built by software node. The mei_stop() and mei_restart() will delete the old mei csi client device and create a new mei client device, which will cause the software node information saved in old mei csi device lost and the whole video pipeline will be broken. Removing mei_stop()/mei_restart() during system suspend/resume can fix the issue above and won't impact hardware actual power saving logic. 
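For illustration only (placeholder names, not the driver's real code): the pattern adopted here is a system-suspend callback that simply refuses to suspend while the device is busy, instead of tearing the device down and recreating it on resume.

/*
 * Sketch only. Returning an error from the suspend callback aborts
 * this system-suspend attempt; nothing is destroyed, so the
 * dynamically created client device and its software-node links
 * survive across suspend/resume.
 */
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm.h>

struct example_dev {
        struct mutex lock;
        bool busy;
};

static int example_suspend(struct device *dev)
{
        struct example_dev *edev = dev_get_drvdata(dev);
        int ret = 0;

        mutex_lock(&edev->lock);
        if (edev->busy)
                ret = -EAGAIN;  /* abort this suspend attempt */
        mutex_unlock(&edev->lock);

        return ret;
}

static int example_resume(struct device *dev)
{
        /* nothing to rebuild: the device was never torn down */
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);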
Fixes: f6085a96c973 ("mei: vsc: Unregister interrupt handler for system suspend") Cc: stable@vger.kernel.org # for 6.8+ Reported-by: Hao Yao Signed-off-by: Wentong Wu Reviewed-by: Sakari Ailus Tested-by: Jason Chen Tested-by: Sakari Ailus Acked-by: Tomas Winkler Link: https://lore.kernel.org/r/20240527123835.522384-1-wentong.wu@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/platform-vsc.c | 39 +++++++++++++-------------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index 8db0fcf24e70..5a445f3b96a9 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -401,41 +401,32 @@ static int mei_vsc_remove(struct platform_device *pdev) static int mei_vsc_suspend(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct mei_device *mei_dev; + int ret = 0; - mei_stop(mei_dev); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; - mei_disable_interrupts(mei_dev); + mutex_lock(&mei_dev->device_lock); - vsc_tp_free_irq(hw->tp); + if (!mei_write_is_idle(mei_dev)) + ret = -EAGAIN; - return 0; + mutex_unlock(&mei_dev->device_lock); + + return ret; } static int mei_vsc_resume(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); - int ret; - - ret = vsc_tp_request_irq(hw->tp); - if (ret) - return ret; - - ret = mei_restart(mei_dev); - if (ret) - goto err_free; + struct mei_device *mei_dev; - /* start timer if stopped in suspend */ - schedule_delayed_work(&mei_dev->timer_work, HZ); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; return 0; - -err_free: - vsc_tp_free_irq(hw->tp); - - return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); -- 2.43.0 0008-mei-vsc-Prevent-timeout-error-with-added-delay-post-.patch000066400000000000000000000027571471702545500340160ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From dfe37854c057a794ed83bc836bb7de0f2c0d738d Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Tue, 25 Jun 2024 16:10:44 +0800 Subject: [PATCH 08/12] mei: vsc: Prevent timeout error with added delay post-firmware download After completing the firmware download, the firmware requires some time to become functional. This change introduces additional sleep time before the first read operation to prevent a confusing timeout error in vsc_tp_xfer(). 
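As a hedged aside on how such poll constants are typically consumed (the register, ready bit and helper below are placeholders, not the driver's real read path): with a 100 ms poll interval and a 400 ms budget, the wait loop makes roughly four attempts before returning -ETIMEDOUT.

/*
 * Sketch only: poll a status register with the iopoll helpers using
 * delay/timeout constants analogous to the ones changed below.
 */
#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/time64.h>
#include <linux/types.h>

#define EXAMPLE_POLL_DELAY_US   (100 * USEC_PER_MSEC)   /* poll every 100 ms */
#define EXAMPLE_POLL_TIMEOUT_US (400 * USEC_PER_MSEC)   /* give up after 400 ms */

static int example_wait_ready(void __iomem *status_reg)
{
        u32 val;

        /* roughly four polls before -ETIMEDOUT is returned */
        return readl_poll_timeout(status_reg, val, val & BIT(0),
                                  EXAMPLE_POLL_DELAY_US,
                                  EXAMPLE_POLL_TIMEOUT_US);
}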
Fixes: 566f5ca97680 ("mei: Add transport driver for IVSC device") Cc: stable@vger.kernel.org # for 6.8+ Signed-off-by: Wentong Wu Tested-by: Jason Chen Acked-by: Sakari Ailus Link: https://lore.kernel.org/r/20240625081047.4178494-3-wentong.wu@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/platform-vsc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index 5a445f3b96a9..a59fb5a13bdc 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -28,8 +28,8 @@ #define MEI_VSC_MAX_MSG_SIZE 512 -#define MEI_VSC_POLL_DELAY_US (50 * USEC_PER_MSEC) -#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC) +#define MEI_VSC_POLL_DELAY_US (100 * USEC_PER_MSEC) +#define MEI_VSC_POLL_TIMEOUT_US (400 * USEC_PER_MSEC) #define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw)) -- 2.43.0 0009-media-ivsc-csi-don-t-count-privacy-on-as-error.patch000066400000000000000000000026201471702545500326200ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From ae82b86ec2162b66a7b07b7209496d971c799907 Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Mon, 3 Jun 2024 09:44:06 +0800 Subject: [PATCH 09/12] media: ivsc: csi: don't count privacy on as error Prior to the ongoing command privacy is on, it would return -1 to indicate the current privacy status, and the ongoing command would be well executed by firmware as well, so this is not error. This patch changes its behavior to notify privacy on directly by V4L2 privacy control instead of reporting error. Fixes: 29006e196a56 ("media: pci: intel: ivsc: Add CSI submodule") Reported-by: Hao Yao Signed-off-by: Wentong Wu Tested-by: Jason Chen --- drivers/media/pci/intel/ivsc/mei_csi.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index 15b905f66ab7..c58826dd5a50 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -188,7 +188,11 @@ static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len) /* command response status */ ret = csi->cmd_response.status; - if (ret) { + if (ret == -1) { + /* notify privacy on instead of reporting error */ + ret = 0; + v4l2_ctrl_s_ctrl(csi->privacy_ctrl, 1); + } else if (ret) { ret = -EINVAL; goto out; } -- 2.43.0 0010-media-ivsc-csi-add-separate-lock-for-v4l2-control-ha.patch000066400000000000000000000045231471702545500335250ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From 60955329017e6f64672dde27911f2ff00874c59f Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Mon, 3 Jun 2024 10:33:28 +0800 Subject: [PATCH 10/12] media: ivsc: csi: add separate lock for v4l2 control handler There're possibilities that privacy status change notification happens in the middle of the ongoing mei command which already takes the command lock, but v4l2_ctrl_s_ctrl() would also need the same lock prior to this patch, so this may results in circular locking problem. This patch adds one dedicated lock for v4l2 control handler to avoid described issue. 
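A minimal sketch of the locking pattern described above, with placeholder names (not the mei_csi code itself): the V4L2 control handler is given its own mutex, so control operations never take the driver's command lock and the circular-locking scenario cannot form.

/*
 * Sketch only: dedicate a mutex to the control handler, separate from
 * the lock that serializes device commands.
 */
#include <linux/mutex.h>
#include <media/v4l2-ctrls.h>

struct example_dev {
        struct v4l2_ctrl_handler ctrl_handler;
        struct mutex ctrl_lock;         /* serializes control operations only */
        struct mutex cmd_lock;          /* serializes device commands */
};

static int example_init_controls(struct example_dev *edev)
{
        int ret;

        mutex_init(&edev->ctrl_lock);

        ret = v4l2_ctrl_handler_init(&edev->ctrl_handler, 2);
        if (ret)
                return ret;

        /* assign before adding controls; the handler uses this lock from then on */
        edev->ctrl_handler.lock = &edev->ctrl_lock;

        return 0;
}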
Reported-by: Hao Yao Signed-off-by: Wentong Wu Tested-by: Jason Chen --- drivers/media/pci/intel/ivsc/mei_csi.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index c58826dd5a50..c730865bdd8a 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -123,6 +123,8 @@ struct mei_csi { struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *freq_ctrl; struct v4l2_ctrl *privacy_ctrl; + /* lock for v4l2 controls */ + struct mutex ctrl_lock; unsigned int remote_pad; /* start streaming or not */ int streaming; @@ -615,11 +617,13 @@ static int mei_csi_init_controls(struct mei_csi *csi) u32 max; int ret; + mutex_init(&csi->ctrl_lock); + ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2); if (ret) return ret; - csi->ctrl_handler.lock = &csi->lock; + csi->ctrl_handler.lock = &csi->ctrl_lock; max = ARRAY_SIZE(link_freq_menu_items) - 1; csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler, @@ -798,6 +802,7 @@ static int mei_csi_probe(struct mei_cl_device *cldev, err_ctrl_handler: v4l2_ctrl_handler_free(&csi->ctrl_handler); + mutex_destroy(&csi->ctrl_lock); v4l2_async_nf_unregister(&csi->notifier); v4l2_async_nf_cleanup(&csi->notifier); @@ -817,6 +822,7 @@ static void mei_csi_remove(struct mei_cl_device *cldev) v4l2_async_nf_unregister(&csi->notifier); v4l2_async_nf_cleanup(&csi->notifier); v4l2_ctrl_handler_free(&csi->ctrl_handler); + mutex_destroy(&csi->ctrl_lock); v4l2_async_unregister_subdev(&csi->subdev); v4l2_subdev_cleanup(&csi->subdev); media_entity_cleanup(&csi->subdev.entity); -- 2.43.0 0011-media-ivsc-csi-remove-privacy-status-in-struct-mei_c.patch000066400000000000000000000030511471702545500341150ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8From 3739cc06760919adfa44f10dcb6829a95834722a Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Mon, 3 Jun 2024 10:57:24 +0800 Subject: [PATCH 11/12] media: ivsc: csi: remove privacy status in struct mei_csi The privacy status is maintained by privacy_ctrl, on which all of the privacy status changes will go through, so there is no point in maintaining one more element any more. 
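Illustrative sketch of the simplification (placeholder names, not the driver's real notification handler): the privacy state is pushed straight into the V4L2 control, so no driver-private copy of the status needs to be maintained.

/*
 * Sketch only: update a boolean V4L2 control directly from an event
 * notification; the control framework keeps the current value.
 */
#include <linux/types.h>
#include <media/v4l2-ctrls.h>

struct example_csi {
        struct v4l2_ctrl *privacy_ctrl;
};

static void example_handle_privacy_notif(struct example_csi *csi, u32 status)
{
        /* no extra cache of the status in the driver struct */
        v4l2_ctrl_s_ctrl(csi->privacy_ctrl, status == 1);
}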
Reported-by: Hao Yao Signed-off-by: Wentong Wu Tested-by: Jason Chen --- drivers/media/pci/intel/ivsc/mei_csi.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index c730865bdd8a..8a905ba89b57 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -136,9 +136,6 @@ struct mei_csi { u32 nr_of_lanes; /* frequency of the CSI-2 link */ u64 link_freq; - - /* privacy status */ - enum ivsc_privacy_status status; }; static const struct v4l2_mbus_framefmt mei_csi_format_mbus_default = { @@ -269,10 +266,9 @@ static void mei_csi_rx(struct mei_cl_device *cldev) switch (notif.cmd_id) { case CSI_PRIVACY_NOTIF: - if (notif.cont.cont < CSI_PRIVACY_MAX) { - csi->status = notif.cont.cont; - v4l2_ctrl_s_ctrl(csi->privacy_ctrl, csi->status); - } + if (notif.cont.cont < CSI_PRIVACY_MAX) + v4l2_ctrl_s_ctrl(csi->privacy_ctrl, + notif.cont.cont == CSI_PRIVACY_ON); break; case CSI_SET_OWNER: case CSI_SET_CONF: -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patch/v6.8/0012-media-Support-ov05c10-camera-sensor.patch000066400000000000000000000043641471702545500305020ustar00rootroot00000000000000From 7a177f98e126861ab36623336e855be1541d3690 Mon Sep 17 00:00:00 2001 From: Dongcheng Yan Date: Thu, 25 Apr 2024 17:33:09 +0800 Subject: [PATCH 12/12] media: Support ov05c10 camera sensor Signed-off-by: Dongcheng Yan --- drivers/media/i2c/Kconfig | 11 +++++++++++ drivers/media/i2c/Makefile | 1 + drivers/media/pci/intel/ipu-bridge.c | 2 ++ 3 files changed, 14 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 6b5c33bcec7f..be1c61deff9d 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -373,6 +373,17 @@ config VIDEO_OV02E10 This is a Video4Linux2 sensor driver for the OmniVision ov02e10 camera. +config VIDEO_OV05C10 + tristate "OmniVision OV05C10 sensor support" + depends on ACPI || COMPILE_TEST + select V4L2_CCI_I2C + help + This is a Video4Linux2 sensor driver for the OmniVision + OV05C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov05c10. 
+ config VIDEO_OV08A10 tristate "OmniVision OV08A10 sensor support" help diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 32dd623c6c81..2c1765d5650c 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -83,6 +83,7 @@ obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o +obj-$(CONFIG_VIDEO_OV05C10) += ov05c10.o obj-$(CONFIG_VIDEO_OV08A10) += ov08a10.o obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index 2ac59e02b839..f6907740ffdf 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -77,6 +77,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), /* Omnivision ov02e10 */ IPU_SENSOR_CONFIG("OVTI02E1", 1, 360000000), + /* Omnivision ov05c10 */ + IPU_SENSOR_CONFIG("OVTI05C1", 1, 480000000), /* Omnivision ov08a10 */ IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000), }; -- 2.43.0 ipu6-drivers-0~git202411190607.0ad49882/patches/000077500000000000000000000000001471702545500203405ustar00rootroot00000000000000ipu6-drivers-0~git202411190607.0ad49882/patches/0001-v6.10-IPU6-headers-used-by-PSYS.patch000066400000000000000000001167641471702545500270240ustar00rootroot00000000000000From cec333719e0f91769395e20ddc166b971327c97c Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Sun, 29 Sep 2024 15:34:50 +0800 Subject: [PATCH] v6.10 IPU6 headers used by PSYS Signed-off-by: Hao Yao --- drivers/media/pci/intel/ipu6/ipu6-bus.h | 58 +++ drivers/media/pci/intel/ipu6/ipu6-buttress.h | 92 +++++ drivers/media/pci/intel/ipu6/ipu6-cpd.h | 105 ++++++ drivers/media/pci/intel/ipu6/ipu6-fw-com.h | 47 +++ drivers/media/pci/intel/ipu6/ipu6-mmu.h | 73 ++++ .../intel/ipu6/ipu6-platform-buttress-regs.h | 226 ++++++++++++ .../media/pci/intel/ipu6/ipu6-platform-regs.h | 179 +++++++++ drivers/media/pci/intel/ipu6/ipu6.h | 342 ++++++++++++++++++ 8 files changed, 1122 insertions(+) create mode 100644 drivers/media/pci/intel/ipu6/ipu6-bus.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-buttress.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-cpd.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-fw-com.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-mmu.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6-platform-regs.h create mode 100644 drivers/media/pci/intel/ipu6/ipu6.h diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.h b/drivers/media/pci/intel/ipu6/ipu6-bus.h new file mode 100644 index 000000000..b26c6aee1 --- /dev/null +++ b/drivers/media/pci/intel/ipu6/ipu6-bus.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013 - 2024 Intel Corporation */ + +#ifndef IPU6_BUS_H +#define IPU6_BUS_H + +#include +#include +#include +#include +#include +#include +#include + +struct firmware; +struct pci_dev; + +#define IPU6_BUS_NAME IPU6_NAME "-bus" + +struct ipu6_buttress_ctrl; + +struct ipu6_bus_device { + struct auxiliary_device auxdev; + struct auxiliary_driver *auxdrv; + const struct ipu6_auxdrv_data *auxdrv_data; + struct list_head list; + void *pdata; + struct ipu6_mmu *mmu; + struct ipu6_device *isp; + struct ipu6_buttress_ctrl *ctrl; + u64 dma_mask; + const struct firmware *fw; + struct sg_table fw_sgt; + u64 *pkg_dir; + 
dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; +}; + +struct ipu6_auxdrv_data { + irqreturn_t (*isr)(struct ipu6_bus_device *adev); + irqreturn_t (*isr_threaded)(struct ipu6_bus_device *adev); + bool wake_isr_thread; +}; + +#define to_ipu6_bus_device(_dev) \ + container_of(to_auxiliary_dev(_dev), struct ipu6_bus_device, auxdev) +#define auxdev_to_adev(_auxdev) \ + container_of(_auxdev, struct ipu6_bus_device, auxdev) +#define ipu6_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) + +struct ipu6_bus_device * +ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent, + void *pdata, struct ipu6_buttress_ctrl *ctrl, + char *name); +int ipu6_bus_add_device(struct ipu6_bus_device *adev); +void ipu6_bus_del_devices(struct pci_dev *pdev); + +#endif diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/drivers/media/pci/intel/ipu6/ipu6-buttress.h new file mode 100644 index 000000000..9b6f56958 --- /dev/null +++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_BUTTRESS_H +#define IPU6_BUTTRESS_H + +#include +#include +#include +#include + +struct device; +struct firmware; +struct ipu6_device; +struct ipu6_bus_device; + +#define BUTTRESS_PS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32) + +#define BUTTRESS_IS_FREQ_STEP 25U +#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8) +#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 22) + +struct ipu6_buttress_ctrl { + u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; + unsigned int ratio; + unsigned int qos_floor; + bool started; +}; + +struct ipu6_buttress_ipc { + struct completion send_complete; + struct completion recv_complete; + u32 nack; + u32 nack_mask; + u32 recv_data; + u32 csr_out; + u32 csr_in; + u32 db0_in; + u32 db0_out; + u32 data0_out; + u32 data0_in; +}; + +struct ipu6_buttress { + struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; + struct ipu6_buttress_ipc cse; + struct ipu6_buttress_ipc ish; + struct list_head constraints; + u32 wdt_cached_value; + bool force_suspend; + u32 ref_clk; +}; + +enum ipu6_buttress_ipc_domain { + IPU6_BUTTRESS_IPC_CSE, + IPU6_BUTTRESS_IPC_ISH, +}; + +struct ipu6_ipc_buttress_bulk_msg { + u32 cmd; + u32 expected_resp; + bool require_resp; + u8 cmd_size; +}; + +int ipu6_buttress_ipc_reset(struct ipu6_device *isp, + struct ipu6_buttress_ipc *ipc); +int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys, + const struct firmware *fw, + struct sg_table *sgt); +void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys, + struct sg_table *sgt); +int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl, + bool on); +bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp); +int ipu6_buttress_authenticate(struct ipu6_device *isp); +int ipu6_buttress_reset_authentication(struct ipu6_device *isp); +bool ipu6_buttress_auth_done(struct ipu6_device *isp); +int ipu6_buttress_start_tsc_sync(struct ipu6_device *isp); +void ipu6_buttress_tsc_read(struct ipu6_device *isp, u64 *val); +u64 ipu6_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu6_device *isp); + +irqreturn_t ipu6_buttress_isr(int irq, void *isp_ptr); +irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr); +int ipu6_buttress_init(struct ipu6_device *isp); +void ipu6_buttress_exit(struct ipu6_device *isp); +void 
ipu6_buttress_csi_port_config(struct ipu6_device *isp, + u32 legacy, u32 combo); +void ipu6_buttress_restore(struct ipu6_device *isp); +#endif /* IPU6_BUTTRESS_H */ diff --git a/drivers/media/pci/intel/ipu6/ipu6-cpd.h b/drivers/media/pci/intel/ipu6/ipu6-cpd.h new file mode 100644 index 000000000..e0e4fdeca --- /dev/null +++ b/drivers/media/pci/intel/ipu6/ipu6-cpd.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2015--2024 Intel Corporation */ + +#ifndef IPU6_CPD_H +#define IPU6_CPD_H + +struct ipu6_device; +struct ipu6_bus_device; + +#define IPU6_CPD_SIZE_OF_FW_ARCH_VERSION 7 +#define IPU6_CPD_SIZE_OF_SYSTEM_VERSION 11 +#define IPU6_CPD_SIZE_OF_COMPONENT_NAME 12 + +#define IPU6_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 + +#define IPU6_CPD_METADATA_IMAGE_TYPE_RESERVED 0 +#define IPU6_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 +#define IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 + +#define IPU6_CPD_PKG_DIR_PSYS_SERVER_IDX 0 +#define IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX 1 + +#define IPU6_CPD_PKG_DIR_CLIENT_PG_TYPE 3 + +#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48 +#define IPU6SE_CPD_METADATA_HASH_KEY_SIZE 32 + +struct ipu6_cpd_module_data_hdr { + u32 hdr_len; + u32 endian; + u32 fw_pkg_date; + u32 hive_sdk_date; + u32 compiler_date; + u32 target_platform_type; + u8 sys_ver[IPU6_CPD_SIZE_OF_SYSTEM_VERSION]; + u8 fw_arch_ver[IPU6_CPD_SIZE_OF_FW_ARCH_VERSION]; + u8 rsvd[2]; +} __packed; + +/* + * ipu6_cpd_hdr structure updated as the chksum and + * sub_partition_name is unused on host side + * CSE layout version 1.6 for IPU6SE (hdr_len = 0x10) + * CSE layout version 1.7 for IPU6 (hdr_len = 0x14) + */ +struct ipu6_cpd_hdr { + u32 hdr_mark; + u32 ent_cnt; + u8 hdr_ver; + u8 ent_ver; + u8 hdr_len; +} __packed; + +struct ipu6_cpd_ent { + u8 name[IPU6_CPD_SIZE_OF_COMPONENT_NAME]; + u32 offset; + u32 len; + u8 rsvd[4]; +} __packed; + +struct ipu6_cpd_metadata_cmpnt_hdr { + u32 id; + u32 size; + u32 ver; +} __packed; + +struct ipu6_cpd_metadata_cmpnt { + struct ipu6_cpd_metadata_cmpnt_hdr hdr; + u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +} __packed; + +struct ipu6se_cpd_metadata_cmpnt { + struct ipu6_cpd_metadata_cmpnt_hdr hdr; + u8 sha2_hash[IPU6SE_CPD_METADATA_HASH_KEY_SIZE]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +} __packed; + +struct ipu6_cpd_metadata_extn { + u32 extn_type; + u32 len; + u32 img_type; + u8 rsvd[16]; +} __packed; + +struct ipu6_cpd_client_pkg_hdr { + u32 prog_list_offs; + u32 prog_list_size; + u32 prog_desc_offs; + u32 prog_desc_size; + u32 pg_manifest_offs; + u32 pg_manifest_size; + u32 prog_bin_offs; + u32 prog_bin_size; +} __packed; + +int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src); +void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev); +int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file, + unsigned long cpd_file_size); +#endif /* IPU6_CPD_H */ diff --git a/drivers/media/pci/intel/ipu6/ipu6-fw-com.h b/drivers/media/pci/intel/ipu6/ipu6-fw-com.h new file mode 100644 index 000000000..b02285a3e --- /dev/null +++ b/drivers/media/pci/intel/ipu6/ipu6-fw-com.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_FW_COM_H +#define IPU6_FW_COM_H + +struct ipu6_fw_com_context; +struct ipu6_bus_device; + +struct ipu6_fw_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +#define 
SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0 + +struct ipu6_fw_com_cfg { + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ipu6_fw_syscom_queue_config *input; + struct ipu6_fw_syscom_queue_config *output; + + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + int (*cell_ready)(struct ipu6_bus_device *adev); + void (*cell_start)(struct ipu6_bus_device *adev); + + unsigned int buttress_boot_offset; +}; + +void *ipu6_fw_com_prepare(struct ipu6_fw_com_cfg *cfg, + struct ipu6_bus_device *adev, void __iomem *base); + +int ipu6_fw_com_open(struct ipu6_fw_com_context *ctx); +bool ipu6_fw_com_ready(struct ipu6_fw_com_context *ctx); +int ipu6_fw_com_close(struct ipu6_fw_com_context *ctx); +int ipu6_fw_com_release(struct ipu6_fw_com_context *ctx, unsigned int force); + +void *ipu6_recv_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void ipu6_recv_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void *ipu6_send_get_token(struct ipu6_fw_com_context *ctx, int q_nbr); +void ipu6_send_put_token(struct ipu6_fw_com_context *ctx, int q_nbr); + +#endif diff --git a/drivers/media/pci/intel/ipu6/ipu6-mmu.h b/drivers/media/pci/intel/ipu6/ipu6-mmu.h new file mode 100644 index 000000000..21cdb0f14 --- /dev/null +++ b/drivers/media/pci/intel/ipu6/ipu6-mmu.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2013--2024 Intel Corporation */ + +#ifndef IPU6_MMU_H +#define IPU6_MMU_H + +#define ISYS_MMID 1 +#define PSYS_MMID 0 + +#include +#include +#include + +struct device; +struct page; +struct ipu6_hw_variants; + +struct ipu6_mmu_info { + struct device *dev; + + u32 *l1_pt; + u32 l1_pt_dma; + u32 **l2_pts; + + u32 *dummy_l2_pt; + u32 dummy_l2_pteval; + void *dummy_page; + u32 dummy_page_pteval; + + dma_addr_t aperture_start; + dma_addr_t aperture_end; + unsigned long pgsize_bitmap; + + spinlock_t lock; /* Serialize access to users */ + struct ipu6_dma_mapping *dmap; +}; + +struct ipu6_mmu { + struct list_head node; + + struct ipu6_mmu_hw *mmu_hw; + unsigned int nr_mmus; + unsigned int mmid; + + phys_addr_t pgtbl; + struct device *dev; + + struct ipu6_dma_mapping *dmap; + struct list_head vma_list; + + struct page *trash_page; + dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ + dma_addr_t iova_trash_page; /* IOVA for IPU6 child nodes to use */ + + bool ready; + spinlock_t ready_lock; /* Serialize access to bool ready */ + + void (*tlb_invalidate)(struct ipu6_mmu *mmu); +}; + +struct ipu6_mmu *ipu6_mmu_init(struct device *dev, + void __iomem *base, int mmid, + const struct ipu6_hw_variants *hw); +void ipu6_mmu_cleanup(struct ipu6_mmu *mmu); +int ipu6_mmu_hw_init(struct ipu6_mmu *mmu); +void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu); +int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova, + phys_addr_t paddr, size_t size); +size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova, + size_t size); +phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info, + dma_addr_t iova); +#endif diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h b/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h new file mode 100644 index 000000000..20f27011d --- /dev/null +++ b/drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2023--2024 Intel Corporation */ + +#ifndef IPU6_PLATFORM_BUTTRESS_REGS_H +#define 
IPU6_PLATFORM_BUTTRESS_REGS_H + +#include + +/* IS_WORKPOINT_REQ */ +#define IPU6_BUTTRESS_REG_IS_FREQ_CTL 0x34 +/* PS_WORKPOINT_REQ */ +#define IPU6_BUTTRESS_REG_PS_FREQ_CTL 0x38 + +/* should be tuned for real silicon */ +#define IPU6_IS_FREQ_CTL_DEFAULT_RATIO 0x08 +#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a +#define IPU6_PS_FREQ_CTL_DEFAULT_RATIO 0x0d + +#define IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10 +#define IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708 + +#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 +#define IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK GENMASK(4, 3) + +#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 +#define IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK GENMASK(7, 6) + +#define IPU6_BUTTRESS_PWR_STATE_DN_DONE 0x0 +#define IPU6_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 +#define IPU6_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 +#define IPU6_BUTTRESS_PWR_STATE_UP_DONE 0x3 + +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_0 0x270 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_1 0x274 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_2 0x278 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_4 0x280 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_5 0x284 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_6 0x288 +#define IPU6_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c + +#define BUTTRESS_REG_WDT 0x8 +#define BUTTRESS_REG_BTRS_CTRL 0xc +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) +#define BUTTRESS_REG_BTRS_CTRL_REF_CLK_IND GENMASK(9, 8) + +#define BUTTRESS_REG_FW_RESET_CTL 0x30 +#define BUTTRESS_FW_RESET_CTL_START BIT(0) +#define BUTTRESS_FW_RESET_CTL_DONE BIT(1) + +#define BUTTRESS_REG_IS_FREQ_CTL 0x34 +#define BUTTRESS_REG_PS_FREQ_CTL 0x38 + +#define BUTTRESS_FREQ_CTL_START BIT(31) +#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL GENMASK(19, 16) +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK GENMASK(15, 8) +#define BUTTRESS_FREQ_CTL_RATIO_MASK GENMASK(7, 0) + +#define BUTTRESS_REG_PWR_STATE 0x5c + +#define BUTTRESS_PWR_STATE_RESET 0x0 +#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 +#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 +#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 + +#define BUTTRESS_PWR_STATE_HH_STATUS_MASK GENMASK(12, 11) + +enum { + BUTTRESS_PWR_STATE_HH_STATE_IDLE, + BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, + BUTTRESS_PWR_STATE_HH_STATE_DONE, + BUTTRESS_PWR_STATE_HH_STATE_ERR, +}; + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK GENMASK(23, 19) + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK GENMASK(28, 24) + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 +#define 
BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 + +#define BUTTRESS_REG_SECURITY_CTL 0x300 +#define BUTTRESS_REG_SKU 0x314 +#define BUTTRESS_REG_SECURITY_TOUCH 0x318 +#define BUTTRESS_REG_CAMERA_MASK 0x84 + +#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) +#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) + +#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) +#define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) +#define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) + +#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 +#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C +#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80 + +#define BUTTRESS_REG_ISR_STATUS 0x90 +#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 +#define BUTTRESS_REG_ISR_ENABLE 0x98 +#define BUTTRESS_REG_ISR_CLEAR 0x9C + +#define BUTTRESS_ISR_IS_IRQ BIT(0) +#define BUTTRESS_ISR_PS_IRQ BIT(1) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) +#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) +#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) +#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) +#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) +#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) +#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) +#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) +#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) +#define BUTTRESS_ISR_HW_ASSERTION BIT(12) +#define BUTTRESS_ISR_IS_CORRECTABLE_MEM_ERR BIT(13) +#define BUTTRESS_ISR_IS_FATAL_MEM_ERR BIT(14) +#define BUTTRESS_ISR_IS_NON_FATAL_MEM_ERR BIT(15) +#define BUTTRESS_ISR_PS_CORRECTABLE_MEM_ERR BIT(16) +#define BUTTRESS_ISR_PS_FATAL_MEM_ERR BIT(17) +#define BUTTRESS_ISR_PS_NON_FATAL_MEM_ERR BIT(18) +#define BUTTRESS_ISR_PS_FAST_THROTTLE BIT(19) +#define BUTTRESS_ISR_UFI_ERROR BIT(20) + +#define BUTTRESS_REG_IU2CSEDB0 0x100 + +#define BUTTRESS_IU2CSEDB0_BUSY BIT(31) +#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 + +#define BUTTRESS_REG_IU2CSEDATA0 0x104 + +#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 +#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 + +#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) +#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) +#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) +#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) + +#define BUTTRESS_REG_IU2CSECSR 0x108 + +#define 
BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5)
+
+#define BUTTRESS_REG_CSE2IUDB0 0x304
+#define BUTTRESS_REG_CSE2IUCSR 0x30C
+#define BUTTRESS_REG_CSE2IUDATA0 0x308
+
+/* 0x20 == NACK, 0xf == unknown command */
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK GENMASK(15, 0)
+
+#define BUTTRESS_REG_ISH2IUCSR 0x50
+#define BUTTRESS_REG_ISH2IUDB0 0x54
+#define BUTTRESS_REG_ISH2IUDATA0 0x58
+
+#define BUTTRESS_REG_IU2ISHDB0 0x10C
+#define BUTTRESS_REG_IU2ISHDATA0 0x110
+#define BUTTRESS_REG_IU2ISHDATA1 0x114
+#define BUTTRESS_REG_IU2ISHCSR 0x118
+
+#define BUTTRESS_REG_FABRIC_CMD 0x88
+
+#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0)
+#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4)
+
+#define BUTTRESS_REG_TSW_CTL 0x120
+#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8)
+
+#define BUTTRESS_REG_TSC_LO 0x164
+#define BUTTRESS_REG_TSC_HI 0x168
+
+#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+		       BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \
+		       BUTTRESS_ISR_IS_IRQ | BUTTRESS_ISR_PS_IRQ)
+
+#define BUTTRESS_EVENT (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+			BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \
+			BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \
+			BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | \
+			BUTTRESS_ISR_SAI_VIOLATION)
+#endif /* IPU6_PLATFORM_BUTTRESS_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h b/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
new file mode 100644
index 000000000..b883385ad
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6-platform-regs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2018 - 2024 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_REGS_H
+#define IPU6_PLATFORM_REGS_H
+
+#include
+
+/*
+ * IPU6 uses a uniform address space internally, therefore all subsystem
+ * registers are located in one single space starting from 0, but in different
+ * sections with different addresses. The subsystem offsets are defined as 0
+ * because each register definition already carries its own address offset.
+ */ +#define IPU6_UNIFIED_OFFSET 0 + +#define IPU6_ISYS_IOMMU0_OFFSET 0x2e0000 +#define IPU6_ISYS_IOMMU1_OFFSET 0x2e0500 +#define IPU6_ISYS_IOMMUI_OFFSET 0x2e0a00 + +#define IPU6_PSYS_IOMMU0_OFFSET 0x1b0000 +#define IPU6_PSYS_IOMMU1_OFFSET 0x1b0700 +#define IPU6_PSYS_IOMMU1R_OFFSET 0x1b0e00 +#define IPU6_PSYS_IOMMUI_OFFSET 0x1b1500 + +/* the offset from IOMMU base register */ +#define IPU6_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU6_MMU_L2_STREAM_ID_REG_OFFSET 0x4c +#define IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c + +#define IPU6_MMU_INFO_OFFSET 0x8 + +#define IPU6_ISYS_SPC_OFFSET 0x210000 + +#define IPU6SE_PSYS_SPC_OFFSET 0x110000 +#define IPU6_PSYS_SPC_OFFSET 0x118000 + +#define IPU6_ISYS_DMEM_OFFSET 0x200000 +#define IPU6_PSYS_DMEM_OFFSET 0x100000 + +#define IPU6_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000 +#define IPU6_REG_ISYS_UNISPART_IRQ_MASK 0x27c004 +#define IPU6_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008 +#define IPU6_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c +#define IPU6_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010 +#define IPU6_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014 +#define IPU6_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414 +#define IPU6_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418 +#define IPU6_ISYS_UNISPART_IRQ_CSI0 BIT(2) +#define IPU6_ISYS_UNISPART_IRQ_CSI1 BIT(3) +#define IPU6_ISYS_UNISPART_IRQ_SW BIT(22) + +#define IPU6_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c +#define IPU6_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210 +#define IPU6_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214 + +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110 +#define IPU6_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114 + +/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ +#define IPU6_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4)) + +#define IPU6_CSI_IRQ_NUM_PER_PIPE 4 +#define IPU6SE_ISYS_CSI_PORT_NUM 4 +#define IPU6_ISYS_CSI_PORT_NUM 8 + +#define IPU6_ISYS_CSI_PORT_IRQ(irq_num) BIT(irq_num) + +/* PKG DIR OFFSET in IMR in secure mode */ +#define IPU6_PKG_DIR_IMR_OFFSET 0x40 + +#define IPU6_ISYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU6_ISYS_SPC_STATUS_START BIT(1) +#define IPU6_ISYS_SPC_STATUS_RUN BIT(3) +#define IPU6_ISYS_SPC_STATUS_READY BIT(5) +#define IPU6_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU6_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU6_PSYS_REG_SPC_STATUS_CTRL 0x0 +#define IPU6_PSYS_REG_SPC_START_PC 0x4 +#define IPU6_PSYS_REG_SPC_ICACHE_BASE 0x10 +#define IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 + +#define IPU6_PSYS_SPC_STATUS_START BIT(1) +#define IPU6_PSYS_SPC_STATUS_RUN BIT(3) +#define IPU6_PSYS_SPC_STATUS_READY BIT(5) +#define IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU6_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU6_PSYS_REG_SPP0_STATUS_CTRL 0x20000 + +#define IPU6_INFO_ENABLE_SNOOP BIT(0) +#define IPU6_INFO_DEC_FORCE_FLUSH BIT(1) +#define IPU6_INFO_DEC_PASS_THROUGH BIT(2) +#define IPU6_INFO_ZLW BIT(3) +#define IPU6_INFO_REQUEST_DESTINATION_IOSF BIT(9) +#define IPU6_INFO_IMR_BASE BIT(10) +#define IPU6_INFO_IMR_DESTINED BIT(11) + +#define IPU6_INFO_REQUEST_DESTINATION_PRIMARY IPU6_INFO_REQUEST_DESTINATION_IOSF + +/* + * s2m_pixel_soc_pixel_remapping is dedicated for the enabling of the + 
* pixel s2m remap ability. Remap here means that s2m rearranges the order
+ * of the pixels in each group of 4 pixels.
+ * For example, mirroring remapping means that if the input's first 4 pixels
+ * are 1 2 3 4, then the output's first 4 pixels will be 4 3 2 1.
+ * 0xE4 comes from the s2m MAS document. It means no remapping.
+ */
+#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4
+/*
+ * csi_be_soc_pixel_remapping is for the enabling of the pixel remapping.
+ * This remapping is exactly like the stream2mmio remapping.
+ */
+#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xe4
+
+#define IPU6_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1ae000
+#define IPU6_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1af000
+#define IPU6_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xc)
+#define IPU6_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xc)
+#define IPU6_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xc + (n) * 0xc)
+
+enum ipu6_device_ab_group1_target_id {
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R16_GP,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER,
+	IPU6_DEVICE_AB_GROUP1_TARGET_ID_R18_AB,
+};
+
+enum nci_ab_access_mode {
+	NCI_AB_ACCESS_MODE_RW,	/* read & write */
+	NCI_AB_ACCESS_MODE_RO,	/* read only */
+	NCI_AB_ACCESS_MODE_WO,	/* write only */
+	NCI_AB_ACCESS_MODE_NA,	/* No access at all */
+};
+
+/* IRQ-related registers in PSYS */
+#define IPU6_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200
+#define IPU6_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204
+#define IPU6_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208
+#define IPU6_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c
+#define IPU6_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210
+#define IPU6_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214
+/* There are 8 FW interrupts, n = 0..7 */
+#define IPU6_PSYS_GPDEV_FWIRQ0 5
+#define IPU6_PSYS_GPDEV_FWIRQ1 6
+#define IPU6_PSYS_GPDEV_FWIRQ2 7
+#define IPU6_PSYS_GPDEV_FWIRQ3 8
+#define IPU6_PSYS_GPDEV_FWIRQ4 9
+#define IPU6_PSYS_GPDEV_FWIRQ5 10
+#define IPU6_PSYS_GPDEV_FWIRQ6 11
+#define IPU6_PSYS_GPDEV_FWIRQ7 12
+#define IPU6_PSYS_GPDEV_IRQ_FWIRQ(n) BIT(n)
+#define IPU6_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100)
+
+#endif /* IPU6_PLATFORM_REGS_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6.h b/drivers/media/pci/intel/ipu6/ipu6.h
new file mode 100644
index 000000000..92e3c3414
--- /dev/null
+++ b/drivers/media/pci/intel/ipu6/ipu6.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2013 - 2024 Intel Corporation */
+
+#ifndef IPU6_H
+#define IPU6_H
+
+#include
+#include
+#include
+
+#include "ipu6-buttress.h"
+
+struct firmware;
+struct pci_dev;
+struct ipu6_bus_device;
+
+#define IPU6_NAME "intel-ipu6"
+#define IPU6_MEDIA_DEV_MODEL_NAME "ipu6"
+
+#define IPU6SE_FIRMWARE_NAME "intel/ipu/ipu6se_fw.bin"
+#define IPU6EP_FIRMWARE_NAME "intel/ipu/ipu6ep_fw.bin"
+#define IPU6_FIRMWARE_NAME "intel/ipu/ipu6_fw.bin"
+#define IPU6EPMTL_FIRMWARE_NAME "intel/ipu/ipu6epmtl_fw.bin"
+#define IPU6EPADLN_FIRMWARE_NAME "intel/ipu/ipu6epadln_fw.bin"
+
+enum ipu6_version {
+	IPU6_VER_INVALID = 0,
+	IPU6_VER_6 = 1,
+	IPU6_VER_6SE = 3,
+	IPU6_VER_6EP = 5,
+	IPU6_VER_6EP_MTL = 6,
+};
+
+/*
+ * IPU6 - TGL
+ * IPU6SE - JSL
+ * IPU6EP - ADL/RPL
+ * IPU6EP_MTL - MTL
+ */
+static inline bool is_ipu6se(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6SE;
+}
+
+static inline bool is_ipu6ep(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6EP;
+}
+
+static inline bool is_ipu6ep_mtl(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6EP_MTL;
+}
+
+static inline bool is_ipu6_tgl(u8 hw_ver)
+{
+	return hw_ver == IPU6_VER_6;
+}
+
+/*
+ * ISYS DMA can overshoot. For higher resolutions the over-allocation is one
+ * line, but it must be at minimum 1024 bytes. The value could differ between
+ * versions / generations, thus it is provided via platform data.
+ */
+#define IPU6_ISYS_OVERALLOC_MIN 1024
+
+/* GDA has 128 physical pages; the page size is 2K for IPU6, 1K for others */
+#define IPU6_DEVICE_GDA_NR_PAGES 128
+
+/* Virtualization factor to calculate the available virtual pages */
+#define IPU6_DEVICE_GDA_VIRT_FACTOR 32
+
+struct ipu6_device {
+	struct pci_dev *pdev;
+	struct list_head devices;
+	struct ipu6_bus_device *isys;
+	struct ipu6_bus_device *psys;
+	struct ipu6_buttress buttress;
+
+	const struct firmware *cpd_fw;
+	const char *cpd_fw_name;
+	u32 cpd_metadata_cmpnt_size;
+
+	void __iomem *base;
+	bool need_ipc_reset;
+	bool secure_mode;
+	u8 hw_ver;
+	bool bus_ready_to_probe;
+};
+
+#define IPU6_ISYS_NAME "isys"
+#define IPU6_PSYS_NAME "psys"
+
+#define IPU6_MMU_MAX_DEVICES 4
+#define IPU6_MMU_ADDR_BITS 32
+/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU6_MMU_ADDR_BITS_NON_SECURE 31
+
+#define IPU6_MMU_MAX_TLB_L1_STREAMS 32
+#define IPU6_MMU_MAX_TLB_L2_STREAMS 32
+#define IPU6_MAX_LI_BLOCK_ADDR 128
+#define IPU6_MAX_L2_BLOCK_ADDR 64
+
+#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX
+#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX
+
+/*
+ * To maximize IOSF utilization, IPU6 needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. CDC FIFO burst collectors have a configurable
+ * threshold which is configured based on the outcome of performance
+ * measurements.
+ *
+ * isys has 3 ports with an IOSF interface for VC0, VC1 and VC2
+ * psys has 4 ports with an IOSF interface for VC0, VC1w, VC1r and VC2
+ *
+ * Threshold values are pre-defined and are arrived at after performance
+ * evaluations on each type of IPU6.
+ */
+#define IPU6_MAX_VC_IOSF_PORTS 4
+
+/*
+ * IPU6 must configure the correct arbitration mechanism for the IOSF VC
+ * requests. There are two options for VC0 and VC1: 0 means rearbitrate on
+ * stall and 1 means stall until the request is completed.
+ */
+#define IPU6_BTRS_ARB_MODE_TYPE_REARB 0
+#define IPU6_BTRS_ARB_MODE_TYPE_STALL 1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU6_BTRS_ARB_STALL_MODE_VC0 \
+	IPU6_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU6_BTRS_ARB_STALL_MODE_VC1 \
+	IPU6_BTRS_ARB_MODE_TYPE_REARB
+
+/*
+ * MMU Invalidation HW bug workaround by ZLW mechanism
+ *
+ * Old IPU6 MMUV2 has a bug in the invalidation mechanism which might result in
+ * a wrong translation or replication of the translation. This will cause data
+ * corruption. So we cannot directly use the MMU V2 invalidation registers
+ * to invalidate the MMU. Instead, whenever an invalidate is called, we need to
+ * clear the TLB by evicting all the valid translations, by filling it with the
+ * trash buffer (which is guaranteed not to be used by any other process). ZLW
+ * is used to fill the L1 and L2 caches with the trash buffer translations.
+ * ZLW, or Zero Length Write, is a pre-fetch mechanism that pre-fetches pages
+ * into the L1 and L2 caches in advance without triggering any memory
+ * operations.
+ *
+ * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream.
+ * One L1 block has 16 entries, hence points to 16 * 4K pages.
+ * L2 -> 16 streams and 32 blocks, 2 blocks per stream.
+ * One L2 block maps to 1024 L1 entries, hence points to a 4MB address range.
+ * 2 blocks per L2 stream means 1 stream points to an 8MB range.
+ *
+ * As we need to clear the caches and 8MB is the biggest cache size, we need
+ * a trash buffer which covers an 8MB address range. As these trash
+ * buffers are not used for any memory transactions, we need only the least
+ * amount of physical memory. So we reserve an 8MB IOVA address range but only
+ * one page is reserved from physical memory. Every page of this 8MB IOVA
+ * range is then mapped to the same physical memory page.
+ */
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU6_MMUV2_L2_RANGE (1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU6_MMUV2_MAX_L2_BLOCKS 2
+/* Max L1 blocks per stream */
+#define IPU6_MMUV2_MAX_L1_BLOCKS 16
+#define IPU6_MMUV2_TRASH_RANGE (IPU6_MMUV2_L2_RANGE * IPU6_MMUV2_MAX_L2_BLOCKS)
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK 16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU6_MMUV2_L2_RANGE
+
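Editorial note, not part of the patch above: with 4 KiB pages the constants just defined work out to IPU6_MMUV2_TRASH_RANGE = 2 * 1024 * 4 KiB = 8 MiB, all of it backed by a single physical page. The snippet below is only a hedged sketch of how such a trash range could be populated with ipu6_mmu_map() from ipu6-mmu.h; the helper name and the iova_base/trash_phys parameters are hypothetical, not the driver's actual code.

/* Sketch: point every page of the 8 MiB trash IOVA range at one trash page. */
static int ipu6_map_trash_range(struct ipu6_mmu_info *mmu_info,
				unsigned long iova_base, phys_addr_t trash_phys)
{
	unsigned long off;
	int ret;

	for (off = 0; off < IPU6_MMUV2_TRASH_RANGE; off += PAGE_SIZE) {
		ret = ipu6_mmu_map(mmu_info, iova_base + off, trash_phys,
				   PAGE_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}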
+/*
+ * In some of the IPU6 MMUs, there is provision to configure L1 and L2 page
+ * table caches. Both these L1 and L2 caches are divided into multiple sections
+ * called streams. There are a maximum of 16 streams for both caches. Each of
+ * these sections is subdivided into multiple blocks. When nr_l1streams = 0 and
+ * nr_l2streams = 0, the MMU is of type MMU_V1 and does not support L1/L2 page
+ * table caches.
+ *
+ * The L1 per-stream block sizes are configurable and vary per use case.
+ * L2 has constant block sizes - 2 blocks per stream.
+ *
+ * MMU1 supports pre-fetching of the pages to reduce cache lookup misses. To
+ * enable the pre-fetching, the MMU1 AT (Address Translator) device registers
+ * need to be configured.
+ *
+ * There are four types of memory accesses which require ZLW configuration.
+ * ZLW (Zero Length Write) is a mechanism to enable VT-d pre-fetching on the
+ * IOMMU.
+ *
+ * 1. Sequential Access or 1D mode
+ * Set ZLW_EN -> 1
+ * Set ZLW_PAGE_CROSS_1D -> 1
+ * Set ZLW_N to "N" pages so that ZLW will be inserted N pages ahead, where
+ * N is pre-defined and hardcoded in the platform data
+ * Set ZLW_2D -> 0
+ *
+ * 2. ZLW 2D mode
+ * Set ZLW_EN -> 1
+ * Set ZLW_PAGE_CROSS_1D -> 1
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 1
+ *
+ * 3. ZLW Enable (no 1D or 2D mode)
+ * Set ZLW_EN -> 1
+ * Set ZLW_PAGE_CROSS_1D -> 0
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 0
+ *
+ * 4. ZLW disable
+ * Set ZLW_EN -> 0
+ * Set ZLW_PAGE_CROSS_1D -> 0
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 0
+ *
+ * To configure the ZLW for the above memory accesses, four registers are
+ * available. Hence, to track these four settings, we have the following
+ * entries in the struct ipu6_mmu_hw. Each of these entries is per stream and
+ * available only for the L1 streams.
+ *
+ * a. l1_zlw_en -> To track whether ZLW is enabled per stream (ZLW_EN)
+ * b. l1_zlw_1d_mode -> To track 1D mode per stream. ZLW inserted at page
+ *    boundary.
+ * c. l1_ins_zlw_ahead_pages -> To track how far in advance the ZLW needs to
+ *    be inserted, i.e. insert the ZLW request N pages ahead of the address.
+ * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D)
+ *
+ * Currently the L1/L2 streams, blocks, AT ZLW configurations etc. are
+ * pre-defined as per use-case-specific calculations. Any change to this
+ * pre-defined table has to happen in sync with the IPU6 FW.
+ */
+struct ipu6_mmu_hw {
+	union {
+		unsigned long offset;
+		void __iomem *base;
+	};
+	u32 info_bits;
+	u8 nr_l1streams;
+	/*
+	 * L1 has a variable number of blocks per stream - a total of 64 blocks
+	 * and a maximum of 16 blocks per stream. Configurable by using the
+	 * block start address per stream. The block start address is
+	 * calculated from the block size.
+	 */
+	u8 l1_block_sz[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	/* Whether ZLW is enabled for each stream */
+	bool l1_zlw_en[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	bool l1_zlw_1d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	u8 l1_ins_zlw_ahead_pages[IPU6_MMU_MAX_TLB_L1_STREAMS];
+	bool l1_zlw_2d_mode[IPU6_MMU_MAX_TLB_L1_STREAMS];
+
+	u32 l1_stream_id_reg_offset;
+	u32 l2_stream_id_reg_offset;
+
+	u8 nr_l2streams;
+	/*
+	 * L2 has a fixed 2 blocks per stream. The block address is calculated
+	 * from the block size.
+	 */
+	u8 l2_block_sz[IPU6_MMU_MAX_TLB_L2_STREAMS];
+	/* flag to track if a WA is needed for the successive invalidate HW bug */
+	bool insert_read_before_invalidate;
+};
+
+struct ipu6_mmu_pdata {
+	u32 nr_mmus;
+	struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
+	int mmid;
+};
+
+struct ipu6_isys_csi2_pdata {
+	void __iomem *base;
+};
+
+struct ipu6_isys_internal_csi2_pdata {
+	u32 nports;
+	u32 irq_mask;
+	u32 ctrl0_irq_edge;
+	u32 ctrl0_irq_clear;
+	u32 ctrl0_irq_mask;
+	u32 ctrl0_irq_enable;
+	u32 ctrl0_irq_lnp;
+	u32 ctrl0_irq_status;
+	u32 fw_access_port_ofs;
+};
+
+struct ipu6_isys_internal_tpg_pdata {
+	u32 ntpgs;
+	u32 *offsets;
+	u32 *sels;
+};
+
+struct ipu6_hw_variants {
+	unsigned long offset;
+	u32 nr_mmus;
+	struct ipu6_mmu_hw mmu_hw[IPU6_MMU_MAX_DEVICES];
+	u8 cdc_fifos;
+	u8 cdc_fifo_threshold[IPU6_MAX_VC_IOSF_PORTS];
+	u32 dmem_offset;
+	u32 spc_offset;
+};
+
+struct ipu6_isys_internal_pdata {
+	struct ipu6_isys_internal_csi2_pdata csi2;
+	struct ipu6_hw_variants hw_variant;
+	u32 num_parallel_streams;
+	u32 isys_dma_overshoot;
+	u32 sram_gran_shift;
+	u32 sram_gran_size;
+	u32 max_sram_size;
+	u32 max_streams;
+	u32 max_send_queues;
+	u32 max_sram_blocks;
+	u32 max_devq_size;
+	u32 sensor_type_start;
+	u32 sensor_type_end;
+	u32 ltr;
+	u32 memopen_threshold;
+	bool enhanced_iwake;
+};
+
+struct ipu6_isys_pdata {
+	void __iomem *base;
+	const struct ipu6_isys_internal_pdata *ipdata;
+};
+
+struct ipu6_psys_internal_pdata {
+	struct ipu6_hw_variants hw_variant;
+};
+
+struct ipu6_psys_pdata {
+	void __iomem *base;
+	const struct ipu6_psys_internal_pdata *ipdata;
+};
+
+int ipu6_fw_authenticate(void *data, u64 val);
+void ipu6_configure_spc(struct ipu6_device *isp,
+			const struct ipu6_hw_variants *hw_variant,
+			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+			dma_addr_t pkg_dir_dma_addr);
+#endif /* IPU6_H */
-- 
2.43.0
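Editorial note appended after the patch: a minimal sketch of how the four ZLW modes documented in ipu6.h above would map onto the per-stream fields of struct ipu6_mmu_hw. The helper name, the chosen stream index and the prefetch distance of 3 pages are assumptions for illustration only; the real per-platform tables live elsewhere and are kept in sync with the IPU6 firmware.

/* Sketch: describe one L1 stream as "1D mode, insert ZLW 3 pages ahead". */
static void ipu6_mmu_hw_set_zlw_1d(struct ipu6_mmu_hw *mmu_hw, unsigned int s)
{
	mmu_hw->l1_zlw_en[s] = true;		/* ZLW_EN = 1 */
	mmu_hw->l1_zlw_1d_mode[s] = true;	/* ZLW_PAGE_CROSS_1D = 1 */
	mmu_hw->l1_ins_zlw_ahead_pages[s] = 3;	/* ZLW_N = 3 (hypothetical) */
	mmu_hw->l1_zlw_2d_mode[s] = false;	/* ZLW_2D = 0 */
}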